From 7985ad3434d94c7171355ab9fa911161f0783b9e Mon Sep 17 00:00:00 2001 From: Zack Buhman Date: Tue, 13 Jan 2026 12:50:02 -0600 Subject: [PATCH] remove dependency on d3dx This is mostly because it is not possible to statically link d3dx10. Coincidentally, the directxmath API appears to be superior to d3dxmath in most ways. --- Makefile | 3 +- include/cube.hpp | 17 +- include/directxmath/directxmath.h | 2216 ++ include/directxmath/directxmathconvert.inl | 2187 ++ include/directxmath/directxmathmatrix.inl | 3413 ++ include/directxmath/directxmathmisc.inl | 2425 ++ include/directxmath/directxmathvector.inl | 14689 ++++++++ include/gltf.hpp | 36 +- include/gltf_instance.hpp | 6 +- include/robot_player.hpp | 2386 +- models/robot_player/robot_player.glb | Bin 0 -> 239380 bytes src/cube.cpp | 306 +- src/main.cpp | 266 +- src/render_state.cpp | 1 - src/robot_player.cpp | 36257 ++++++++++--------- 15 files changed, 44561 insertions(+), 19647 deletions(-) create mode 100644 include/directxmath/directxmath.h create mode 100644 include/directxmath/directxmathconvert.inl create mode 100644 include/directxmath/directxmathmatrix.inl create mode 100644 include/directxmath/directxmathmisc.inl create mode 100644 include/directxmath/directxmathvector.inl create mode 100644 models/robot_player/robot_player.glb diff --git a/Makefile b/Makefile index 9c7bda6..ad19699 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,7 @@ CXXSTD += -std=gnu++14 CFLAGS += -Wall -Werror -Wfatal-errors CFLAGS += -Wno-unused-but-set-variable +CFLAGS += -Wno-unknown-pragmas CXXFLAGS += -fno-exceptions CFLAGS += -municode @@ -47,4 +48,4 @@ OBJS = \ $(BUILD_TYPE)/main.res $(BUILD_TYPE)/d3d10.exe: $(OBJS) - $(CXX) $(LDFLAGS) $(WOPT) -o $@ $(OBJS) -ld3dx10 -ld3d10 + $(CXX) $(LDFLAGS) $(WOPT) -o $@ $(OBJS) -ld3d10 diff --git a/include/cube.hpp b/include/cube.hpp index 632d8a0..6303851 100644 --- a/include/cube.hpp +++ b/include/cube.hpp @@ -1,25 +1,26 @@ +#pragma once #ifndef _CUBE_HPP_ #define _CUBE_HPP_ namespace cube { -extern const D3DXVECTOR3 accessor_0[]; +extern const XMFLOAT3 accessor_0[]; const int accessor_0__length = 24; -const int accessor_0__size = (sizeof (D3DXVECTOR3)) * 24; +const int accessor_0__size = (sizeof (XMFLOAT3)) * 24; -extern const D3DXVECTOR3 accessor_1[]; +extern const XMFLOAT3 accessor_1[]; const int accessor_1__length = 24; -const int accessor_1__size = (sizeof (D3DXVECTOR3)) * 24; +const int accessor_1__size = (sizeof (XMFLOAT3)) * 24; -extern const D3DXVECTOR2 accessor_2[]; +extern const XMFLOAT2 accessor_2[]; const int accessor_2__length = 24; -const int accessor_2__size = (sizeof (D3DXVECTOR2)) * 24; +const int accessor_2__size = (sizeof (XMFLOAT2)) * 24; -extern const DWORD accessor_3[]; +extern const int accessor_3[]; const int accessor_3__length = 36; -const int accessor_3__size = (sizeof (DWORD)) * 36; +const int accessor_3__size = (sizeof (int)) * 36; extern const Node node_0; extern const Node * nodes[]; diff --git a/include/directxmath/directxmath.h b/include/directxmath/directxmath.h new file mode 100644 index 0000000..f167223 --- /dev/null +++ b/include/directxmath/directxmath.h @@ -0,0 +1,2216 @@ +//------------------------------------------------------------------------------------- +// DirectXMath.h -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+//
+// http://go.microsoft.com/fwlink/?LinkID=615560
+//-------------------------------------------------------------------------------------
+
+#pragma once
+
+#ifndef __cplusplus
+#error DirectX Math requires C++
+#endif
+
+#define DIRECTX_MATH_VERSION 314
+
+#if defined(_MSC_VER) && (_MSC_VER < 1910)
+#error DirectX Math requires Visual C++ 2017 or later.
+#endif
+
+#if defined(_MSC_VER) && !defined(_M_ARM) && !defined(_M_ARM64) && !defined(_M_HYBRID_X86_ARM64) && (!_MANAGED) && (!_M_CEE) && (!defined(_M_IX86_FP) || (_M_IX86_FP > 1)) && !defined(_XM_NO_INTRINSICS_) && !defined(_XM_VECTORCALL_)
+#define _XM_VECTORCALL_ 1
+#endif
+
+#if _XM_VECTORCALL_
+#define XM_CALLCONV __vectorcall
+#elif defined(__GNUC__)
+#define XM_CALLCONV
+#else
+#define XM_CALLCONV __fastcall
+#endif
+
+#ifndef XM_DEPRECATED
+#ifdef __GNUC__
+#define XM_DEPRECATED __attribute__ ((deprecated))
+#else
+#define XM_DEPRECATED __declspec(deprecated("This is deprecated and will be removed in a future version."))
+#endif
+#endif
+
+#if !defined(_XM_AVX2_INTRINSICS_) && defined(__AVX2__) && !defined(_XM_NO_INTRINSICS_)
+#define _XM_AVX2_INTRINSICS_
+#endif
+
+#if !defined(_XM_FMA3_INTRINSICS_) && defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+#define _XM_FMA3_INTRINSICS_
+#endif
+
+#if !defined(_XM_F16C_INTRINSICS_) && defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+#define _XM_F16C_INTRINSICS_
+#endif
+
+#if !defined(_XM_F16C_INTRINSICS_) && defined(__F16C__) && !defined(_XM_NO_INTRINSICS_)
+#define _XM_F16C_INTRINSICS_
+#endif
+
+#if defined(_XM_FMA3_INTRINSICS_) && !defined(_XM_AVX_INTRINSICS_)
+#define _XM_AVX_INTRINSICS_
+#endif
+
+#if defined(_XM_F16C_INTRINSICS_) && !defined(_XM_AVX_INTRINSICS_)
+#define _XM_AVX_INTRINSICS_
+#endif
+
+#if !defined(_XM_AVX_INTRINSICS_) && defined(__AVX__) && !defined(_XM_NO_INTRINSICS_)
+#define _XM_AVX_INTRINSICS_
+#endif
+
+#if defined(_XM_AVX_INTRINSICS_) && !defined(_XM_SSE4_INTRINSICS_)
+#define _XM_SSE4_INTRINSICS_
+#endif
+
+#if defined(_XM_SSE4_INTRINSICS_) && !defined(_XM_SSE3_INTRINSICS_)
+#define _XM_SSE3_INTRINSICS_
+#endif
+
+#if defined(_XM_SSE3_INTRINSICS_) && !defined(_XM_SSE_INTRINSICS_)
+#define _XM_SSE_INTRINSICS_
+#endif
+
+#if !defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+#if (defined(_M_IX86) || defined(_M_X64) || __i386__ || __x86_64__) && !defined(_M_HYBRID_X86_ARM64)
+#define _XM_SSE_INTRINSICS_
+#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __arm__ || __aarch64__
+#define _XM_ARM_NEON_INTRINSICS_
+#elif !defined(_XM_NO_INTRINSICS_)
+#error DirectX Math does not support this target
+#endif
+#endif // !_XM_ARM_NEON_INTRINSICS_ && !_XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_
+
+#if !defined(_XM_NO_XMVECTOR_OVERLOADS_) && (defined(__clang__) || defined(__GNUC__))
+#define _XM_NO_XMVECTOR_OVERLOADS_
+#endif
+
+#pragma warning(push)
+#pragma warning(disable:4514 4820)
+// C4514/4820: Off by default noise
+#include <math.h>
+#include <float.h>
+#pragma warning(pop)
+
+#ifndef _XM_NO_INTRINSICS_
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4987)
+// C4987: Off by default noise
+#include <intrin.h>
+#pragma warning(pop)
+#endif
+
+#if (defined(__clang__) || defined(__GNUC__)) && (__x86_64__ || __i386__)
+#include <cpuid.h>
+#endif
+
+#ifdef _XM_SSE_INTRINSICS_
+#include <xmmintrin.h>
+#include <emmintrin.h>
+
+#ifdef _XM_SSE3_INTRINSICS_
+#include <pmmintrin.h>
+#endif
+
+#ifdef _XM_SSE4_INTRINSICS_
+#include <smmintrin.h>
+#endif
+
+#ifdef _XM_AVX_INTRINSICS_
+#include <immintrin.h>
+#endif
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+#if defined(_MSC_VER) && (defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64))
+#include <arm64_neon.h>
+#else
+#include <arm_neon.h>
+#endif
+#endif
+#endif // !_XM_NO_INTRINSICS_
+
+#include "sal.h"
+#include <assert.h>
+
+#pragma warning(push)
+#pragma warning(disable : 4005 4668)
+// C4005/4668: Old header issue
+#include <stdint.h>
+#pragma warning(pop)
+
+#ifdef __GNUC__
+#define XM_ALIGNED_DATA(x) __attribute__ ((aligned(x)))
+#define XM_ALIGNED_STRUCT(x) struct __attribute__ ((aligned(x)))
+#else
+#define XM_ALIGNED_DATA(x) __declspec(align(x))
+#define XM_ALIGNED_STRUCT(x) __declspec(align(x)) struct
+#endif
+
+/****************************************************************************
+ *
+ * Conditional intrinsics
+ *
+ ****************************************************************************/
+
+#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+
+#if defined(_XM_NO_MOVNT_)
+#define XM_STREAM_PS( p, a ) _mm_store_ps((p), (a))
+#define XM256_STREAM_PS( p, a ) _mm256_store_ps((p), (a))
+#define XM_SFENCE()
+#else
+#define XM_STREAM_PS( p, a ) _mm_stream_ps((p), (a))
+#define XM256_STREAM_PS( p, a ) _mm256_stream_ps((p), (a))
+#define XM_SFENCE() _mm_sfence()
+#endif
+
+#if defined(_XM_FMA3_INTRINSICS_)
+#define XM_FMADD_PS( a, b, c ) _mm_fmadd_ps((a), (b), (c))
+#define XM_FNMADD_PS( a, b, c ) _mm_fnmadd_ps((a), (b), (c))
+#else
+#define XM_FMADD_PS( a, b, c ) _mm_add_ps(_mm_mul_ps((a), (b)), (c))
+#define XM_FNMADD_PS( a, b, c ) _mm_sub_ps((c), _mm_mul_ps((a), (b)))
+#endif
+
+#if defined(_XM_AVX_INTRINSICS_) && defined(_XM_FAVOR_INTEL_)
+#define XM_PERMUTE_PS( v, c ) _mm_permute_ps((v), c )
+#else
+#define XM_PERMUTE_PS( v, c ) _mm_shuffle_ps((v), (v), c )
+#endif
+
+#endif // _XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_
+
+#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+
+#if defined(__clang__)
+#define XM_PREFETCH( a ) __builtin_prefetch(a)
+#elif defined(_MSC_VER)
+#define XM_PREFETCH( a ) __prefetch(a)
+#else
+#define XM_PREFETCH( a )
+#endif
+
+#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_
+
+namespace DirectX
+{
+
+    /****************************************************************************
+     *
+     * Constant definitions
+     *
+     ****************************************************************************/
+
+#if defined(__XNAMATH_H__) && defined(XM_PI)
+#undef XM_PI
+#undef XM_2PI
+#undef XM_1DIVPI
+#undef XM_1DIV2PI
+#undef XM_PIDIV2
+#undef XM_PIDIV4
+#undef XM_SELECT_0
+#undef XM_SELECT_1
+#undef XM_PERMUTE_0X
+#undef XM_PERMUTE_0Y
+#undef XM_PERMUTE_0Z
+#undef XM_PERMUTE_0W
+#undef XM_PERMUTE_1X
+#undef XM_PERMUTE_1Y
+#undef XM_PERMUTE_1Z
+#undef XM_PERMUTE_1W
+#undef XM_CRMASK_CR6
+#undef XM_CRMASK_CR6TRUE
+#undef XM_CRMASK_CR6FALSE
+#undef XM_CRMASK_CR6BOUNDS
+#undef XM_CACHE_LINE_SIZE
+#endif
+
+    constexpr float XM_PI = 3.141592654f;
+    constexpr float XM_2PI = 6.283185307f;
+    constexpr float XM_1DIVPI = 0.318309886f;
+    constexpr float XM_1DIV2PI = 0.159154943f;
+    constexpr float XM_PIDIV2 = 1.570796327f;
+    constexpr float XM_PIDIV4 = 0.785398163f;
+
+    constexpr uint32_t XM_SELECT_0 = 0x00000000;
+    constexpr uint32_t XM_SELECT_1 = 0xFFFFFFFF;
+
+    constexpr uint32_t XM_PERMUTE_0X = 0;
+    constexpr uint32_t XM_PERMUTE_0Y = 1;
+    constexpr uint32_t XM_PERMUTE_0Z = 2;
+    constexpr uint32_t XM_PERMUTE_0W = 3;
+    constexpr uint32_t XM_PERMUTE_1X = 4;
+    constexpr uint32_t XM_PERMUTE_1Y = 5;
+    constexpr uint32_t XM_PERMUTE_1Z = 6;
+    constexpr uint32_t XM_PERMUTE_1W = 7;
+
+    constexpr uint32_t XM_SWIZZLE_X = 0;
+    constexpr uint32_t XM_SWIZZLE_Y 
= 1; + constexpr uint32_t XM_SWIZZLE_Z = 2; + constexpr uint32_t XM_SWIZZLE_W = 3; + + constexpr uint32_t XM_CRMASK_CR6 = 0x000000F0; + constexpr uint32_t XM_CRMASK_CR6TRUE = 0x00000080; + constexpr uint32_t XM_CRMASK_CR6FALSE = 0x00000020; + constexpr uint32_t XM_CRMASK_CR6BOUNDS = XM_CRMASK_CR6FALSE; + + constexpr size_t XM_CACHE_LINE_SIZE = 64; + + + /**************************************************************************** + * + * Macros + * + ****************************************************************************/ + +#if defined(__XNAMATH_H__) && defined(XMComparisonAllTrue) +#undef XMComparisonAllTrue +#undef XMComparisonAnyTrue +#undef XMComparisonAllFalse +#undef XMComparisonAnyFalse +#undef XMComparisonMixed +#undef XMComparisonAllInBounds +#undef XMComparisonAnyOutOfBounds +#endif + + // Unit conversion + + inline constexpr float XMConvertToRadians(float fDegrees) noexcept { return fDegrees * (XM_PI / 180.0f); } + inline constexpr float XMConvertToDegrees(float fRadians) noexcept { return fRadians * (180.0f / XM_PI); } + + // Condition register evaluation proceeding a recording (R) comparison + + inline constexpr bool XMComparisonAllTrue(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6TRUE) == XM_CRMASK_CR6TRUE); } + inline constexpr bool XMComparisonAnyTrue(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6FALSE) != XM_CRMASK_CR6FALSE); } + inline constexpr bool XMComparisonAllFalse(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6FALSE) == XM_CRMASK_CR6FALSE); } + inline constexpr bool XMComparisonAnyFalse(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6TRUE) != XM_CRMASK_CR6TRUE); } + inline constexpr bool XMComparisonMixed(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6) == 0); } + inline constexpr bool XMComparisonAllInBounds(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6BOUNDS) == XM_CRMASK_CR6BOUNDS); } + inline constexpr bool XMComparisonAnyOutOfBounds(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6BOUNDS) != XM_CRMASK_CR6BOUNDS); } + + + /**************************************************************************** + * + * Data types + * + ****************************************************************************/ + +#pragma warning(push) +#pragma warning(disable:4068 4201 4365 4324 4820) + // C4068: ignore unknown pragmas + // C4201: nonstandard extension used : nameless struct/union + // C4365: Off by default noise + // C4324/4820: padding warnings + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 25000, "FXMVECTOR is 16 bytes") +#endif + +//------------------------------------------------------------------------------ +#if defined(_XM_NO_INTRINSICS_) + struct __vector4 + { + union + { + float vector4_f32[4]; + uint32_t vector4_u32[4]; + }; + }; +#endif // _XM_NO_INTRINSICS_ + + //------------------------------------------------------------------------------ + // Vector intrinsic: Four 32 bit floating point components aligned on a 16 byte + // boundary and mapped to hardware vector registers +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + typedef __m128 XMVECTOR; +#elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + typedef float32x4_t XMVECTOR; +#else + typedef __vector4 XMVECTOR; +#endif + + // Fix-up for (1st-3rd) XMVECTOR parameters that are pass-in-register for x86, ARM, ARM64, and vector call; by reference otherwise +#if ( defined(_M_IX86) || defined(_M_ARM) || defined(_M_ARM64) || _XM_VECTORCALL_ || __i386__ || __arm__ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + 
typedef const XMVECTOR FXMVECTOR; +#else + typedef const XMVECTOR& FXMVECTOR; +#endif + + // Fix-up for (4th) XMVECTOR parameter to pass in-register for ARM, ARM64, and x64 vector call; by reference otherwise +#if ( defined(_M_ARM) || defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || (_XM_VECTORCALL_ && !defined(_M_IX86) ) || __arm__ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMVECTOR GXMVECTOR; +#else + typedef const XMVECTOR& GXMVECTOR; +#endif + + // Fix-up for (5th & 6th) XMVECTOR parameter to pass in-register for ARM64 and vector call; by reference otherwise +#if ( defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || _XM_VECTORCALL_ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMVECTOR HXMVECTOR; +#else + typedef const XMVECTOR& HXMVECTOR; +#endif + + // Fix-up for (7th+) XMVECTOR parameters to pass by reference + typedef const XMVECTOR& CXMVECTOR; + + //------------------------------------------------------------------------------ + // Conversion types for constants + XM_ALIGNED_STRUCT(16) XMVECTORF32 + { + union + { + float f[4]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } + inline operator const float* () const noexcept { return f; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + XM_ALIGNED_STRUCT(16) XMVECTORI32 + { + union + { + int32_t i[4]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + XM_ALIGNED_STRUCT(16) XMVECTORU8 + { + union + { + uint8_t u[16]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + XM_ALIGNED_STRUCT(16) XMVECTORU32 + { + union + { + uint32_t u[4]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + //------------------------------------------------------------------------------ + // Vector operators + +#ifndef _XM_NO_XMVECTOR_OVERLOADS_ + XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV operator- (FXMVECTOR V) noexcept; + + XMVECTOR& XM_CALLCONV operator+= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + XMVECTOR& XM_CALLCONV operator-= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + XMVECTOR& XM_CALLCONV operator*= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + XMVECTOR& XM_CALLCONV operator/= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + + XMVECTOR& operator*= (XMVECTOR& V, float S) noexcept; + XMVECTOR& operator/= (XMVECTOR& V, float S) noexcept; + + XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator- (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator* (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator/ (FXMVECTOR V1, FXMVECTOR V2) noexcept; + 
XMVECTOR XM_CALLCONV operator* (FXMVECTOR V, float S) noexcept; + XMVECTOR XM_CALLCONV operator* (float S, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV operator/ (FXMVECTOR V, float S) noexcept; +#endif /* !_XM_NO_XMVECTOR_OVERLOADS_ */ + + //------------------------------------------------------------------------------ + // Matrix type: Sixteen 32 bit floating point components aligned on a + // 16 byte boundary and mapped to four hardware vector registers + + struct XMMATRIX; + + // Fix-up for (1st) XMMATRIX parameter to pass in-register for ARM64 and vector call; by reference otherwise +#if ( defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || _XM_VECTORCALL_ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMMATRIX FXMMATRIX; +#else + typedef const XMMATRIX& FXMMATRIX; +#endif + + // Fix-up for (2nd+) XMMATRIX parameters to pass by reference + typedef const XMMATRIX& CXMMATRIX; + +#ifdef _XM_NO_INTRINSICS_ + struct XMMATRIX +#else + XM_ALIGNED_STRUCT(16) XMMATRIX +#endif + { +#ifdef _XM_NO_INTRINSICS_ + union + { + XMVECTOR r[4]; + struct + { + float _11, _12, _13, _14; + float _21, _22, _23, _24; + float _31, _32, _33, _34; + float _41, _42, _43, _44; + }; + float m[4][4]; + }; +#else + XMVECTOR r[4]; +#endif + + XMMATRIX() = default; + + XMMATRIX(const XMMATRIX&) = default; + +#if defined(_MSC_VER) && (_MSC_FULL_VER < 191426431) + XMMATRIX& operator= (const XMMATRIX& M) noexcept { r[0] = M.r[0]; r[1] = M.r[1]; r[2] = M.r[2]; r[3] = M.r[3]; return *this; } +#else + XMMATRIX& operator=(const XMMATRIX&) = default; + + XMMATRIX(XMMATRIX&&) = default; + XMMATRIX& operator=(XMMATRIX&&) = default; +#endif + + constexpr XMMATRIX(FXMVECTOR R0, FXMVECTOR R1, FXMVECTOR R2, CXMVECTOR R3) noexcept : r{ R0,R1,R2,R3 } {} + XMMATRIX(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept; + explicit XMMATRIX(_In_reads_(16) const float* pArray) noexcept; + +#ifdef _XM_NO_INTRINSICS_ + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } +#endif + + XMMATRIX operator+ () const noexcept { return *this; } + XMMATRIX operator- () const noexcept; + + XMMATRIX& XM_CALLCONV operator+= (FXMMATRIX M) noexcept; + XMMATRIX& XM_CALLCONV operator-= (FXMMATRIX M) noexcept; + XMMATRIX& XM_CALLCONV operator*= (FXMMATRIX M) noexcept; + XMMATRIX& operator*= (float S) noexcept; + XMMATRIX& operator/= (float S) noexcept; + + XMMATRIX XM_CALLCONV operator+ (FXMMATRIX M) const noexcept; + XMMATRIX XM_CALLCONV operator- (FXMMATRIX M) const noexcept; + XMMATRIX XM_CALLCONV operator* (FXMMATRIX M) const noexcept; + XMMATRIX operator* (float S) const noexcept; + XMMATRIX operator/ (float S) const noexcept; + + friend XMMATRIX XM_CALLCONV operator* (float S, FXMMATRIX M) noexcept; + }; + + //------------------------------------------------------------------------------ + // 2D Vector; 32 bit floating point components + struct XMFLOAT2 + { + float x; + float y; + + XMFLOAT2() = default; + + XMFLOAT2(const XMFLOAT2&) = default; + XMFLOAT2& operator=(const XMFLOAT2&) = default; + + XMFLOAT2(XMFLOAT2&&) = default; + XMFLOAT2& operator=(XMFLOAT2&&) = default; + + constexpr XMFLOAT2(float _x, float _y) noexcept : x(_x), y(_y) {} + explicit XMFLOAT2(_In_reads_(2) const float* pArray) noexcept : x(pArray[0]), y(pArray[1]) {} + }; + + // 2D Vector; 32 bit floating point 
components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT2A : public XMFLOAT2 + { + XMFLOAT2A() = default; + + XMFLOAT2A(const XMFLOAT2A&) = default; + XMFLOAT2A& operator=(const XMFLOAT2A&) = default; + + XMFLOAT2A(XMFLOAT2A&&) = default; + XMFLOAT2A& operator=(XMFLOAT2A&&) = default; + + constexpr XMFLOAT2A(float _x, float _y) noexcept : XMFLOAT2(_x, _y) {} + explicit XMFLOAT2A(_In_reads_(2) const float* pArray) noexcept : XMFLOAT2(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 2D Vector; 32 bit signed integer components + struct XMINT2 + { + int32_t x; + int32_t y; + + XMINT2() = default; + + XMINT2(const XMINT2&) = default; + XMINT2& operator=(const XMINT2&) = default; + + XMINT2(XMINT2&&) = default; + XMINT2& operator=(XMINT2&&) = default; + + constexpr XMINT2(int32_t _x, int32_t _y) noexcept : x(_x), y(_y) {} + explicit XMINT2(_In_reads_(2) const int32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]) {} + }; + + // 2D Vector; 32 bit unsigned integer components + struct XMUINT2 + { + uint32_t x; + uint32_t y; + + XMUINT2() = default; + + XMUINT2(const XMUINT2&) = default; + XMUINT2& operator=(const XMUINT2&) = default; + + XMUINT2(XMUINT2&&) = default; + XMUINT2& operator=(XMUINT2&&) = default; + + constexpr XMUINT2(uint32_t _x, uint32_t _y) noexcept : x(_x), y(_y) {} + explicit XMUINT2(_In_reads_(2) const uint32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]) {} + }; + + //------------------------------------------------------------------------------ + // 3D Vector; 32 bit floating point components + struct XMFLOAT3 + { + float x; + float y; + float z; + + XMFLOAT3() = default; + + XMFLOAT3(const XMFLOAT3&) = default; + XMFLOAT3& operator=(const XMFLOAT3&) = default; + + XMFLOAT3(XMFLOAT3&&) = default; + XMFLOAT3& operator=(XMFLOAT3&&) = default; + + constexpr XMFLOAT3(float _x, float _y, float _z) noexcept : x(_x), y(_y), z(_z) {} + explicit XMFLOAT3(_In_reads_(3) const float* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]) {} + }; + + // 3D Vector; 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT3A : public XMFLOAT3 + { + XMFLOAT3A() = default; + + XMFLOAT3A(const XMFLOAT3A&) = default; + XMFLOAT3A& operator=(const XMFLOAT3A&) = default; + + XMFLOAT3A(XMFLOAT3A&&) = default; + XMFLOAT3A& operator=(XMFLOAT3A&&) = default; + + constexpr XMFLOAT3A(float _x, float _y, float _z) noexcept : XMFLOAT3(_x, _y, _z) {} + explicit XMFLOAT3A(_In_reads_(3) const float* pArray) noexcept : XMFLOAT3(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 3D Vector; 32 bit signed integer components + struct XMINT3 + { + int32_t x; + int32_t y; + int32_t z; + + XMINT3() = default; + + XMINT3(const XMINT3&) = default; + XMINT3& operator=(const XMINT3&) = default; + + XMINT3(XMINT3&&) = default; + XMINT3& operator=(XMINT3&&) = default; + + constexpr XMINT3(int32_t _x, int32_t _y, int32_t _z) noexcept : x(_x), y(_y), z(_z) {} + explicit XMINT3(_In_reads_(3) const int32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]) {} + }; + + // 3D Vector; 32 bit unsigned integer components + struct XMUINT3 + { + uint32_t x; + uint32_t y; + uint32_t z; + + XMUINT3() = default; + + XMUINT3(const XMUINT3&) = default; + XMUINT3& operator=(const XMUINT3&) = default; + + XMUINT3(XMUINT3&&) = default; + XMUINT3& operator=(XMUINT3&&) = default; + + constexpr XMUINT3(uint32_t _x, uint32_t _y, uint32_t _z) noexcept : x(_x), y(_y), 
z(_z) {} + explicit XMUINT3(_In_reads_(3) const uint32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]) {} + }; + + //------------------------------------------------------------------------------ + // 4D Vector; 32 bit floating point components + struct XMFLOAT4 + { + float x; + float y; + float z; + float w; + + XMFLOAT4() = default; + + XMFLOAT4(const XMFLOAT4&) = default; + XMFLOAT4& operator=(const XMFLOAT4&) = default; + + XMFLOAT4(XMFLOAT4&&) = default; + XMFLOAT4& operator=(XMFLOAT4&&) = default; + + constexpr XMFLOAT4(float _x, float _y, float _z, float _w) noexcept : x(_x), y(_y), z(_z), w(_w) {} + explicit XMFLOAT4(_In_reads_(4) const float* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {} + }; + + // 4D Vector; 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT4A : public XMFLOAT4 + { + XMFLOAT4A() = default; + + XMFLOAT4A(const XMFLOAT4A&) = default; + XMFLOAT4A& operator=(const XMFLOAT4A&) = default; + + XMFLOAT4A(XMFLOAT4A&&) = default; + XMFLOAT4A& operator=(XMFLOAT4A&&) = default; + + constexpr XMFLOAT4A(float _x, float _y, float _z, float _w) noexcept : XMFLOAT4(_x, _y, _z, _w) {} + explicit XMFLOAT4A(_In_reads_(4) const float* pArray) noexcept : XMFLOAT4(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 4D Vector; 32 bit signed integer components + struct XMINT4 + { + int32_t x; + int32_t y; + int32_t z; + int32_t w; + + XMINT4() = default; + + XMINT4(const XMINT4&) = default; + XMINT4& operator=(const XMINT4&) = default; + + XMINT4(XMINT4&&) = default; + XMINT4& operator=(XMINT4&&) = default; + + constexpr XMINT4(int32_t _x, int32_t _y, int32_t _z, int32_t _w) noexcept : x(_x), y(_y), z(_z), w(_w) {} + explicit XMINT4(_In_reads_(4) const int32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {} + }; + + // 4D Vector; 32 bit unsigned integer components + struct XMUINT4 + { + uint32_t x; + uint32_t y; + uint32_t z; + uint32_t w; + + XMUINT4() = default; + + XMUINT4(const XMUINT4&) = default; + XMUINT4& operator=(const XMUINT4&) = default; + + XMUINT4(XMUINT4&&) = default; + XMUINT4& operator=(XMUINT4&&) = default; + + constexpr XMUINT4(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w) noexcept : x(_x), y(_y), z(_z), w(_w) {} + explicit XMUINT4(_In_reads_(4) const uint32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {} + }; + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +#pragma clang diagnostic ignored "-Wnested-anon-types" +#endif + + //------------------------------------------------------------------------------ + // 3x3 Matrix: 32 bit floating point components + struct XMFLOAT3X3 + { + union + { + struct + { + float _11, _12, _13; + float _21, _22, _23; + float _31, _32, _33; + }; + float m[3][3]; + }; + + XMFLOAT3X3() = default; + + XMFLOAT3X3(const XMFLOAT3X3&) = default; + XMFLOAT3X3& operator=(const XMFLOAT3X3&) = default; + + XMFLOAT3X3(XMFLOAT3X3&&) = default; + XMFLOAT3X3& operator=(XMFLOAT3X3&&) = default; + + constexpr XMFLOAT3X3(float m00, float m01, float m02, + float m10, float m11, float m12, + float m20, float m21, float m22) noexcept + : _11(m00), _12(m01), _13(m02), + _21(m10), _22(m11), _23(m12), + _31(m20), _32(m21), _33(m22) {} + explicit XMFLOAT3X3(_In_reads_(9) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + 
float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + //------------------------------------------------------------------------------ + // 4x3 Row-major Matrix: 32 bit floating point components + struct XMFLOAT4X3 + { + union + { + struct + { + float _11, _12, _13; + float _21, _22, _23; + float _31, _32, _33; + float _41, _42, _43; + }; + float m[4][3]; + float f[12]; + }; + + XMFLOAT4X3() = default; + + XMFLOAT4X3(const XMFLOAT4X3&) = default; + XMFLOAT4X3& operator=(const XMFLOAT4X3&) = default; + + XMFLOAT4X3(XMFLOAT4X3&&) = default; + XMFLOAT4X3& operator=(XMFLOAT4X3&&) = default; + + constexpr XMFLOAT4X3(float m00, float m01, float m02, + float m10, float m11, float m12, + float m20, float m21, float m22, + float m30, float m31, float m32) noexcept + : _11(m00), _12(m01), _13(m02), + _21(m10), _22(m11), _23(m12), + _31(m20), _32(m21), _33(m22), + _41(m30), _42(m31), _43(m32) {} + explicit XMFLOAT4X3(_In_reads_(12) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + // 4x3 Row-major Matrix: 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT4X3A : public XMFLOAT4X3 + { + XMFLOAT4X3A() = default; + + XMFLOAT4X3A(const XMFLOAT4X3A&) = default; + XMFLOAT4X3A& operator=(const XMFLOAT4X3A&) = default; + + XMFLOAT4X3A(XMFLOAT4X3A&&) = default; + XMFLOAT4X3A& operator=(XMFLOAT4X3A&&) = default; + + constexpr XMFLOAT4X3A(float m00, float m01, float m02, + float m10, float m11, float m12, + float m20, float m21, float m22, + float m30, float m31, float m32) noexcept : + XMFLOAT4X3(m00, m01, m02, m10, m11, m12, m20, m21, m22, m30, m31, m32) {} + explicit XMFLOAT4X3A(_In_reads_(12) const float* pArray) noexcept : XMFLOAT4X3(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 3x4 Column-major Matrix: 32 bit floating point components + struct XMFLOAT3X4 + { + union + { + struct + { + float _11, _12, _13, _14; + float _21, _22, _23, _24; + float _31, _32, _33, _34; + }; + float m[3][4]; + float f[12]; + }; + + XMFLOAT3X4() = default; + + XMFLOAT3X4(const XMFLOAT3X4&) = default; + XMFLOAT3X4& operator=(const XMFLOAT3X4&) = default; + + XMFLOAT3X4(XMFLOAT3X4&&) = default; + XMFLOAT3X4& operator=(XMFLOAT3X4&&) = default; + + constexpr XMFLOAT3X4(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23) noexcept + : _11(m00), _12(m01), _13(m02), _14(m03), + _21(m10), _22(m11), _23(m12), _24(m13), + _31(m20), _32(m21), _33(m22), _34(m23) {} + explicit XMFLOAT3X4(_In_reads_(12) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + // 3x4 Column-major Matrix: 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT3X4A : public XMFLOAT3X4 + { + XMFLOAT3X4A() = default; + + XMFLOAT3X4A(const XMFLOAT3X4A&) = default; + XMFLOAT3X4A& operator=(const XMFLOAT3X4A&) = default; + + XMFLOAT3X4A(XMFLOAT3X4A&&) = default; + XMFLOAT3X4A& operator=(XMFLOAT3X4A&&) = default; + + constexpr XMFLOAT3X4A(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23) noexcept : + 
XMFLOAT3X4(m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23) {} + explicit XMFLOAT3X4A(_In_reads_(12) const float* pArray) noexcept : XMFLOAT3X4(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 4x4 Matrix: 32 bit floating point components + struct XMFLOAT4X4 + { + union + { + struct + { + float _11, _12, _13, _14; + float _21, _22, _23, _24; + float _31, _32, _33, _34; + float _41, _42, _43, _44; + }; + float m[4][4]; + }; + + XMFLOAT4X4() = default; + + XMFLOAT4X4(const XMFLOAT4X4&) = default; + XMFLOAT4X4& operator=(const XMFLOAT4X4&) = default; + + XMFLOAT4X4(XMFLOAT4X4&&) = default; + XMFLOAT4X4& operator=(XMFLOAT4X4&&) = default; + + constexpr XMFLOAT4X4(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept + : _11(m00), _12(m01), _13(m02), _14(m03), + _21(m10), _22(m11), _23(m12), _24(m13), + _31(m20), _32(m21), _33(m22), _34(m23), + _41(m30), _42(m31), _43(m32), _44(m33) {} + explicit XMFLOAT4X4(_In_reads_(16) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + // 4x4 Matrix: 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT4X4A : public XMFLOAT4X4 + { + XMFLOAT4X4A() = default; + + XMFLOAT4X4A(const XMFLOAT4X4A&) = default; + XMFLOAT4X4A& operator=(const XMFLOAT4X4A&) = default; + + XMFLOAT4X4A(XMFLOAT4X4A&&) = default; + XMFLOAT4X4A& operator=(XMFLOAT4X4A&&) = default; + + constexpr XMFLOAT4X4A(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept + : XMFLOAT4X4(m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33) {} + explicit XMFLOAT4X4A(_In_reads_(16) const float* pArray) noexcept : XMFLOAT4X4(pArray) {} + }; + + //////////////////////////////////////////////////////////////////////////////// + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +#pragma warning(pop) + +/**************************************************************************** + * + * Data conversion operations + * + ****************************************************************************/ + + XMVECTOR XM_CALLCONV XMConvertVectorIntToFloat(FXMVECTOR VInt, uint32_t DivExponent) noexcept; + XMVECTOR XM_CALLCONV XMConvertVectorFloatToInt(FXMVECTOR VFloat, uint32_t MulExponent) noexcept; + XMVECTOR XM_CALLCONV XMConvertVectorUIntToFloat(FXMVECTOR VUInt, uint32_t DivExponent) noexcept; + XMVECTOR XM_CALLCONV XMConvertVectorFloatToUInt(FXMVECTOR VFloat, uint32_t MulExponent) noexcept; + +#if defined(__XNAMATH_H__) && defined(XMVectorSetBinaryConstant) +#undef XMVectorSetBinaryConstant +#undef XMVectorSplatConstant +#undef XMVectorSplatConstantInt +#endif + + XMVECTOR XM_CALLCONV XMVectorSetBinaryConstant(uint32_t C0, uint32_t C1, uint32_t C2, uint32_t C3) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatConstant(int32_t IntConstant, uint32_t DivExponent) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatConstantInt(int32_t IntConstant) noexcept; + + /**************************************************************************** + * + * Load operations + * + 
****************************************************************************/ + + XMVECTOR XM_CALLCONV XMLoadInt(_In_ const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat(_In_ const float* pSource) noexcept; + + XMVECTOR XM_CALLCONV XMLoadInt2(_In_reads_(2) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadInt2A(_In_reads_(2) const uint32_t* PSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat2(_In_ const XMFLOAT2* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat2A(_In_ const XMFLOAT2A* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadSInt2(_In_ const XMINT2* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadUInt2(_In_ const XMUINT2* pSource) noexcept; + + XMVECTOR XM_CALLCONV XMLoadInt3(_In_reads_(3) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadInt3A(_In_reads_(3) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat3(_In_ const XMFLOAT3* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat3A(_In_ const XMFLOAT3A* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadSInt3(_In_ const XMINT3* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadUInt3(_In_ const XMUINT3* pSource) noexcept; + + XMVECTOR XM_CALLCONV XMLoadInt4(_In_reads_(4) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadInt4A(_In_reads_(4) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat4(_In_ const XMFLOAT4* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat4A(_In_ const XMFLOAT4A* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadSInt4(_In_ const XMINT4* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadUInt4(_In_ const XMUINT4* pSource) noexcept; + + XMMATRIX XM_CALLCONV XMLoadFloat3x3(_In_ const XMFLOAT3X3* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x3(_In_ const XMFLOAT4X3* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x3A(_In_ const XMFLOAT4X3A* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat3x4(_In_ const XMFLOAT3X4* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat3x4A(_In_ const XMFLOAT3X4A* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x4(_In_ const XMFLOAT4X4* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x4A(_In_ const XMFLOAT4X4A* pSource) noexcept; + + /**************************************************************************** + * + * Store operations + * + ****************************************************************************/ + + void XM_CALLCONV XMStoreInt(_Out_ uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat(_Out_ float* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreInt2(_Out_writes_(2) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreInt2A(_Out_writes_(2) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat2(_Out_ XMFLOAT2* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat2A(_Out_ XMFLOAT2A* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreSInt2(_Out_ XMINT2* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreUInt2(_Out_ XMUINT2* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreInt3(_Out_writes_(3) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreInt3A(_Out_writes_(3) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat3(_Out_ XMFLOAT3* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat3A(_Out_ XMFLOAT3A* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV 
XMStoreSInt3(_Out_ XMINT3* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreUInt3(_Out_ XMUINT3* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreInt4(_Out_writes_(4) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreInt4A(_Out_writes_(4) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat4(_Out_ XMFLOAT4* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat4A(_Out_ XMFLOAT4A* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreSInt4(_Out_ XMINT4* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreUInt4(_Out_ XMUINT4* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreFloat3x3(_Out_ XMFLOAT3X3* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x3(_Out_ XMFLOAT4X3* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x3A(_Out_ XMFLOAT4X3A* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat3x4(_Out_ XMFLOAT3X4* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat3x4A(_Out_ XMFLOAT3X4A* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x4(_Out_ XMFLOAT4X4* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x4A(_Out_ XMFLOAT4X4A* pDestination, _In_ FXMMATRIX M) noexcept; + + /**************************************************************************** + * + * General vector operations + * + ****************************************************************************/ + + XMVECTOR XM_CALLCONV XMVectorZero() noexcept; + XMVECTOR XM_CALLCONV XMVectorSet(float x, float y, float z, float w) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetInt(uint32_t x, uint32_t y, uint32_t z, uint32_t w) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicate(float Value) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicatePtr(_In_ const float* pValue) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicateInt(uint32_t Value) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicateIntPtr(_In_ const uint32_t* pValue) noexcept; + XMVECTOR XM_CALLCONV XMVectorTrueInt() noexcept; + XMVECTOR XM_CALLCONV XMVectorFalseInt() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatX(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatY(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatZ(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatW(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatOne() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatInfinity() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatQNaN() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatEpsilon() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatSignMask() noexcept; + + float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i) noexcept; + float XM_CALLCONV XMVectorGetX(FXMVECTOR V) noexcept; + float XM_CALLCONV XMVectorGetY(FXMVECTOR V) noexcept; + float XM_CALLCONV XMVectorGetZ(FXMVECTOR V) noexcept; + float XM_CALLCONV XMVectorGetW(FXMVECTOR V) noexcept; + + void XM_CALLCONV XMVectorGetByIndexPtr(_Out_ float* f, _In_ FXMVECTOR V, _In_ size_t i) noexcept; + void XM_CALLCONV XMVectorGetXPtr(_Out_ float* x, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetYPtr(_Out_ float* y, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetZPtr(_Out_ float* z, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetWPtr(_Out_ float* w, _In_ FXMVECTOR V) noexcept; + + uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i) noexcept; + uint32_t 
XM_CALLCONV XMVectorGetIntX(FXMVECTOR V) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntY(FXMVECTOR V) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntZ(FXMVECTOR V) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntW(FXMVECTOR V) noexcept; + + void XM_CALLCONV XMVectorGetIntByIndexPtr(_Out_ uint32_t* x, _In_ FXMVECTOR V, _In_ size_t i) noexcept; + void XM_CALLCONV XMVectorGetIntXPtr(_Out_ uint32_t* x, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetIntYPtr(_Out_ uint32_t* y, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetIntZPtr(_Out_ uint32_t* z, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetIntWPtr(_Out_ uint32_t* w, _In_ FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V, float f, size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetX(FXMVECTOR V, float x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetY(FXMVECTOR V, float y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetZ(FXMVECTOR V, float z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetW(FXMVECTOR V, float w) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(_In_ FXMVECTOR V, _In_ const float* f, _In_ size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetXPtr(_In_ FXMVECTOR V, _In_ const float* x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetYPtr(_In_ FXMVECTOR V, _In_ const float* y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetZPtr(_In_ FXMVECTOR V, _In_ const float* z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetWPtr(_In_ FXMVECTOR V, _In_ const float* w) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntX(FXMVECTOR V, uint32_t x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntY(FXMVECTOR V, uint32_t y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntZ(FXMVECTOR V, uint32_t z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntW(FXMVECTOR V, uint32_t w) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(_In_ FXMVECTOR V, _In_ const uint32_t* x, _In_ size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntXPtr(_In_ FXMVECTOR V, _In_ const uint32_t* x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntYPtr(_In_ FXMVECTOR V, _In_ const uint32_t* y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntZPtr(_In_ FXMVECTOR V, _In_ const uint32_t* z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntWPtr(_In_ FXMVECTOR V, _In_ const uint32_t* w) noexcept; + +#if defined(__XNAMATH_H__) && defined(XMVectorSwizzle) +#undef XMVectorSwizzle +#endif + + XMVECTOR XM_CALLCONV XMVectorSwizzle(FXMVECTOR V, uint32_t E0, uint32_t E1, uint32_t E2, uint32_t E3) noexcept; + XMVECTOR XM_CALLCONV XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2, uint32_t PermuteX, uint32_t PermuteY, uint32_t PermuteZ, uint32_t PermuteW) noexcept; + XMVECTOR XM_CALLCONV XMVectorSelectControl(uint32_t VectorIndex0, uint32_t VectorIndex1, uint32_t VectorIndex2, uint32_t VectorIndex3) noexcept; + XMVECTOR XM_CALLCONV XMVectorSelect(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Control) noexcept; + XMVECTOR XM_CALLCONV XMVectorMergeXY(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMergeZW(FXMVECTOR V1, FXMVECTOR V2) noexcept; + +#if defined(__XNAMATH_H__) && defined(XMVectorShiftLeft) +#undef XMVectorShiftLeft +#undef XMVectorRotateLeft +#undef XMVectorRotateRight +#undef XMVectorInsert +#endif + + XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, uint32_t Elements) noexcept; + XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V, uint32_t Elements) noexcept; + XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V, 
uint32_t Elements) noexcept; + XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS, uint32_t VSLeftRotateElements, + uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3) noexcept; + + XMVECTOR XM_CALLCONV XMVectorEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorEqualR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorEqualIntR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + XMVECTOR XM_CALLCONV XMVectorNotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreaterR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreaterOrEqualR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorLess(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorLessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorInBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + XMVECTOR XM_CALLCONV XMVectorInBoundsR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V, _In_ FXMVECTOR Bounds) noexcept; + + XMVECTOR XM_CALLCONV XMVectorIsNaN(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorIsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVectorMin(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMax(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorRound(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTruncate(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorFloor(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCeiling(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorClamp(FXMVECTOR V, FXMVECTOR Min, FXMVECTOR Max) noexcept; + XMVECTOR XM_CALLCONV XMVectorSaturate(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVectorAndInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorAndCInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorOrInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNorInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorXorInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + + XMVECTOR XM_CALLCONV XMVectorNegate(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorAdd(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorSum(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorAddAngles(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorSubtract(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorSubtractAngles(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMultiply(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMultiplyAdd(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3) noexcept; + XMVECTOR XM_CALLCONV XMVectorDivide(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNegativeMultiplySubtract(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3) noexcept; + XMVECTOR XM_CALLCONV XMVectorScale(FXMVECTOR V, float ScaleFactor) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocalEst(FXMVECTOR 
V) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSqrtEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSqrt(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocalSqrtEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocalSqrt(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorExp2(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorExpE(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorExp(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorLog2(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorLogE(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorLog(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorPow(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorAbs(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorMod(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorModAngles(FXMVECTOR Angles) noexcept; + XMVECTOR XM_CALLCONV XMVectorSin(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSinEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCos(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCosEst(FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorSinCos(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorSinCosEst(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, _In_ FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTan(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTanEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSinH(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCosH(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTanH(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorASin(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorASinEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorACos(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorACosEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorATan(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorATanEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorATan2(FXMVECTOR Y, FXMVECTOR X) noexcept; + XMVECTOR XM_CALLCONV XMVectorATan2Est(FXMVECTOR Y, FXMVECTOR X) noexcept; + XMVECTOR XM_CALLCONV XMVectorLerp(FXMVECTOR V0, FXMVECTOR V1, float t) noexcept; + XMVECTOR XM_CALLCONV XMVectorLerpV(FXMVECTOR V0, FXMVECTOR V1, FXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMVectorHermite(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, GXMVECTOR Tangent1, float t) noexcept; + XMVECTOR XM_CALLCONV XMVectorHermiteV(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, GXMVECTOR Tangent1, HXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMVectorCatmullRom(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR Position3, float t) noexcept; + XMVECTOR XM_CALLCONV XMVectorCatmullRomV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR Position3, HXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMVectorBaryCentric(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, float f, float g) noexcept; + XMVECTOR XM_CALLCONV XMVectorBaryCentricV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR F, HXMVECTOR G) noexcept; + + /**************************************************************************** + * + * 2D vector operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMVector2Equal(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV 
XMVector2EqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2EqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2EqualIntR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMVector2NotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2NotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2Greater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2GreaterR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2Less(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2LessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2InBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + + bool XM_CALLCONV XMVector2IsNaN(FXMVECTOR V) noexcept; + bool XM_CALLCONV XMVector2IsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVector2Dot(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector2Cross(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector2LengthSq(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2ReciprocalLengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2ReciprocalLength(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2LengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2Length(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2NormalizeEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2Normalize(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2ClampLength(FXMVECTOR V, float LengthMin, float LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector2ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector2Reflect(FXMVECTOR Incident, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector2Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector2RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector2Orthogonal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector2AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector2LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point) noexcept; + XMVECTOR XM_CALLCONV XMVector2IntersectLine(FXMVECTOR Line1Point1, FXMVECTOR Line1Point2, FXMVECTOR Line2Point1, GXMVECTOR Line2Point2) noexcept; + XMVECTOR XM_CALLCONV XMVector2Transform(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMVector2TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (VectorCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT2) + InputStride * (VectorCount - 1)) const XMFLOAT2* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector2TransformCoord(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT2* XM_CALLCONV XMVector2TransformCoordStream(_Out_writes_bytes_(sizeof(XMFLOAT2) + OutputStride * (VectorCount - 1)) XMFLOAT2* pOutputStream, + 
_In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT2) + InputStride * (VectorCount - 1)) const XMFLOAT2* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector2TransformNormal(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT2* XM_CALLCONV XMVector2TransformNormalStream(_Out_writes_bytes_(sizeof(XMFLOAT2) + OutputStride * (VectorCount - 1)) XMFLOAT2* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT2) + InputStride * (VectorCount - 1)) const XMFLOAT2* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + + /**************************************************************************** + * + * 3D vector operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMVector3Equal(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3EqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3EqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3EqualIntR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMVector3NotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3NotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3Greater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3GreaterR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3Less(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3LessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3InBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + + bool XM_CALLCONV XMVector3IsNaN(FXMVECTOR V) noexcept; + bool XM_CALLCONV XMVector3IsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVector3Dot(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector3Cross(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector3LengthSq(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3ReciprocalLengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3ReciprocalLength(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3LengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3Length(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3NormalizeEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3Normalize(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3ClampLength(FXMVECTOR V, float LengthMin, float LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector3ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector3Reflect(FXMVECTOR Incident, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector3Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector3RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector3Orthogonal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector3AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2) 
noexcept; + XMVECTOR XM_CALLCONV XMVector3LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point) noexcept; + void XM_CALLCONV XMVector3ComponentsFromNormal(_Out_ XMVECTOR* pParallel, _Out_ XMVECTOR* pPerpendicular, _In_ FXMVECTOR V, _In_ FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector3Rotate(FXMVECTOR V, FXMVECTOR RotationQuaternion) noexcept; + XMVECTOR XM_CALLCONV XMVector3InverseRotate(FXMVECTOR V, FXMVECTOR RotationQuaternion) noexcept; + XMVECTOR XM_CALLCONV XMVector3Transform(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMVector3TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (VectorCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector3TransformCoord(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3TransformCoordStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector3TransformNormal(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3TransformNormalStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector3Project(FXMVECTOR V, float ViewportX, float ViewportY, float ViewportWidth, float ViewportHeight, float ViewportMinZ, float ViewportMaxZ, + FXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3ProjectStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, + _In_ float ViewportX, _In_ float ViewportY, _In_ float ViewportWidth, _In_ float ViewportHeight, _In_ float ViewportMinZ, _In_ float ViewportMaxZ, + _In_ FXMMATRIX Projection, _In_ CXMMATRIX View, _In_ CXMMATRIX World) noexcept; + XMVECTOR XM_CALLCONV XMVector3Unproject(FXMVECTOR V, float ViewportX, float ViewportY, float ViewportWidth, float ViewportHeight, float ViewportMinZ, float ViewportMaxZ, + FXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3UnprojectStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, + _In_ float ViewportX, _In_ float ViewportY, _In_ float ViewportWidth, _In_ float ViewportHeight, _In_ float ViewportMinZ, _In_ float ViewportMaxZ, + _In_ FXMMATRIX Projection, _In_ CXMMATRIX View, _In_ CXMMATRIX World) noexcept; + + /**************************************************************************** + * + * 4D vector operations + * + 
****************************************************************************/ + + bool XM_CALLCONV XMVector4Equal(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4EqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4EqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4EqualIntR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMVector4NotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4NotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4Greater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4GreaterR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4Less(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4LessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4InBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + + bool XM_CALLCONV XMVector4IsNaN(FXMVECTOR V) noexcept; + bool XM_CALLCONV XMVector4IsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVector4Dot(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector4Cross(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3) noexcept; + XMVECTOR XM_CALLCONV XMVector4LengthSq(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4ReciprocalLengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4ReciprocalLength(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4LengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4Length(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4NormalizeEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4Normalize(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4ClampLength(FXMVECTOR V, float LengthMin, float LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector4ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector4Reflect(FXMVECTOR Incident, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector4Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector4RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector4Orthogonal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector4AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector4Transform(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMVector4TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (VectorCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT4) + InputStride * (VectorCount - 1)) const XMFLOAT4* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + + /**************************************************************************** + * + * Matrix operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMMatrixIsNaN(FXMMATRIX M) noexcept; + bool XM_CALLCONV XMMatrixIsInfinite(FXMMATRIX M) noexcept; + bool 
XM_CALLCONV XMMatrixIsIdentity(FXMMATRIX M) noexcept; + + XMMATRIX XM_CALLCONV XMMatrixMultiply(FXMMATRIX M1, CXMMATRIX M2) noexcept; + XMMATRIX XM_CALLCONV XMMatrixMultiplyTranspose(FXMMATRIX M1, CXMMATRIX M2) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTranspose(FXMMATRIX M) noexcept; + XMMATRIX XM_CALLCONV XMMatrixInverse(_Out_opt_ XMVECTOR* pDeterminant, _In_ FXMMATRIX M) noexcept; + XMMATRIX XM_CALLCONV XMMatrixVectorTensorProduct(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMMatrixDeterminant(FXMMATRIX M) noexcept; + + _Success_(return) + bool XM_CALLCONV XMMatrixDecompose(_Out_ XMVECTOR* outScale, _Out_ XMVECTOR* outRotQuat, _Out_ XMVECTOR* outTrans, _In_ FXMMATRIX M) noexcept; + + XMMATRIX XM_CALLCONV XMMatrixIdentity() noexcept; + XMMATRIX XM_CALLCONV XMMatrixSet(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTranslation(float OffsetX, float OffsetY, float OffsetZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTranslationFromVector(FXMVECTOR Offset) noexcept; + XMMATRIX XM_CALLCONV XMMatrixScaling(float ScaleX, float ScaleY, float ScaleZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixScalingFromVector(FXMVECTOR Scale) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationX(float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationY(float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationZ(float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYaw(float Pitch, float Yaw, float Roll) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYawFromVector(FXMVECTOR Angles) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationNormal(FXMVECTOR NormalAxis, float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationAxis(FXMVECTOR Axis, float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationQuaternion(FXMVECTOR Quaternion) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTransformation2D(FXMVECTOR ScalingOrigin, float ScalingOrientation, FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, float Rotation, GXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTransformation(FXMVECTOR ScalingOrigin, FXMVECTOR ScalingOrientationQuaternion, FXMVECTOR Scaling, + GXMVECTOR RotationOrigin, HXMVECTOR RotationQuaternion, HXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixAffineTransformation2D(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, float Rotation, FXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixAffineTransformation(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, FXMVECTOR RotationQuaternion, GXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixReflect(FXMVECTOR ReflectionPlane) noexcept; + XMMATRIX XM_CALLCONV XMMatrixShadow(FXMVECTOR ShadowPlane, FXMVECTOR LightPosition) noexcept; + + XMMATRIX XM_CALLCONV XMMatrixLookAtLH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixLookAtRH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixLookToLH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixLookToRH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveLH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveRH(float ViewWidth, float ViewHeight, float 
NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovLH(float FovAngleY, float AspectRatio, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovRH(float FovAngleY, float AspectRatio, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterLH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterRH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicLH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicRH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterLH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterRH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + + + /**************************************************************************** + * + * Quaternion operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMQuaternionEqual(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + bool XM_CALLCONV XMQuaternionNotEqual(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + + bool XM_CALLCONV XMQuaternionIsNaN(FXMVECTOR Q) noexcept; + bool XM_CALLCONV XMQuaternionIsInfinite(FXMVECTOR Q) noexcept; + bool XM_CALLCONV XMQuaternionIsIdentity(FXMVECTOR Q) noexcept; + + XMVECTOR XM_CALLCONV XMQuaternionDot(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionMultiply(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionLengthSq(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionReciprocalLength(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionLength(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionNormalizeEst(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionNormalize(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionConjugate(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionInverse(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionLn(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionExp(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSlerp(FXMVECTOR Q0, FXMVECTOR Q1, float t) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSlerpV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSquad(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR Q3, float t) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSquadV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR Q3, HXMVECTOR T) noexcept; + void XM_CALLCONV XMQuaternionSquadSetup(_Out_ XMVECTOR* pA, _Out_ XMVECTOR* pB, _Out_ XMVECTOR* pC, _In_ FXMVECTOR Q0, _In_ FXMVECTOR Q1, _In_ FXMVECTOR Q2, _In_ GXMVECTOR Q3) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionBaryCentric(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, float f, float g) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionBaryCentricV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR F, HXMVECTOR G) noexcept; + + XMVECTOR XM_CALLCONV XMQuaternionIdentity() noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYaw(float Pitch, float Yaw, float Roll) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYawFromVector(FXMVECTOR Angles) noexcept; + XMVECTOR 
XM_CALLCONV XMQuaternionRotationNormal(FXMVECTOR NormalAxis, float Angle) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationAxis(FXMVECTOR Axis, float Angle) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationMatrix(FXMMATRIX M) noexcept; + + void XM_CALLCONV XMQuaternionToAxisAngle(_Out_ XMVECTOR* pAxis, _Out_ float* pAngle, _In_ FXMVECTOR Q) noexcept; + + /**************************************************************************** + * + * Plane operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMPlaneEqual(FXMVECTOR P1, FXMVECTOR P2) noexcept; + bool XM_CALLCONV XMPlaneNearEqual(FXMVECTOR P1, FXMVECTOR P2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMPlaneNotEqual(FXMVECTOR P1, FXMVECTOR P2) noexcept; + + bool XM_CALLCONV XMPlaneIsNaN(FXMVECTOR P) noexcept; + bool XM_CALLCONV XMPlaneIsInfinite(FXMVECTOR P) noexcept; + + XMVECTOR XM_CALLCONV XMPlaneDot(FXMVECTOR P, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMPlaneDotCoord(FXMVECTOR P, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMPlaneDotNormal(FXMVECTOR P, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMPlaneNormalizeEst(FXMVECTOR P) noexcept; + XMVECTOR XM_CALLCONV XMPlaneNormalize(FXMVECTOR P) noexcept; + XMVECTOR XM_CALLCONV XMPlaneIntersectLine(FXMVECTOR P, FXMVECTOR LinePoint1, FXMVECTOR LinePoint2) noexcept; + void XM_CALLCONV XMPlaneIntersectPlane(_Out_ XMVECTOR* pLinePoint1, _Out_ XMVECTOR* pLinePoint2, _In_ FXMVECTOR P1, _In_ FXMVECTOR P2) noexcept; + XMVECTOR XM_CALLCONV XMPlaneTransform(FXMVECTOR P, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMPlaneTransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (PlaneCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT4) + InputStride * (PlaneCount - 1)) const XMFLOAT4* pInputStream, + _In_ size_t InputStride, _In_ size_t PlaneCount, _In_ FXMMATRIX M) noexcept; + + XMVECTOR XM_CALLCONV XMPlaneFromPointNormal(FXMVECTOR Point, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMPlaneFromPoints(FXMVECTOR Point1, FXMVECTOR Point2, FXMVECTOR Point3) noexcept; + + /**************************************************************************** + * + * Color operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMColorEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorNotEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorGreater(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorGreaterOrEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorLess(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorLessOrEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + + bool XM_CALLCONV XMColorIsNaN(FXMVECTOR C) noexcept; + bool XM_CALLCONV XMColorIsInfinite(FXMVECTOR C) noexcept; + + XMVECTOR XM_CALLCONV XMColorNegative(FXMVECTOR C) noexcept; + XMVECTOR XM_CALLCONV XMColorModulate(FXMVECTOR C1, FXMVECTOR C2) noexcept; + XMVECTOR XM_CALLCONV XMColorAdjustSaturation(FXMVECTOR C, float Saturation) noexcept; + XMVECTOR XM_CALLCONV XMColorAdjustContrast(FXMVECTOR C, float Contrast) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToHSL(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorHSLToRGB(FXMVECTOR hsl) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToHSV(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorHSVToRGB(FXMVECTOR hsv) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToYUV(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV 
XMColorYUVToRGB(FXMVECTOR yuv) noexcept;
+
+    XMVECTOR XM_CALLCONV XMColorRGBToYUV_HD(FXMVECTOR rgb) noexcept;
+    XMVECTOR XM_CALLCONV XMColorYUVToRGB_HD(FXMVECTOR yuv) noexcept;
+
+    XMVECTOR XM_CALLCONV XMColorRGBToXYZ(FXMVECTOR rgb) noexcept;
+    XMVECTOR XM_CALLCONV XMColorXYZToRGB(FXMVECTOR xyz) noexcept;
+
+    XMVECTOR XM_CALLCONV XMColorXYZToSRGB(FXMVECTOR xyz) noexcept;
+    XMVECTOR XM_CALLCONV XMColorSRGBToXYZ(FXMVECTOR srgb) noexcept;
+
+    XMVECTOR XM_CALLCONV XMColorRGBToSRGB(FXMVECTOR rgb) noexcept;
+    XMVECTOR XM_CALLCONV XMColorSRGBToRGB(FXMVECTOR srgb) noexcept;
+
+
+    /****************************************************************************
+     *
+     * Miscellaneous operations
+     *
+     ****************************************************************************/
+
+    bool XMVerifyCPUSupport() noexcept;
+
+    XMVECTOR XM_CALLCONV XMFresnelTerm(FXMVECTOR CosIncidentAngle, FXMVECTOR RefractionIndex) noexcept;
+
+    bool XMScalarNearEqual(float S1, float S2, float Epsilon) noexcept;
+    float XMScalarModAngle(float Value) noexcept;
+
+    float XMScalarSin(float Value) noexcept;
+    float XMScalarSinEst(float Value) noexcept;
+
+    float XMScalarCos(float Value) noexcept;
+    float XMScalarCosEst(float Value) noexcept;
+
+    void XMScalarSinCos(_Out_ float* pSin, _Out_ float* pCos, float Value) noexcept;
+    void XMScalarSinCosEst(_Out_ float* pSin, _Out_ float* pCos, float Value) noexcept;
+
+    float XMScalarASin(float Value) noexcept;
+    float XMScalarASinEst(float Value) noexcept;
+
+    float XMScalarACos(float Value) noexcept;
+    float XMScalarACosEst(float Value) noexcept;
+
+    /****************************************************************************
+     *
+     * Templates
+     *
+     ****************************************************************************/
+
+#if defined(__XNAMATH_H__) && defined(XMMin)
+#undef XMMin
+#undef XMMax
+#endif
+
+    template<class T> inline T XMMin(T a, T b) { return (a < b) ? a : b; }
+    template<class T> inline T XMMax(T a, T b) { return (a > b) ? a : b; }
+
+    //------------------------------------------------------------------------------
+
+#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+
+// PermuteHelper internal template (SSE only)
+    namespace Internal
+    {
+        // Slow path fallback for permutes that do not map to a single SSE shuffle opcode.
+        template<uint32_t Shuffle, bool WhichX, bool WhichY, bool WhichZ, bool WhichW> struct PermuteHelper
+        {
+            static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) noexcept
+            {
+                static const XMVECTORU32 selectMask =
+                { { {
+                        WhichX ? 0xFFFFFFFF : 0,
+                        WhichY ? 0xFFFFFFFF : 0,
+                        WhichZ ? 0xFFFFFFFF : 0,
+                        WhichW ? 0xFFFFFFFF : 0,
+                } } };
+
+                XMVECTOR shuffled1 = XM_PERMUTE_PS(v1, Shuffle);
+                XMVECTOR shuffled2 = XM_PERMUTE_PS(v2, Shuffle);
+
+                XMVECTOR masked1 = _mm_andnot_ps(selectMask, shuffled1);
+                XMVECTOR masked2 = _mm_and_ps(selectMask, shuffled2);
+
+                return _mm_or_ps(masked1, masked2);
+            }
+        };
+
+        // Fast path for permutes that only read from the first vector.
+        template<uint32_t Shuffle> struct PermuteHelper<Shuffle, false, false, false, false>
+        {
+            static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR) noexcept { return XM_PERMUTE_PS(v1, Shuffle); }
+        };
+
+        // Fast path for permutes that only read from the second vector.
+        template<uint32_t Shuffle> struct PermuteHelper<Shuffle, true, true, true, true>
+        {
+            static XMVECTOR XM_CALLCONV Permute(FXMVECTOR, FXMVECTOR v2) noexcept { return XM_PERMUTE_PS(v2, Shuffle); }
+        };
+
+        // Fast path for permutes that read XY from the first vector, ZW from the second.
+        template<uint32_t Shuffle> struct PermuteHelper<Shuffle, false, false, true, true>
+        {
+            static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) noexcept { return _mm_shuffle_ps(v1, v2, Shuffle); }
+        };
+
+        // Fast path for permutes that read XY from the second vector, ZW from the first.
+        template<uint32_t Shuffle> struct PermuteHelper<Shuffle, true, true, false, false>
+        {
+            static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) noexcept { return _mm_shuffle_ps(v2, v1, Shuffle); }
+        };
+    }
+
+#endif // _XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_
+
+    // General permute template
+    template<uint32_t PermuteX, uint32_t PermuteY, uint32_t PermuteZ, uint32_t PermuteW>
+    inline XMVECTOR XM_CALLCONV XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2) noexcept
+    {
+        static_assert(PermuteX <= 7, "PermuteX template parameter out of range");
+        static_assert(PermuteY <= 7, "PermuteY template parameter out of range");
+        static_assert(PermuteZ <= 7, "PermuteZ template parameter out of range");
+        static_assert(PermuteW <= 7, "PermuteW template parameter out of range");
+
+#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+        const uint32_t Shuffle = _MM_SHUFFLE(PermuteW & 3, PermuteZ & 3, PermuteY & 3, PermuteX & 3);
+
+        const bool WhichX = PermuteX > 3;
+        const bool WhichY = PermuteY > 3;
+        const bool WhichZ = PermuteZ > 3;
+        const bool WhichW = PermuteW > 3;
+
+        return Internal::PermuteHelper<Shuffle, WhichX, WhichY, WhichZ, WhichW>::Permute(V1, V2);
+#else
+
+        return XMVectorPermute(V1, V2, PermuteX, PermuteY, PermuteZ, PermuteW);
+
+#endif
+    }
+
+    // Special-case permute templates
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 2, 3>(FXMVECTOR V1, FXMVECTOR) noexcept { return V1; }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 6, 7>(FXMVECTOR, FXMVECTOR V2) noexcept { return V2; }
+
+#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_movelh_ps(V1, V2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<6, 7, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_movehl_ps(V1, V2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 4, 1, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_unpacklo_ps(V1, V2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 6, 3, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_unpackhi_ps(V1, V2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(V1), _mm_castps_pd(V2))); }
+#endif
+
+#if defined(_XM_SSE4_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x1); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x3); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x4); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x5); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x6); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x7); }
+    template<>
inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x8); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x9); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xA); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xB); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xC); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xD); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xE); } +#endif + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + + // If the indices are all in the range 0-3 or 4-7, then use XMVectorSwizzle instead + // The mirror cases are not spelled out here as the programmer can always swap the arguments + // (i.e. prefer permutes where the X element comes from the V1 vector instead of the V2 vector) + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vget_low_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vget_low_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vrev64_f32(vget_low_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vrev64_f32(vget_low_f32(V2))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_high_f32(V1), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_high_f32(V1), vrev64_f32(vget_high_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vrev64_f32(vget_high_f32(V2))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vrev64_f32(vget_high_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vrev64_f32(vget_high_f32(V2))); } + + template<> inline XMVECTOR 
XM_CALLCONV XMVectorPermute<3, 2, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vget_low_f32(V2)); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_high_f32(V1), vrev64_f32(vget_low_f32(V2))); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vrev64_f32(vget_low_f32(V2))); }
+
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 4, 2, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vtrnq_f32(V1, V2).val[0]; }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 5, 3, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vtrnq_f32(V1, V2).val[1]; }
+
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 4, 1, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vzipq_f32(V1, V2).val[0]; }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 6, 3, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vzipq_f32(V1, V2).val[1]; }
+
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 2, 4, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vuzpq_f32(V1, V2).val[0]; }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 3, 5, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vuzpq_f32(V1, V2).val[1]; }
+
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 2, 3, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vextq_f32(V1, V2, 1); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vextq_f32(V1, V2, 2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 4, 5, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vextq_f32(V1, V2, 3); }
+
+#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_
+
+    //------------------------------------------------------------------------------
+
+    // General swizzle template
+    template<uint32_t SwizzleX, uint32_t SwizzleY, uint32_t SwizzleZ, uint32_t SwizzleW>
+    inline XMVECTOR XM_CALLCONV XMVectorSwizzle(FXMVECTOR V) noexcept
+    {
+        static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
+        static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
+        static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");
+        static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range");
+
+#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+        return XM_PERMUTE_PS(V, _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX));
+#else
+
+        return XMVectorSwizzle(V, SwizzleX, SwizzleY, SwizzleZ, SwizzleW);
+
+#endif
+    }
+
+    // Specialized swizzles
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 2, 3>(FXMVECTOR V) noexcept { return V; }
+
+#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 0, 1>(FXMVECTOR V) noexcept { return _mm_movelh_ps(V, V); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 2, 3>(FXMVECTOR V) noexcept { return _mm_movehl_ps(V, V); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 1, 1>(FXMVECTOR V) noexcept { return _mm_unpacklo_ps(V, V); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 2, 3, 3>(FXMVECTOR V) noexcept { return _mm_unpackhi_ps(V, V); }
+#endif
+
+#if defined(_XM_SSE3_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 2, 2>(FXMVECTOR V) noexcept { return _mm_moveldup_ps(V); }
+    template<> inline XMVECTOR XM_CALLCONV
XMVectorSwizzle<1, 1, 3, 3>(FXMVECTOR V) noexcept { return _mm_movehdup_ps(V); } +#endif + +#if defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) && defined(_XM_FAVOR_INTEL_) + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 0, 0>(FXMVECTOR V) noexcept { return _mm_broadcastss_ps(V); } +#endif + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 0, 0>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_low_f32(V), 0); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 1, 1, 1>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_low_f32(V), 1); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 2, 2, 2>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_high_f32(V), 0); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 3, 3, 3>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_high_f32(V), 1); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 0, 3, 2>(FXMVECTOR V) noexcept { return vrev64q_f32(V); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 0, 1>(FXMVECTOR V) noexcept { float32x2_t vt = vget_low_f32(V); return vcombine_f32(vt, vt); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 2, 3>(FXMVECTOR V) noexcept { float32x2_t vt = vget_high_f32(V); return vcombine_f32(vt, vt); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 0, 1, 0>(FXMVECTOR V) noexcept { float32x2_t vt = vrev64_f32(vget_low_f32(V)); return vcombine_f32(vt, vt); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 2, 3, 2>(FXMVECTOR V) noexcept { float32x2_t vt = vrev64_f32(vget_high_f32(V)); return vcombine_f32(vt, vt); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 3, 2>(FXMVECTOR V) noexcept { return vcombine_f32(vget_low_f32(V), vrev64_f32(vget_high_f32(V))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 0, 2, 3>(FXMVECTOR V) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V)), vget_high_f32(V)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 1, 0>(FXMVECTOR V) noexcept { return vcombine_f32(vget_high_f32(V), vrev64_f32(vget_low_f32(V))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 2, 0, 1>(FXMVECTOR V) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V)), vget_low_f32(V)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 2, 1, 0>(FXMVECTOR V) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V)), vrev64_f32(vget_low_f32(V))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 2, 2>(FXMVECTOR V) noexcept { return vtrnq_f32(V, V).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 1, 3, 3>(FXMVECTOR V) noexcept { return vtrnq_f32(V, V).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 1, 1>(FXMVECTOR V) noexcept { return vzipq_f32(V, V).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 2, 3, 3>(FXMVECTOR V) noexcept { return vzipq_f32(V, V).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 2, 0, 2>(FXMVECTOR V) noexcept { return vuzpq_f32(V, V).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 3, 1, 3>(FXMVECTOR V) noexcept { return vuzpq_f32(V, V).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 2, 3, 0>(FXMVECTOR V) noexcept { return vextq_f32(V, V, 1); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 0, 1>(FXMVECTOR V) noexcept { return 
vextq_f32(V, V, 2); }
+    template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 0, 1, 2>(FXMVECTOR V) noexcept { return vextq_f32(V, V, 3); }
+
+#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_
+
+    //------------------------------------------------------------------------------
+
+    template<uint32_t Elements>
+    inline XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2) noexcept
+    {
+        static_assert(Elements < 4, "Elements template parameter out of range");
+        return XMVectorPermute<Elements, (Elements + 1), (Elements + 2), (Elements + 3)>(V1, V2);
+    }
+
+    template<uint32_t Elements>
+    inline XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V) noexcept
+    {
+        static_assert(Elements < 4, "Elements template parameter out of range");
+        return XMVectorSwizzle<Elements & 3, (Elements + 1) & 3, (Elements + 2) & 3, (Elements + 3) & 3>(V);
+    }
+
+    template<uint32_t Elements>
+    inline XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V) noexcept
+    {
+        static_assert(Elements < 4, "Elements template parameter out of range");
+        return XMVectorSwizzle<(4 - Elements) & 3, (5 - Elements) & 3, (6 - Elements) & 3, (7 - Elements) & 3>(V);
+    }
+
+    template<uint32_t VSLeftRotateElements, uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3>
+    inline XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS) noexcept
+    {
+        XMVECTOR Control = XMVectorSelectControl(Select0 & 1, Select1 & 1, Select2 & 1, Select3 & 1);
+        return XMVectorSelect(VD, XMVectorRotateLeft<VSLeftRotateElements>(VS), Control);
+    }
+
+    /****************************************************************************
+     *
+     * Globals
+     *
+     ****************************************************************************/
+
+    // The purpose of the following global constants is to prevent redundant
+    // reloading of the constants when they are referenced by more than one
+    // separate inline math routine called within the same function. Declaring
+    // a constant locally within a routine is sufficient to prevent redundant
+    // reloads of that constant when that single routine is called multiple
+    // times in a function, but if the constant is used (and declared) in a
+    // separate math routine it would be reloaded.
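As a quick illustration of how the compile-time permute/swizzle API above is used (a hedged sketch, not part of the patch itself; the function name `PermuteExample` is hypothetical):

    // Indices 0-3 select lanes of the first argument, 4-7 lanes of the second.
    // XMVectorPermute<0, 1, 4, 5> compiles down to a single _mm_movelh_ps on
    // SSE targets via the specializations above.
    XMVECTOR PermuteExample()
    {
        XMVECTOR a = XMVectorSet(1.0f, 2.0f, 3.0f, 4.0f);
        XMVECTOR b = XMVectorSet(5.0f, 6.0f, 7.0f, 8.0f);
        XMVECTOR lo  = XMVectorPermute<0, 1, 4, 5>(a, b); // { 1, 2, 5, 6 }
        XMVECTOR rev = XMVectorSwizzle<3, 2, 1, 0>(a);    // { 4, 3, 2, 1 }
        // g_XMOne is one of the global constants declared below.
        return XMVectorMultiplyAdd(lo, rev, g_XMOne);     // lo * rev + 1
    }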
+ +#ifndef XMGLOBALCONST +#if defined(__GNUC__) && !defined(__MINGW32__) +#define XMGLOBALCONST extern const __attribute__((weak)) +#else +#define XMGLOBALCONST extern const __declspec(selectany) +#endif +#endif + + XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients0 = { { { -0.16666667f, +0.0083333310f, -0.00019840874f, +2.7525562e-06f } } }; + XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients1 = { { { -2.3889859e-08f, -0.16665852f /*Est1*/, +0.0083139502f /*Est2*/, -0.00018524670f /*Est3*/ } } }; + XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients0 = { { { -0.5f, +0.041666638f, -0.0013888378f, +2.4760495e-05f } } }; + XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients1 = { { { -2.6051615e-07f, -0.49992746f /*Est1*/, +0.041493919f /*Est2*/, -0.0012712436f /*Est3*/ } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients0 = { { { 1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients1 = { { { 2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients2 = { { { 5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMArcCoefficients0 = { { { +1.5707963050f, -0.2145988016f, +0.0889789874f, -0.0501743046f } } }; + XMGLOBALCONST XMVECTORF32 g_XMArcCoefficients1 = { { { +0.0308918810f, -0.0170881256f, +0.0066700901f, -0.0012624911f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients0 = { { { -0.3333314528f, +0.1999355085f, -0.1420889944f, +0.1065626393f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients1 = { { { -0.0752896400f, +0.0429096138f, -0.0161657367f, +0.0028662257f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients0 = { { { +0.999866f, +0.999866f, +0.999866f, +0.999866f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients1 = { { { -0.3302995f, +0.180141f, -0.085133f, +0.0208351f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanEstCoefficients = { { { 2.484f, -1.954923183e-1f, 2.467401101f, XM_1DIVPI } } }; + XMGLOBALCONST XMVECTORF32 g_XMArcEstCoefficients = { { { +1.5707288f, -0.2121144f, +0.0742610f, -0.0187293f } } }; + XMGLOBALCONST XMVECTORF32 g_XMPiConstants0 = { { { XM_PI, XM_2PI, XM_1DIVPI, XM_1DIV2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR0 = { { { 1.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR1 = { { { 0.0f, 1.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR2 = { { { 0.0f, 0.0f, 1.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR3 = { { { 0.0f, 0.0f, 0.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR0 = { { { -1.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR1 = { { { 0.0f, -1.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR2 = { { { 0.0f, 0.0f, -1.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR3 = { { { 0.0f, 0.0f, 0.0f, -1.0f } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegativeZero = { { { 0x80000000, 0x80000000, 0x80000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegate3 = { { { 0x80000000, 0x80000000, 0x80000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskXY = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMask3 = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskX = { { { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskY = { { { 0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskZ = { { { 0x00000000, 
0x00000000, 0xFFFFFFFF, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskW = { { { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF } } }; + XMGLOBALCONST XMVECTORF32 g_XMOne = { { { 1.0f, 1.0f, 1.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMOne3 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMZero = { { { 0.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTwo = { { { 2.f, 2.f, 2.f, 2.f } } }; + XMGLOBALCONST XMVECTORF32 g_XMFour = { { { 4.f, 4.f, 4.f, 4.f } } }; + XMGLOBALCONST XMVECTORF32 g_XMSix = { { { 6.f, 6.f, 6.f, 6.f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativeOne = { { { -1.0f, -1.0f, -1.0f, -1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMOneHalf = { { { 0.5f, 0.5f, 0.5f, 0.5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativeOneHalf = { { { -0.5f, -0.5f, -0.5f, -0.5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativeTwoPi = { { { -XM_2PI, -XM_2PI, -XM_2PI, -XM_2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativePi = { { { -XM_PI, -XM_PI, -XM_PI, -XM_PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMHalfPi = { { { XM_PIDIV2, XM_PIDIV2, XM_PIDIV2, XM_PIDIV2 } } }; + XMGLOBALCONST XMVECTORF32 g_XMPi = { { { XM_PI, XM_PI, XM_PI, XM_PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMReciprocalPi = { { { XM_1DIVPI, XM_1DIVPI, XM_1DIVPI, XM_1DIVPI } } }; + XMGLOBALCONST XMVECTORF32 g_XMTwoPi = { { { XM_2PI, XM_2PI, XM_2PI, XM_2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMReciprocalTwoPi = { { { XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMEpsilon = { { { 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f } } }; + XMGLOBALCONST XMVECTORI32 g_XMInfinity = { { { 0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMQNaN = { { { 0x7FC00000, 0x7FC00000, 0x7FC00000, 0x7FC00000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMQNaNTest = { { { 0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF } } }; + XMGLOBALCONST XMVECTORI32 g_XMAbsMask = { { { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF } } }; + XMGLOBALCONST XMVECTORI32 g_XMFltMin = { { { 0x00800000, 0x00800000, 0x00800000, 0x00800000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMFltMax = { { { 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegOneMask = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskA8R8G8B8 = { { { 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipA8R8G8B8 = { { { 0x00000000, 0x00000000, 0x00000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixAA8R8G8B8 = { { { 0.0f, 0.0f, 0.0f, float(0x80000000U) } } }; + XMGLOBALCONST XMVECTORF32 g_XMNormalizeA8R8G8B8 = { { { 1.0f / (255.0f * float(0x10000)), 1.0f / (255.0f * float(0x100)), 1.0f / 255.0f, 1.0f / (255.0f * float(0x1000000)) } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskA2B10G10R10 = { { { 0x000003FF, 0x000FFC00, 0x3FF00000, 0xC0000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipA2B10G10R10 = { { { 0x00000200, 0x00080000, 0x20000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixAA2B10G10R10 = { { { -512.0f, -512.0f * float(0x400), -512.0f * float(0x100000), float(0x80000000U) } } }; + XMGLOBALCONST XMVECTORF32 g_XMNormalizeA2B10G10R10 = { { { 1.0f / 511.0f, 1.0f / (511.0f * float(0x400)), 1.0f / (511.0f * float(0x100000)), 1.0f / (3.0f * float(0x40000000)) } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskX16Y16 = { { { 0x0000FFFF, 0xFFFF0000, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16 = { { { 0x00008000, 0x00000000, 0x00000000, 
0x00000000 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16 = { { { -32768.0f, 0.0f, 0.0f, 0.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16 = { { { 1.0f / 32767.0f, 1.0f / (32767.0f * 65536.0f), 0.0f, 0.0f } } };
+    XMGLOBALCONST XMVECTORU32 g_XMMaskX16Y16Z16W16 = { { { 0x0000FFFF, 0x0000FFFF, 0xFFFF0000, 0xFFFF0000 } } };
+    XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16Z16W16 = { { { 0x00008000, 0x00008000, 0x00000000, 0x00000000 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16Z16W16 = { { { -32768.0f, -32768.0f, 0.0f, 0.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16Z16W16 = { { { 1.0f / 32767.0f, 1.0f / 32767.0f, 1.0f / (32767.0f * 65536.0f), 1.0f / (32767.0f * 65536.0f) } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNoFraction = { { { 8388608.0f, 8388608.0f, 8388608.0f, 8388608.0f } } };
+    XMGLOBALCONST XMVECTORI32 g_XMMaskByte = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNegateX = { { { -1.0f, 1.0f, 1.0f, 1.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNegateY = { { { 1.0f, -1.0f, 1.0f, 1.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNegateZ = { { { 1.0f, 1.0f, -1.0f, 1.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMNegateW = { { { 1.0f, 1.0f, 1.0f, -1.0f } } };
+    XMGLOBALCONST XMVECTORU32 g_XMSelect0101 = { { { XM_SELECT_0, XM_SELECT_1, XM_SELECT_0, XM_SELECT_1 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMSelect1010 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_0 } } };
+    XMGLOBALCONST XMVECTORI32 g_XMOneHalfMinusEpsilon = { { { 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD } } };
+    XMGLOBALCONST XMVECTORU32 g_XMSelect1000 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_0, XM_SELECT_0 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMSelect1100 = { { { XM_SELECT_1, XM_SELECT_1, XM_SELECT_0, XM_SELECT_0 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMSelect1110 = { { { XM_SELECT_1, XM_SELECT_1, XM_SELECT_1, XM_SELECT_0 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMSelect1011 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_1 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMFixupY16 = { { { 1.0f, 1.0f / 65536.0f, 0.0f, 0.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMFixupY16W16 = { { { 1.0f, 1.0f, 1.0f / 65536.0f, 1.0f / 65536.0f } } };
+    XMGLOBALCONST XMVECTORU32 g_XMFlipY = { { { 0, 0x80000000, 0, 0 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMFlipZ = { { { 0, 0, 0x80000000, 0 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMFlipW = { { { 0, 0, 0, 0x80000000 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMFlipYZ = { { { 0, 0x80000000, 0x80000000, 0 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMFlipZW = { { { 0, 0, 0x80000000, 0x80000000 } } };
+    XMGLOBALCONST XMVECTORU32 g_XMFlipYW = { { { 0, 0x80000000, 0, 0x80000000 } } };
+    XMGLOBALCONST XMVECTORI32 g_XMMaskDec4 = { { { 0x3FF, 0x3FF << 10, 0x3FF << 20, static_cast<int>(0xC0000000) } } };
+    XMGLOBALCONST XMVECTORI32 g_XMXorDec4 = { { { 0x200, 0x200 << 10, 0x200 << 20, 0 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMAddUDec4 = { { { 0, 0, 0, 32768.0f * 65536.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_XMAddDec4 = { { { -512.0f, -512.0f * 1024.0f, -512.0f * 1024.0f * 1024.0f, 0 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMMulDec4 = { { { 1.0f, 1.0f / 1024.0f, 1.0f / (1024.0f * 1024.0f), 1.0f / (1024.0f * 1024.0f * 1024.0f) } } };
+    XMGLOBALCONST XMVECTORU32 g_XMMaskByte4 = { { { 0xFF, 0xFF00, 0xFF0000, 0xFF000000 } } };
+    XMGLOBALCONST XMVECTORI32 g_XMXorByte4 = { { { 0x80, 0x8000, 0x800000, 0x00000000 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMAddByte4 = { { { -128.0f, -128.0f * 256.0f, -128.0f * 65536.0f, 0 } } };
+    XMGLOBALCONST XMVECTORF32 g_XMFixUnsigned = { { { 32768.0f *
65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMMaxInt = { { { 65536.0f * 32768.0f - 128.0f, 65536.0f * 32768.0f - 128.0f, 65536.0f * 32768.0f - 128.0f, 65536.0f * 32768.0f - 128.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMMaxUInt = { { { 65536.0f * 65536.0f - 256.0f, 65536.0f * 65536.0f - 256.0f, 65536.0f * 65536.0f - 256.0f, 65536.0f * 65536.0f - 256.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMUnsignedFix = { { { 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMsrgbScale = { { { 12.92f, 12.92f, 12.92f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMsrgbA = { { { 0.055f, 0.055f, 0.055f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMsrgbA1 = { { { 1.055f, 1.055f, 1.055f, 1.0f } } }; + XMGLOBALCONST XMVECTORI32 g_XMExponentBias = { { { 127, 127, 127, 127 } } }; + XMGLOBALCONST XMVECTORI32 g_XMSubnormalExponent = { { { -126, -126, -126, -126 } } }; + XMGLOBALCONST XMVECTORI32 g_XMNumTrailing = { { { 23, 23, 23, 23 } } }; + XMGLOBALCONST XMVECTORI32 g_XMMinNormal = { { { 0x00800000, 0x00800000, 0x00800000, 0x00800000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegInfinity = { { { 0xFF800000, 0xFF800000, 0xFF800000, 0xFF800000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegQNaN = { { { 0xFFC00000, 0xFFC00000, 0xFFC00000, 0xFFC00000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMBin128 = { { { 0x43000000, 0x43000000, 0x43000000, 0x43000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMBinNeg150 = { { { 0xC3160000, 0xC3160000, 0xC3160000, 0xC3160000 } } }; + XMGLOBALCONST XMVECTORI32 g_XM253 = { { { 253, 253, 253, 253 } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst1 = { { { -6.93147182e-1f, -6.93147182e-1f, -6.93147182e-1f, -6.93147182e-1f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst2 = { { { +2.40226462e-1f, +2.40226462e-1f, +2.40226462e-1f, +2.40226462e-1f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst3 = { { { -5.55036440e-2f, -5.55036440e-2f, -5.55036440e-2f, -5.55036440e-2f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst4 = { { { +9.61597636e-3f, +9.61597636e-3f, +9.61597636e-3f, +9.61597636e-3f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst5 = { { { -1.32823968e-3f, -1.32823968e-3f, -1.32823968e-3f, -1.32823968e-3f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst6 = { { { +1.47491097e-4f, +1.47491097e-4f, +1.47491097e-4f, +1.47491097e-4f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst7 = { { { -1.08635004e-5f, -1.08635004e-5f, -1.08635004e-5f, -1.08635004e-5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst0 = { { { +1.442693f, +1.442693f, +1.442693f, +1.442693f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst1 = { { { -0.721242f, -0.721242f, -0.721242f, -0.721242f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst2 = { { { +0.479384f, +0.479384f, +0.479384f, +0.479384f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst3 = { { { -0.350295f, -0.350295f, -0.350295f, -0.350295f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst4 = { { { +0.248590f, +0.248590f, +0.248590f, +0.248590f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst5 = { { { -0.145700f, -0.145700f, -0.145700f, -0.145700f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst6 = { { { +0.057148f, +0.057148f, +0.057148f, +0.057148f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst7 = { { { -0.010578f, -0.010578f, -0.010578f, -0.010578f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLgE = { { { +1.442695f, +1.442695f, +1.442695f, +1.442695f } } }; + XMGLOBALCONST XMVECTORF32 g_XMInvLgE = { { { +6.93147182e-1f, +6.93147182e-1f, +6.93147182e-1f, +6.93147182e-1f } } }; + XMGLOBALCONST 
XMVECTORF32 g_UByteMax = { { { 255.0f, 255.0f, 255.0f, 255.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_ByteMin = { { { -127.0f, -127.0f, -127.0f, -127.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_ByteMax = { { { 127.0f, 127.0f, 127.0f, 127.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_ShortMin = { { { -32767.0f, -32767.0f, -32767.0f, -32767.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_ShortMax = { { { 32767.0f, 32767.0f, 32767.0f, 32767.0f } } };
+    XMGLOBALCONST XMVECTORF32 g_UShortMax = { { { 65535.0f, 65535.0f, 65535.0f, 65535.0f } } };
+
+    /****************************************************************************
+     *
+     * Implementation
+     *
+     ****************************************************************************/
+
+#pragma warning(push)
+#pragma warning(disable:4068 4214 4204 4365 4616 4640 6001 6101)
+    // C4068/4616: ignore unknown pragmas
+    // C4214/4204: nonstandard extension used
+    // C4365/4640: Off by default noise
+    // C6001/6101: False positives
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 25000, "FXMVECTOR is 16 bytes")
+#pragma prefast(disable : 26495, "Union initialization confuses /analyze")
+#endif
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wundefined-reinterpret-cast"
+#endif
+
+//------------------------------------------------------------------------------
+
+    inline XMVECTOR XM_CALLCONV XMVectorSetBinaryConstant(uint32_t C0, uint32_t C1, uint32_t C2, uint32_t C3) noexcept
+    {
+#if defined(_XM_NO_INTRINSICS_)
+        XMVECTORU32 vResult;
+        vResult.u[0] = (0 - (C0 & 1)) & 0x3F800000;
+        vResult.u[1] = (0 - (C1 & 1)) & 0x3F800000;
+        vResult.u[2] = (0 - (C2 & 1)) & 0x3F800000;
+        vResult.u[3] = (0 - (C3 & 1)) & 0x3F800000;
+        return vResult.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+        XMVECTORU32 vResult;
+        vResult.u[0] = (0 - (C0 & 1)) & 0x3F800000;
+        vResult.u[1] = (0 - (C1 & 1)) & 0x3F800000;
+        vResult.u[2] = (0 - (C2 & 1)) & 0x3F800000;
+        vResult.u[3] = (0 - (C3 & 1)) & 0x3F800000;
+        return vResult.v;
+#else // XM_SSE_INTRINSICS_
+        static const XMVECTORU32 g_vMask1 = { { { 1, 1, 1, 1 } } };
+        // Move the parms to a vector
+        __m128i vTemp = _mm_set_epi32(static_cast<int>(C3), static_cast<int>(C2), static_cast<int>(C1), static_cast<int>(C0));
+        // Mask off the low bits
+        vTemp = _mm_and_si128(vTemp, g_vMask1);
+        // 0xFFFFFFFF on true bits
+        vTemp = _mm_cmpeq_epi32(vTemp, g_vMask1);
+        // 0xFFFFFFFF -> 1.0f, 0x00000000 -> 0.0f
+        vTemp = _mm_and_si128(vTemp, g_XMOne);
+        return _mm_castsi128_ps(vTemp);
+#endif
+    }
+
+    //------------------------------------------------------------------------------
+
+    inline XMVECTOR XM_CALLCONV XMVectorSplatConstant(int32_t IntConstant, uint32_t DivExponent) noexcept
+    {
+        assert(IntConstant >= -16 && IntConstant <= 15);
+        assert(DivExponent < 32);
+#if defined(_XM_NO_INTRINSICS_)
+
+        using DirectX::XMConvertVectorIntToFloat;
+
+        XMVECTORI32 V = { { { IntConstant, IntConstant, IntConstant, IntConstant } } };
+        return XMConvertVectorIntToFloat(V.v, DivExponent);
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+        // Splat the int
+        int32x4_t vScale = vdupq_n_s32(IntConstant);
+        // Convert to a float
+        XMVECTOR vResult = vcvtq_f32_s32(vScale);
+        // Convert DivExponent into 1.0f/(1<<DivExponent)
+        uint32_t uScale = 0x3F800000U - (DivExponent << 23);
+        // Splat the scalar value (It's really a float)
+        vScale = vreinterpretq_s32_u32(vdupq_n_u32(uScale));
+        // Multiply by the reciprocal (Perform a right shift by DivExponent)
+        vResult = vmulq_f32(vResult, reinterpret_cast<const float32x4_t*>(&vScale)[0]);
+        return vResult;
+#else // XM_SSE_INTRINSICS_
+        // Splat the int
+        __m128i vScale = _mm_set1_epi32(IntConstant);
+        // Convert to a float
+        XMVECTOR vResult = _mm_cvtepi32_ps(vScale);
+        // Convert DivExponent into 1.0f/(1<<DivExponent)
+        uint32_t uScale = 0x3F800000U - (DivExponent << 23);
+        // Splat the scalar value (It's really a float)
+        vScale = _mm_set1_epi32(static_cast<int>(uScale));
+        // Multiply by the reciprocal (Perform a right shift by DivExponent)
+        vResult = _mm_mul_ps(vResult, _mm_castsi128_ps(vScale));
+        return vResult;
+#endif
+    }
+
+    //------------------------------------------------------------------------------
+
+    inline XMVECTOR XM_CALLCONV XMVectorSplatConstantInt(int32_t IntConstant) noexcept
+    {
+        assert(IntConstant >= -16 && IntConstant <= 15);
+#if defined(_XM_NO_INTRINSICS_)
+
+        XMVECTORI32 V = { { { IntConstant, IntConstant, IntConstant, IntConstant } } };
+        return V.v;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+        int32x4_t V = vdupq_n_s32(IntConstant);
+        return reinterpret_cast<float32x4_t*>(&V)[0];
+#else // XM_SSE_INTRINSICS_
+        __m128i V = _mm_set1_epi32(IntConstant);
+        return _mm_castsi128_ps(V);
+#endif
+    }
+
+#include "directxmath/directxmathconvert.inl"
+#include "directxmath/directxmathvector.inl"
+#include "directxmath/directxmathmatrix.inl"
+#include "directxmath/directxmathmisc.inl"
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+#pragma warning(pop)
+
+} // namespace DirectX
+
+using namespace DirectX;
diff --git a/include/directxmath/directxmathconvert.inl b/include/directxmath/directxmathconvert.inl
new file mode 100644
index 0000000..dbf153c
--- /dev/null
+++ b/include/directxmath/directxmathconvert.inl
@@ -0,0 +1,2187 @@
+//-------------------------------------------------------------------------------------
+// DirectXMathConvert.inl -- SIMD C++ Math library
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+//
+// http://go.microsoft.com/fwlink/?LinkID=615560
+//-------------------------------------------------------------------------------------
+
+#pragma once
+
+/****************************************************************************
+ *
+ * Data conversion
+ *
+ ****************************************************************************/
+
+ //------------------------------------------------------------------------------
+
+#pragma warning(push)
+#pragma warning(disable:4701)
+// C4701: false positives
+
+inline XMVECTOR XM_CALLCONV XMConvertVectorIntToFloat
+(
+    FXMVECTOR VInt,
+    uint32_t DivExponent
+) noexcept
+{
+    assert(DivExponent < 32);
+#if defined(_XM_NO_INTRINSICS_)
+    float fScale = 1.0f / static_cast<float>(1U << DivExponent);
+    uint32_t ElementIndex = 0;
+    XMVECTOR Result;
+    do {
+        auto iTemp = static_cast<int32_t>(VInt.vector4_u32[ElementIndex]);
+        Result.vector4_f32[ElementIndex] = static_cast<float>(iTemp) * fScale;
+    } while (++ElementIndex < 4);
+    return Result;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float fScale = 1.0f / (float)(1U << DivExponent);
+    float32x4_t vResult = vcvtq_f32_s32(VInt);
+    return vmulq_n_f32(vResult, fScale);
+#else // _XM_SSE_INTRINSICS_
+    // Convert to floats
+    XMVECTOR vResult = _mm_cvtepi32_ps(_mm_castps_si128(VInt));
+    // Convert DivExponent into 1.0f/(1<<DivExponent)
+    uint32_t uScale = 0x3F800000U - (DivExponent << 23);
+    // Splat the scalar value
+    __m128i vScale = _mm_set1_epi32(static_cast<int>(uScale));
+    vResult = _mm_mul_ps(vResult, _mm_castsi128_ps(vScale));
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMConvertVectorFloatToInt
+(
+    FXMVECTOR VFloat,
+    uint32_t MulExponent
+) noexcept
+{
+    assert(MulExponent < 32);
+#if defined(_XM_NO_INTRINSICS_)
+    // Get the scalar factor.
+    auto fScale = static_cast<float>(1U << MulExponent);
+    uint32_t ElementIndex = 0;
+    XMVECTOR Result;
+    do {
+        int32_t iResult;
+        float fTemp = VFloat.vector4_f32[ElementIndex] * fScale;
+        if (fTemp <= -(65536.0f * 32768.0f))
+        {
+            iResult = (-0x7FFFFFFF) - 1;
+        }
+        else if (fTemp > (65536.0f * 32768.0f) - 128.0f)
+        {
+            iResult = 0x7FFFFFFF;
+        }
+        else {
+            iResult = static_cast<int32_t>(fTemp);
+        }
+        Result.vector4_u32[ElementIndex] = static_cast<uint32_t>(iResult);
+    } while (++ElementIndex < 4);
+    return Result;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4_t vResult = vmulq_n_f32(VFloat, (float)(1U << MulExponent));
+    // In case of positive overflow, detect it
+    uint32x4_t vOverflow = vcgtq_f32(vResult, g_XMMaxInt);
+    // Float to int conversion
+    int32x4_t vResulti = vcvtq_s32_f32(vResult);
+    // If there was positive overflow, set to 0x7FFFFFFF
+    vResult = vandq_u32(vOverflow, g_XMAbsMask);
+    vOverflow = vbicq_u32(vResulti, vOverflow);
+    vOverflow = vorrq_u32(vOverflow, vResult);
+    return vOverflow;
+#else // _XM_SSE_INTRINSICS_
+    XMVECTOR vResult = _mm_set_ps1(static_cast<float>(1U << MulExponent));
+    vResult = _mm_mul_ps(vResult, VFloat);
+    // In case of positive overflow, detect it
+    XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxInt);
+    // Float to int conversion
+    __m128i vResulti = _mm_cvttps_epi32(vResult);
+    // If there was positive overflow, set to 0x7FFFFFFF
+    vResult = _mm_and_ps(vOverflow, g_XMAbsMask);
+    vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti));
+    vOverflow = _mm_or_ps(vOverflow, vResult);
+    return vOverflow;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMConvertVectorUIntToFloat
+(
+    FXMVECTOR VUInt,
+    uint32_t DivExponent
+) noexcept
+{
+    assert(DivExponent < 32);
+#if defined(_XM_NO_INTRINSICS_)
+    float fScale = 1.0f / static_cast<float>(1U << DivExponent);
+    uint32_t ElementIndex = 0;
+    XMVECTOR Result;
+    do {
+        Result.vector4_f32[ElementIndex] = static_cast<float>(VUInt.vector4_u32[ElementIndex]) * fScale;
+    } while (++ElementIndex < 4);
+    return Result;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float fScale = 1.0f / (float)(1U << DivExponent);
+    float32x4_t vResult = vcvtq_f32_u32(VUInt);
+    return vmulq_n_f32(vResult, fScale);
+#else // _XM_SSE_INTRINSICS_
+    // For the values that are higher than 0x7FFFFFFF, a fixup is needed
+    // Determine which ones need the fix.
+    XMVECTOR vMask = _mm_and_ps(VUInt, g_XMNegativeZero);
+    // Force all values positive
+    XMVECTOR vResult = _mm_xor_ps(VUInt, vMask);
+    // Convert to floats
+    vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult));
+    // Convert 0x80000000 -> 0xFFFFFFFF
+    __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31);
+    // For only the ones that are too big, add the fixup
+    vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned);
+    vResult = _mm_add_ps(vResult, vMask);
+    // Convert DivExponent into 1.0f/(1<<DivExponent)
+    uint32_t uScale = 0x3F800000U - (DivExponent << 23);
+    // Splat the scalar value
+    iMask = _mm_set1_epi32(static_cast<int>(uScale));
+    vResult = _mm_mul_ps(vResult, _mm_castsi128_ps(iMask));
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMConvertVectorFloatToUInt
+(
+    FXMVECTOR VFloat,
+    uint32_t MulExponent
+) noexcept
+{
+    assert(MulExponent < 32);
+#if defined(_XM_NO_INTRINSICS_)
+    // Get the scalar factor.
+    auto fScale = static_cast<float>(1U << MulExponent);
+    uint32_t ElementIndex = 0;
+    XMVECTOR Result;
+    do {
+        uint32_t uResult;
+        float fTemp = VFloat.vector4_f32[ElementIndex] * fScale;
+        if (fTemp <= 0.0f)
+        {
+            uResult = 0;
+        }
+        else if (fTemp >= (65536.0f * 65536.0f))
+        {
+            uResult = 0xFFFFFFFFU;
+        }
+        else {
+            uResult = static_cast<uint32_t>(fTemp);
+        }
+        Result.vector4_u32[ElementIndex] = uResult;
+    } while (++ElementIndex < 4);
+    return Result;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4_t vResult = vmulq_n_f32(VFloat, (float)(1U << MulExponent));
+    // In case of overflow, detect it
+    uint32x4_t vOverflow = vcgtq_f32(vResult, g_XMMaxUInt);
+    // Float to int conversion
+    uint32x4_t vResulti = vcvtq_u32_f32(vResult);
+    // If there was overflow, set to 0xFFFFFFFFU
+    vResult = vbicq_u32(vResulti, vOverflow);
+    vOverflow = vorrq_u32(vOverflow, vResult);
+    return vOverflow;
+#else // _XM_SSE_INTRINSICS_
+    XMVECTOR vResult = _mm_set_ps1(static_cast<float>(1U << MulExponent));
+    vResult = _mm_mul_ps(vResult, VFloat);
+    // Clamp to >=0
+    vResult = _mm_max_ps(vResult, g_XMZero);
+    // Any numbers that are too big, set to 0xFFFFFFFFU
+    XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt);
+    XMVECTOR vValue = g_XMUnsignedFix;
+    // Too large for a signed integer?
+    XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue);
+    // Zero for numbers lower than 0x80000000, 32768.0f*65536.0f otherwise
+    vValue = _mm_and_ps(vValue, vMask);
+    // Perform fixup only on numbers too large (Keeps low bit precision)
+    vResult = _mm_sub_ps(vResult, vValue);
+    __m128i vResulti = _mm_cvttps_epi32(vResult);
+    // Convert from signed to unsigned only if greater than 0x80000000
+    vMask = _mm_and_ps(vMask, g_XMNegativeZero);
+    vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask);
+    // On those that are too large, set to 0xFFFFFFFF
+    vResult = _mm_or_ps(vResult, vOverflow);
+    return vResult;
+#endif
+}
+
+#pragma warning(pop)
+
+/****************************************************************************
+ *
+ * Vector and matrix load operations
+ *
+ ****************************************************************************/
+
+ //------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMVECTOR XM_CALLCONV XMLoadInt(const uint32_t* pSource) noexcept
+{
+    assert(pSource);
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTOR V;
+    V.vector4_u32[0] = *pSource;
+    V.vector4_u32[1] = 0;
+    V.vector4_u32[2] = 0;
+    V.vector4_u32[3] = 0;
+    return V;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    uint32x4_t zero = vdupq_n_u32(0);
+    return vld1q_lane_u32(pSource, zero, 0);
+#elif defined(_XM_SSE_INTRINSICS_)
+    return _mm_load_ss(reinterpret_cast<const float*>(pSource));
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMVECTOR XM_CALLCONV XMLoadFloat(const float* pSource) noexcept
+{
+    assert(pSource);
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTOR V;
+    V.vector4_f32[0] = *pSource;
+    V.vector4_f32[1] = 0.f;
+    V.vector4_f32[2] = 0.f;
+    V.vector4_f32[3] = 0.f;
+    return V;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4_t zero = vdupq_n_f32(0);
+    return vld1q_lane_f32(pSource, zero, 0);
+#elif defined(_XM_SSE_INTRINSICS_)
+    return _mm_load_ss(pSource);
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMVECTOR XM_CALLCONV XMLoadInt2(const uint32_t* pSource) noexcept
+{
+    assert(pSource);
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTOR V;
+    V.vector4_u32[0] = pSource[0];
+ V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = 0; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(pSource); + uint32x2_t zero = vdup_n_u32(0); + return vcombine_u32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt2A(const uint32_t* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = 0; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + uint32x2_t x = vld1_u32_ex(pSource, 64); +#else + uint32x2_t x = vld1_u32(pSource); +#endif + uint32x2_t zero = vdup_n_u32(0); + return vcombine_u32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat2(const XMFLOAT2* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t x = vld1_f32(reinterpret_cast(pSource)); + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat2A(const XMFLOAT2A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x2_t x = vld1_f32_ex(reinterpret_cast(pSource), 64); +#else + float32x2_t x = vld1_f32(reinterpret_cast(pSource)); +#endif + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadSInt2(const XMINT2* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x2_t x = vld1_s32(reinterpret_cast(pSource)); + float32x2_t v = vcvt_f32_s32(x); + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(v, zero); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 V = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + return _mm_cvtepi32_ps(_mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadUInt2(const XMUINT2* pSource) noexcept +{ + assert(pSource); +#if 
defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(reinterpret_cast(pSource)); + float32x2_t v = vcvt_f32_u32(x); + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(v, zero); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 V = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + // For the values that are higher than 0x7FFFFFFF, a fixup is needed + // Determine which ones need the fix. + XMVECTOR vMask = _mm_and_ps(V, g_XMNegativeZero); + // Force all values positive + XMVECTOR vResult = _mm_xor_ps(V, vMask); + // Convert to floats + vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult)); + // Convert 0x80000000 -> 0xFFFFFFFF + __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31); + // For only the ones that are too big, add the fixup + vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned); + vResult = _mm_add_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt3(const uint32_t* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(pSource); + uint32x2_t zero = vdup_n_u32(0); + uint32x2_t y = vld1_lane_u32(pSource + 2, zero, 0); + return vcombine_u32(x, y); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_insert_ps(xy, z, 0x20); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_movelh_ps(xy, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt3A(const uint32_t* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Reads an extra integer which is zero'd +#ifdef _MSC_VER + uint32x4_t V = vld1q_u32_ex(pSource, 128); +#else + uint32x4_t V = vld1q_u32(pSource); +#endif + return vsetq_lane_u32(0, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_insert_ps(xy, z, 0x20); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_movelh_ps(xy, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat3(const XMFLOAT3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = 0.f; + return V; 
+#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t x = vld1_f32(reinterpret_cast(pSource)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t y = vld1_lane_f32(reinterpret_cast(pSource) + 2, zero, 0); + return vcombine_f32(x, y); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(&pSource->z); + return _mm_insert_ps(xy, z, 0x20); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(&pSource->z); + return _mm_movelh_ps(xy, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat3A(const XMFLOAT3A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Reads an extra float which is zero'd +#ifdef _MSC_VER + float32x4_t V = vld1q_f32_ex(reinterpret_cast(pSource), 128); +#else + float32x4_t V = vld1q_f32(reinterpret_cast(pSource)); +#endif + return vsetq_lane_f32(0, V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + // Reads an extra float which is zero'd + __m128 V = _mm_load_ps(&pSource->x); + return _mm_and_ps(V, g_XMMask3); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadSInt3(const XMINT3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = 0.f; + return V; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x2_t x = vld1_s32(reinterpret_cast(pSource)); + int32x2_t zero = vdup_n_s32(0); + int32x2_t y = vld1_lane_s32(reinterpret_cast(pSource) + 2, zero, 0); + int32x4_t v = vcombine_s32(x, y); + return vcvtq_f32_s32(v); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(&pSource->z)); + __m128 V = _mm_movelh_ps(xy, z); + return _mm_cvtepi32_ps(_mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadUInt3(const XMUINT3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(reinterpret_cast(pSource)); + uint32x2_t zero = vdup_n_u32(0); + uint32x2_t y = vld1_lane_u32(reinterpret_cast(pSource) + 2, zero, 0); + uint32x4_t v = vcombine_u32(x, y); + return vcvtq_f32_u32(v); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(&pSource->z)); + __m128 V = _mm_movelh_ps(xy, z); + // For the values that are higher than 0x7FFFFFFF, a fixup is needed + // Determine which ones need the fix. 
+ XMVECTOR vMask = _mm_and_ps(V, g_XMNegativeZero); + // Force all values positive + XMVECTOR vResult = _mm_xor_ps(V, vMask); + // Convert to floats + vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult)); + // Convert 0x80000000 -> 0xFFFFFFFF + __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31); + // For only the ones that are too big, add the fixup + vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned); + vResult = _mm_add_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt4(const uint32_t* pSource) noexcept +{ + assert(pSource); + +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = pSource[3]; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_u32(pSource); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_loadu_si128(reinterpret_cast(pSource)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt4A(const uint32_t* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = pSource[3]; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + return vld1q_u32_ex(pSource, 128); +#else + return vld1q_u32(pSource); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_load_si128(reinterpret_cast(pSource)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat4(const XMFLOAT4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = pSource->w; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_f32(reinterpret_cast(pSource)); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_loadu_ps(&pSource->x); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat4A(const XMFLOAT4A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = pSource->w; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + return vld1q_f32_ex(reinterpret_cast(pSource), 128); +#else + return vld1q_f32(reinterpret_cast(pSource)); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ps(&pSource->x); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadSInt4(const XMINT4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = static_cast(pSource->w); + return V; + +#elif 
defined(_XM_ARM_NEON_INTRINSICS_)
+    int32x4_t v = vld1q_s32(reinterpret_cast<const int32_t*>(pSource));
+    return vcvtq_f32_s32(v);
+#elif defined(_XM_SSE_INTRINSICS_)
+    __m128i V = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pSource));
+    return _mm_cvtepi32_ps(V);
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMVECTOR XM_CALLCONV XMLoadUInt4(const XMUINT4* pSource) noexcept
+{
+    assert(pSource);
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTOR V;
+    V.vector4_f32[0] = static_cast<float>(pSource->x);
+    V.vector4_f32[1] = static_cast<float>(pSource->y);
+    V.vector4_f32[2] = static_cast<float>(pSource->z);
+    V.vector4_f32[3] = static_cast<float>(pSource->w);
+    return V;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    uint32x4_t v = vld1q_u32(reinterpret_cast<const uint32_t*>(pSource));
+    return vcvtq_f32_u32(v);
+#elif defined(_XM_SSE_INTRINSICS_)
+    __m128i V = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pSource));
+    // For the values that are higher than 0x7FFFFFFF, a fixup is needed
+    // Determine which ones need the fix.
+    XMVECTOR vMask = _mm_and_ps(_mm_castsi128_ps(V), g_XMNegativeZero);
+    // Force all values positive
+    XMVECTOR vResult = _mm_xor_ps(_mm_castsi128_ps(V), vMask);
+    // Convert to floats
+    vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult));
+    // Convert 0x80000000 -> 0xFFFFFFFF
+    __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31);
+    // For only the ones that are too big, add the fixup
+    vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned);
+    vResult = _mm_add_ps(vResult, vMask);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMMATRIX XM_CALLCONV XMLoadFloat3x3(const XMFLOAT3X3* pSource) noexcept
+{
+    assert(pSource);
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMMATRIX M;
+    M.r[0].vector4_f32[0] = pSource->m[0][0];
+    M.r[0].vector4_f32[1] = pSource->m[0][1];
+    M.r[0].vector4_f32[2] = pSource->m[0][2];
+    M.r[0].vector4_f32[3] = 0.0f;
+
+    M.r[1].vector4_f32[0] = pSource->m[1][0];
+    M.r[1].vector4_f32[1] = pSource->m[1][1];
+    M.r[1].vector4_f32[2] = pSource->m[1][2];
+    M.r[1].vector4_f32[3] = 0.0f;
+
+    M.r[2].vector4_f32[0] = pSource->m[2][0];
+    M.r[2].vector4_f32[1] = pSource->m[2][1];
+    M.r[2].vector4_f32[2] = pSource->m[2][2];
+    M.r[2].vector4_f32[3] = 0.0f;
+    M.r[3].vector4_f32[0] = 0.0f;
+    M.r[3].vector4_f32[1] = 0.0f;
+    M.r[3].vector4_f32[2] = 0.0f;
+    M.r[3].vector4_f32[3] = 1.0f;
+    return M;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4_t v0 = vld1q_f32(&pSource->m[0][0]);
+    float32x4_t v1 = vld1q_f32(&pSource->m[1][1]);
+    float32x2_t v2 = vcreate_f32(static_cast<uint64_t>(*reinterpret_cast<const uint32_t*>(&pSource->m[2][2])));
+    float32x4_t T = vextq_f32(v0, v1, 3);
+
+    XMMATRIX M;
+    M.r[0] = vandq_u32(v0, g_XMMask3);
+    M.r[1] = vandq_u32(T, g_XMMask3);
+    M.r[2] = vcombine_f32(vget_high_f32(v1), v2);
+    M.r[3] = g_XMIdentityR3;
+    return M;
+#elif defined(_XM_SSE_INTRINSICS_)
+    __m128 Z = _mm_setzero_ps();
+
+    __m128 V1 = _mm_loadu_ps(&pSource->m[0][0]);
+    __m128 V2 = _mm_loadu_ps(&pSource->m[1][1]);
+    __m128 V3 = _mm_load_ss(&pSource->m[2][2]);
+
+    __m128 T1 = _mm_unpackhi_ps(V1, Z);
+    __m128 T2 = _mm_unpacklo_ps(V2, Z);
+    __m128 T3 = _mm_shuffle_ps(V3, T2, _MM_SHUFFLE(0, 1, 0, 0));
+    __m128 T4 = _mm_movehl_ps(T2, T3);
+    __m128 T5 = _mm_movehl_ps(Z, T1);
+
+    XMMATRIX M;
+    M.r[0] = _mm_movelh_ps(V1, T1);
+    M.r[1] = _mm_add_ps(T4, T5);
+    M.r[2] = _mm_shuffle_ps(V2, V3, _MM_SHUFFLE(1, 0, 3, 2));
+    M.r[3] = g_XMIdentityR3;
+    return M;
+#endif
+}
+
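The SSE paths of XMLoadUInt2, XMLoadUInt3, and XMLoadUInt4 above all apply the same fixup as XMConvertVectorUIntToFloat, because SSE2 has no unsigned 32-bit to float conversion. A scalar sketch of one lane, assuming only <cstdint> (uint32_to_float is an illustrative name, not a library function):

    #include <cstdint>

    // One lane of the SSE unsigned fixup: flip values with the top bit set into
    // signed range, convert, then compensate by adding back 2^31 (g_XMFixUnsigned).
    inline float uint32_to_float(uint32_t u)
    {
        uint32_t high = u & 0x80000000u;   // nonzero only for lanes needing the fixup
        float f = static_cast<float>(static_cast<int32_t>(u ^ high));
        if (high)
            f += 32768.0f * 65536.0f;      // 2^31, the g_XMFixUnsigned constant
        return f;
    }
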
+//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x3(const XMFLOAT4X3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t v0 = vld1q_f32(&pSource->m[0][0]); + float32x4_t v1 = vld1q_f32(&pSource->m[1][1]); + float32x4_t v2 = vld1q_f32(&pSource->m[2][2]); + + float32x4_t T1 = vextq_f32(v0, v1, 3); + float32x4_t T2 = vcombine_f32(vget_high_f32(v1), vget_low_f32(v2)); + float32x4_t T3 = vextq_f32(v2, v2, 1); + + XMMATRIX M; + M.r[0] = vandq_u32(v0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + // Use unaligned load instructions to + // load the 12 floats + // vTemp1 = x1,y1,z1,x2 + XMVECTOR vTemp1 = _mm_loadu_ps(&pSource->m[0][0]); + // vTemp2 = y2,z2,x3,y3 + XMVECTOR vTemp2 = _mm_loadu_ps(&pSource->m[1][1]); + // vTemp4 = z3,x4,y4,z4 + XMVECTOR vTemp4 = _mm_loadu_ps(&pSource->m[2][2]); + // vTemp3 = x3,y3,z3,z3 + XMVECTOR vTemp3 = _mm_shuffle_ps(vTemp2, vTemp4, _MM_SHUFFLE(0, 0, 3, 2)); + // vTemp2 = y2,z2,x2,x2 + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp1, _MM_SHUFFLE(3, 3, 1, 0)); + // vTemp2 = x2,y2,z2,z2 + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 1, 0, 2)); + // vTemp1 = x1,y1,z1,0 + vTemp1 = _mm_and_ps(vTemp1, g_XMMask3); + // vTemp2 = x2,y2,z2,0 + vTemp2 = _mm_and_ps(vTemp2, g_XMMask3); + // vTemp3 = x3,y3,z3,0 + vTemp3 = _mm_and_ps(vTemp3, g_XMMask3); + // vTemp4i = x4,y4,z4,0 + __m128i vTemp4i = _mm_srli_si128(_mm_castps_si128(vTemp4), 32 / 8); + // vTemp4i = x4,y4,z4,1.0f + vTemp4i = _mm_or_si128(vTemp4i, g_XMIdentityR3); + XMMATRIX M(vTemp1, + vTemp2, + vTemp3, + _mm_castsi128_ps(vTemp4i)); + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x3A(const XMFLOAT4X3A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif 
defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x4_t v0 = vld1q_f32_ex(&pSource->m[0][0], 128); + float32x4_t v1 = vld1q_f32_ex(&pSource->m[1][1], 128); + float32x4_t v2 = vld1q_f32_ex(&pSource->m[2][2], 128); +#else + float32x4_t v0 = vld1q_f32(&pSource->m[0][0]); + float32x4_t v1 = vld1q_f32(&pSource->m[1][1]); + float32x4_t v2 = vld1q_f32(&pSource->m[2][2]); +#endif + + float32x4_t T1 = vextq_f32(v0, v1, 3); + float32x4_t T2 = vcombine_f32(vget_high_f32(v1), vget_low_f32(v2)); + float32x4_t T3 = vextq_f32(v2, v2, 1); + + XMMATRIX M; + M.r[0] = vandq_u32(v0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + // Use aligned load instructions to + // load the 12 floats + // vTemp1 = x1,y1,z1,x2 + XMVECTOR vTemp1 = _mm_load_ps(&pSource->m[0][0]); + // vTemp2 = y2,z2,x3,y3 + XMVECTOR vTemp2 = _mm_load_ps(&pSource->m[1][1]); + // vTemp4 = z3,x4,y4,z4 + XMVECTOR vTemp4 = _mm_load_ps(&pSource->m[2][2]); + // vTemp3 = x3,y3,z3,z3 + XMVECTOR vTemp3 = _mm_shuffle_ps(vTemp2, vTemp4, _MM_SHUFFLE(0, 0, 3, 2)); + // vTemp2 = y2,z2,x2,x2 + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp1, _MM_SHUFFLE(3, 3, 1, 0)); + // vTemp2 = x2,y2,z2,z2 + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 1, 0, 2)); + // vTemp1 = x1,y1,z1,0 + vTemp1 = _mm_and_ps(vTemp1, g_XMMask3); + // vTemp2 = x2,y2,z2,0 + vTemp2 = _mm_and_ps(vTemp2, g_XMMask3); + // vTemp3 = x3,y3,z3,0 + vTemp3 = _mm_and_ps(vTemp3, g_XMMask3); + // vTemp4i = x4,y4,z4,0 + __m128i vTemp4i = _mm_srli_si128(_mm_castps_si128(vTemp4), 32 / 8); + // vTemp4i = x4,y4,z4,1.0f + vTemp4i = _mm_or_si128(vTemp4i, g_XMIdentityR3); + XMMATRIX M(vTemp1, + vTemp2, + vTemp3, + _mm_castsi128_ps(vTemp4i)); + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat3x4(const XMFLOAT3X4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[1][0]; + M.r[0].vector4_f32[2] = pSource->m[2][0]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[0][1]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[2][1]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[0][2]; + M.r[2].vector4_f32[1] = pSource->m[1][2]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[0][3]; + M.r[3].vector4_f32[1] = pSource->m[1][3]; + M.r[3].vector4_f32[2] = pSource->m[2][3]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2x4_t vTemp0 = vld4_f32(&pSource->_11); + float32x4_t vTemp1 = vld1q_f32(&pSource->_31); + + float32x2_t l = vget_low_f32(vTemp1); + float32x4_t T0 = vcombine_f32(vTemp0.val[0], l); + float32x2_t rl = vrev64_f32(l); + float32x4_t T1 = vcombine_f32(vTemp0.val[1], rl); + + float32x2_t h = vget_high_f32(vTemp1); + float32x4_t T2 = vcombine_f32(vTemp0.val[2], h); + float32x2_t rh = vrev64_f32(h); + float32x4_t T3 = vcombine_f32(vTemp0.val[3], rh); + + XMMATRIX M = {}; + M.r[0] = vandq_u32(T0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_loadu_ps(&pSource->_11); + M.r[1] = _mm_loadu_ps(&pSource->_21); + 
M.r[2] = _mm_loadu_ps(&pSource->_31); + M.r[3] = g_XMIdentityR3; + + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + XMMATRIX mResult; + + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat3x4A(const XMFLOAT3X4A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[1][0]; + M.r[0].vector4_f32[2] = pSource->m[2][0]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[0][1]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[2][1]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[0][2]; + M.r[2].vector4_f32[1] = pSource->m[1][2]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[0][3]; + M.r[3].vector4_f32[1] = pSource->m[1][3]; + M.r[3].vector4_f32[2] = pSource->m[2][3]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x2x4_t vTemp0 = vld4_f32_ex(&pSource->_11, 128); + float32x4_t vTemp1 = vld1q_f32_ex(&pSource->_31, 128); +#else + float32x2x4_t vTemp0 = vld4_f32(&pSource->_11); + float32x4_t vTemp1 = vld1q_f32(&pSource->_31); +#endif + + float32x2_t l = vget_low_f32(vTemp1); + float32x4_t T0 = vcombine_f32(vTemp0.val[0], l); + float32x2_t rl = vrev64_f32(l); + float32x4_t T1 = vcombine_f32(vTemp0.val[1], rl); + + float32x2_t h = vget_high_f32(vTemp1); + float32x4_t T2 = vcombine_f32(vTemp0.val[2], h); + float32x2_t rh = vrev64_f32(h); + float32x4_t T3 = vcombine_f32(vTemp0.val[3], rh); + + XMMATRIX M = {}; + M.r[0] = vandq_u32(T0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_load_ps(&pSource->_11); + M.r[1] = _mm_load_ps(&pSource->_21); + M.r[2] = _mm_load_ps(&pSource->_31); + M.r[3] = g_XMIdentityR3; + + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + XMMATRIX mResult; + + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = 
_mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x4(const XMFLOAT4X4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = pSource->m[0][3]; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = pSource->m[1][3]; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = pSource->m[2][3]; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = pSource->m[3][3]; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = vld1q_f32(reinterpret_cast(&pSource->_11)); + M.r[1] = vld1q_f32(reinterpret_cast(&pSource->_21)); + M.r[2] = vld1q_f32(reinterpret_cast(&pSource->_31)); + M.r[3] = vld1q_f32(reinterpret_cast(&pSource->_41)); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_loadu_ps(&pSource->_11); + M.r[1] = _mm_loadu_ps(&pSource->_21); + M.r[2] = _mm_loadu_ps(&pSource->_31); + M.r[3] = _mm_loadu_ps(&pSource->_41); + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x4A(const XMFLOAT4X4A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = pSource->m[0][3]; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = pSource->m[1][3]; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = pSource->m[2][3]; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = pSource->m[3][3]; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; +#ifdef _MSC_VER + M.r[0] = vld1q_f32_ex(reinterpret_cast(&pSource->_11), 128); + M.r[1] = vld1q_f32_ex(reinterpret_cast(&pSource->_21), 128); + M.r[2] = vld1q_f32_ex(reinterpret_cast(&pSource->_31), 128); + M.r[3] = vld1q_f32_ex(reinterpret_cast(&pSource->_41), 128); +#else + M.r[0] = vld1q_f32(reinterpret_cast(&pSource->_11)); + M.r[1] = vld1q_f32(reinterpret_cast(&pSource->_21)); + M.r[2] = vld1q_f32(reinterpret_cast(&pSource->_31)); + M.r[3] = vld1q_f32(reinterpret_cast(&pSource->_41)); +#endif + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_load_ps(&pSource->_11); + M.r[1] = _mm_load_ps(&pSource->_21); + M.r[2] = _mm_load_ps(&pSource->_31); + M.r[3] = _mm_load_ps(&pSource->_41); + return M; +#endif +} + 
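These matrix loads pair with the stores in the next section: the plain XMFLOAT4X4 structs are the at-rest storage format, and XMMATRIX is meant to live only in locals and registers. A minimal round-trip sketch, assuming this patch's include path (compose_world and its parameters are illustrative names, not part of the library):

    #include "directxmath/directxmath.h"

    // Combine a local transform with its parent, keeping SIMD math in registers.
    inline void compose_world(const XMFLOAT4X4& local, const XMFLOAT4X4& parent,
                              XMFLOAT4X4* world)
    {
        XMMATRIX l = XMLoadFloat4x4(&local);            // storage -> XMMATRIX
        XMMATRIX p = XMLoadFloat4x4(&parent);
        XMStoreFloat4x4(world, XMMatrixMultiply(l, p)); // XMMATRIX -> storage
    }
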
+/**************************************************************************** + * + * Vector and matrix store operations + * + ****************************************************************************/ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + *pDestination = XMVectorGetIntX(V); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(pDestination, *reinterpret_cast(&V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(reinterpret_cast(pDestination), V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat +( + float* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + *pDestination = XMVectorGetX(V); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(pDestination, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(pDestination, V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt2 +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); + vst1_u32(pDestination, VL); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt2A +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); +#ifdef _MSC_VER + vst1_u32_ex(pDestination, VL, 64); +#else + vst1_u32(pDestination, VL); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat2 +( + XMFLOAT2* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + vst1_f32(reinterpret_cast(pDestination), VL); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat2A +( + XMFLOAT2A* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); +#ifdef _MSC_VER + vst1_f32_ex(reinterpret_cast(pDestination), VL, 64); +#else + vst1_f32(reinterpret_cast(pDestination), VL); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + 
_mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreSInt2 +( + XMINT2* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x2_t v = vget_low_s32(V); + v = vcvt_s32_f32(v); + vst1_s32(reinterpret_cast(pDestination), v); +#elif defined(_XM_SSE_INTRINSICS_) + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(V, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(V); + // If there was positive overflow, set to 0x7FFFFFFF + XMVECTOR vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + // Write two ints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vOverflow)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreUInt2 +( + XMUINT2* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t v = vget_low_f32(V); + uint32x2_t iv = vcvt_u32_f32(v); + vst1_u32(reinterpret_cast(pDestination), iv); +#elif defined(_XM_SSE_INTRINSICS_) + // Clamp to >=0 + XMVECTOR vResult = _mm_max_ps(V, g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? 
+ XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + // Write two uints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vResult)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt3 +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); + vst1_u32(pDestination, VL); + vst1q_lane_u32(pDestination + 2, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(&pDestination[2]), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt3A +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); +#ifdef _MSC_VER + vst1_u32_ex(pDestination, VL, 64); +#else + vst1_u32(pDestination, VL); +#endif + vst1q_lane_u32(pDestination + 2, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = _mm_movehl_ps(V, V); + _mm_store_ss(reinterpret_cast(&pDestination[2]), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3 +( + XMFLOAT3* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + vst1_f32(reinterpret_cast(pDestination), VL); + vst1q_lane_f32(reinterpret_cast(pDestination) + 2, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + * reinterpret_cast(&pDestination->x) = _mm_extract_ps(V, 0); + *reinterpret_cast(&pDestination->y) = _mm_extract_ps(V, 1); + *reinterpret_cast(&pDestination->z) = _mm_extract_ps(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(&pDestination->z, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3A +( + XMFLOAT3A* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + 
assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); +#ifdef _MSC_VER + vst1_f32_ex(reinterpret_cast(pDestination), VL, 64); +#else + vst1_f32(reinterpret_cast(pDestination), VL); +#endif + vst1q_lane_f32(reinterpret_cast(pDestination) + 2, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + *reinterpret_cast(&pDestination->z) = _mm_extract_ps(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = _mm_movehl_ps(V, V); + _mm_store_ss(&pDestination->z, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreSInt3 +( + XMINT3* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t v = vcvtq_s32_f32(V); + int32x2_t vL = vget_low_s32(v); + vst1_s32(reinterpret_cast(pDestination), vL); + vst1q_lane_s32(reinterpret_cast(pDestination) + 2, v, 2); +#elif defined(_XM_SSE_INTRINSICS_) + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(V, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(V); + // If there was positive overflow, set to 0x7FFFFFFF + XMVECTOR vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + // Write 3 uints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vOverflow)); + __m128 z = XM_PERMUTE_PS(vOverflow, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(&pDestination->z), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreUInt3 +( + XMUINT3* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t v = vcvtq_u32_f32(V); + uint32x2_t vL = vget_low_u32(v); + vst1_u32(reinterpret_cast(pDestination), vL); + vst1q_lane_u32(reinterpret_cast(pDestination) + 2, v, 2); +#elif defined(_XM_SSE_INTRINSICS_) + // Clamp to >=0 + XMVECTOR vResult = _mm_max_ps(V, g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? 
+ XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + // Write 3 uints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vResult)); + __m128 z = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(&pDestination->z), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt4 +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; + pDestination[3] = V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_u32(pDestination, V); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_storeu_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt4A +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; + pDestination[3] = V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + vst1q_u32_ex(pDestination, V, 128); +#else + vst1q_u32(pDestination, V); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4 +( + XMFLOAT4* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; + pDestination->w = V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_f32(reinterpret_cast(pDestination), V); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_storeu_ps(&pDestination->x, V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4A +( + XMFLOAT4A* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; + pDestination->w = V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + vst1q_f32_ex(reinterpret_cast(pDestination), V, 128); +#else + vst1q_f32(reinterpret_cast(pDestination), V); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ps(&pDestination->x, V); +#endif +} + 
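Note the contract difference visible above: XMStoreFloat4 issues an unaligned _mm_storeu_ps, while XMStoreFloat4A asserts 16-byte alignment and uses the aligned _mm_store_ps. A short usage sketch (out is an illustrative name); the A-suffixed storage types carry the required alignment:

    XMFLOAT4A out;   // declared 16-byte aligned, so the assert above holds
    XMStoreFloat4A(&out, XMVectorSet(1.0f, 2.0f, 3.0f, 4.0f));
    // A plain XMFLOAT4 with no alignment guarantee should instead go through
    // XMStoreFloat4, which tolerates any address.
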
+//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreSInt4 +( + XMINT4* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); + pDestination->w = static_cast(V.vector4_f32[3]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t v = vcvtq_s32_f32(V); + vst1q_s32(reinterpret_cast(pDestination), v); +#elif defined(_XM_SSE_INTRINSICS_) + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(V, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(V); + // If there was positive overflow, set to 0x7FFFFFFF + XMVECTOR vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(vOverflow)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreUInt4 +( + XMUINT4* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); + pDestination->w = static_cast(V.vector4_f32[3]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t v = vcvtq_u32_f32(V); + vst1q_u32(reinterpret_cast(pDestination), v); +#elif defined(_XM_SSE_INTRINSICS_) + // Clamp to >=0 + XMVECTOR vResult = _mm_max_ps(V, g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? 
+ XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(vResult)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3x3 +( + XMFLOAT3X3* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1); + float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1); + vst1q_f32(&pDestination->m[0][0], T2); + + T1 = vextq_f32(M.r[1], M.r[1], 1); + T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2])); + vst1q_f32(&pDestination->m[1][1], T2); + + vst1q_lane_f32(&pDestination->m[2][2], M.r[2], 2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp1 = M.r[0]; + XMVECTOR vTemp2 = M.r[1]; + XMVECTOR vTemp3 = M.r[2]; + XMVECTOR vWork = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(0, 0, 2, 2)); + vTemp1 = _mm_shuffle_ps(vTemp1, vWork, _MM_SHUFFLE(2, 0, 1, 0)); + _mm_storeu_ps(&pDestination->m[0][0], vTemp1); + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp3, _MM_SHUFFLE(1, 0, 2, 1)); + _mm_storeu_ps(&pDestination->m[1][1], vTemp2); + vTemp3 = XM_PERMUTE_PS(vTemp3, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(&pDestination->m[2][2], vTemp3); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4x3 +( + XMFLOAT4X3* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + + pDestination->m[3][0] = M.r[3].vector4_f32[0]; + pDestination->m[3][1] = M.r[3].vector4_f32[1]; + pDestination->m[3][2] = M.r[3].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1); + float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1); + vst1q_f32(&pDestination->m[0][0], T2); + + T1 = vextq_f32(M.r[1], M.r[1], 1); + T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2])); + vst1q_f32(&pDestination->m[1][1], T2); + + T1 = 
+    T2 = vextq_f32(T1, M.r[3], 3);
+    vst1q_f32(&pDestination->m[2][2], T2);
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vTemp1 = M.r[0];
+    XMVECTOR vTemp2 = M.r[1];
+    XMVECTOR vTemp3 = M.r[2];
+    XMVECTOR vTemp4 = M.r[3];
+    XMVECTOR vTemp2x = _mm_shuffle_ps(vTemp2, vTemp3, _MM_SHUFFLE(1, 0, 2, 1));
+    vTemp2 = _mm_shuffle_ps(vTemp2, vTemp1, _MM_SHUFFLE(2, 2, 0, 0));
+    vTemp1 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(0, 2, 1, 0));
+    vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(0, 0, 2, 2));
+    vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 1, 2, 0));
+    _mm_storeu_ps(&pDestination->m[0][0], vTemp1);
+    _mm_storeu_ps(&pDestination->m[1][1], vTemp2x);
+    _mm_storeu_ps(&pDestination->m[2][2], vTemp3);
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline void XM_CALLCONV XMStoreFloat4x3A
+(
+    XMFLOAT4X3A* pDestination,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pDestination);
+    assert((reinterpret_cast<uintptr_t>(pDestination) & 0xF) == 0);
+#if defined(_XM_NO_INTRINSICS_)
+
+    pDestination->m[0][0] = M.r[0].vector4_f32[0];
+    pDestination->m[0][1] = M.r[0].vector4_f32[1];
+    pDestination->m[0][2] = M.r[0].vector4_f32[2];
+
+    pDestination->m[1][0] = M.r[1].vector4_f32[0];
+    pDestination->m[1][1] = M.r[1].vector4_f32[1];
+    pDestination->m[1][2] = M.r[1].vector4_f32[2];
+
+    pDestination->m[2][0] = M.r[2].vector4_f32[0];
+    pDestination->m[2][1] = M.r[2].vector4_f32[1];
+    pDestination->m[2][2] = M.r[2].vector4_f32[2];
+
+    pDestination->m[3][0] = M.r[3].vector4_f32[0];
+    pDestination->m[3][1] = M.r[3].vector4_f32[1];
+    pDestination->m[3][2] = M.r[3].vector4_f32[2];
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+#ifdef _MSC_VER
+    float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1);
+    float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1);
+    vst1q_f32_ex(&pDestination->m[0][0], T2, 128);
+
+    T1 = vextq_f32(M.r[1], M.r[1], 1);
+    T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2]));
+    vst1q_f32_ex(&pDestination->m[1][1], T2, 128);
+
+    T1 = vdupq_lane_f32(vget_high_f32(M.r[2]), 0);
+    T2 = vextq_f32(T1, M.r[3], 3);
+    vst1q_f32_ex(&pDestination->m[2][2], T2, 128);
+#else
+    float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1);
+    float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1);
+    vst1q_f32(&pDestination->m[0][0], T2);
+
+    T1 = vextq_f32(M.r[1], M.r[1], 1);
+    T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2]));
+    vst1q_f32(&pDestination->m[1][1], T2);
+
+    T1 = vdupq_lane_f32(vget_high_f32(M.r[2]), 0);
+    T2 = vextq_f32(T1, M.r[3], 3);
+    vst1q_f32(&pDestination->m[2][2], T2);
+#endif
+#elif defined(_XM_SSE_INTRINSICS_)
+    // x1,y1,z1,w1
+    XMVECTOR vTemp1 = M.r[0];
+    // x2,y2,z2,w2
+    XMVECTOR vTemp2 = M.r[1];
+    // x3,y3,z3,w3
+    XMVECTOR vTemp3 = M.r[2];
+    // x4,y4,z4,w4
+    XMVECTOR vTemp4 = M.r[3];
+    // z1,z1,x2,y2
+    XMVECTOR vTemp = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(1, 0, 2, 2));
+    // y2,z2,x3,y3 (Final)
+    vTemp2 = _mm_shuffle_ps(vTemp2, vTemp3, _MM_SHUFFLE(1, 0, 2, 1));
+    // x1,y1,z1,x2 (Final)
+    vTemp1 = _mm_shuffle_ps(vTemp1, vTemp, _MM_SHUFFLE(2, 0, 1, 0));
+    // z3,z3,x4,x4
+    vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(0, 0, 2, 2));
+    // z3,x4,y4,z4 (Final)
+    vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 1, 2, 0));
+    // Store in 3 operations
+    _mm_store_ps(&pDestination->m[0][0], vTemp1);
+    _mm_store_ps(&pDestination->m[1][1], vTemp2);
+    _mm_store_ps(&pDestination->m[2][2], vTemp3);
+#endif
+}
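+
+// (Editorial sketch, not part of the upstream header: an affine transform has
+// a constant (0,0,0,1) last column, so the 4x3 stores above let a caller pack
+// a matrix into 12 floats instead of 16 -- e.g. a bone palette, where "bones"
+// and "world" are illustrative names only:
+//
+//     XMFLOAT4X3 bones[64];
+//     XMStoreFloat4x3(&bones[i], world);   // the implicit w column is dropped
+// )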
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline void XM_CALLCONV XMStoreFloat3x4
+(
+    XMFLOAT3X4* pDestination,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pDestination);
+#if defined(_XM_NO_INTRINSICS_)
+
+    pDestination->m[0][0] = M.r[0].vector4_f32[0];
+    pDestination->m[0][1] = M.r[1].vector4_f32[0];
+    pDestination->m[0][2] = M.r[2].vector4_f32[0];
+    pDestination->m[0][3] = M.r[3].vector4_f32[0];
+
+    pDestination->m[1][0] = M.r[0].vector4_f32[1];
+    pDestination->m[1][1] = M.r[1].vector4_f32[1];
+    pDestination->m[1][2] = M.r[2].vector4_f32[1];
+    pDestination->m[1][3] = M.r[3].vector4_f32[1];
+
+    pDestination->m[2][0] = M.r[0].vector4_f32[2];
+    pDestination->m[2][1] = M.r[1].vector4_f32[2];
+    pDestination->m[2][2] = M.r[2].vector4_f32[2];
+    pDestination->m[2][3] = M.r[3].vector4_f32[2];
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4x2_t P0 = vzipq_f32(M.r[0], M.r[2]);
+    float32x4x2_t P1 = vzipq_f32(M.r[1], M.r[3]);
+
+    float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]);
+    float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]);
+
+    vst1q_f32(&pDestination->m[0][0], T0.val[0]);
+    vst1q_f32(&pDestination->m[1][0], T0.val[1]);
+    vst1q_f32(&pDestination->m[2][0], T1.val[0]);
+#elif defined(_XM_SSE_INTRINSICS_)
+    // x.x,x.y,y.x,y.y
+    XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0));
+    // x.z,x.w,y.z,y.w
+    XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2));
+    // z.x,z.y,w.x,w.y
+    XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0));
+    // z.z,z.w,w.z,w.w
+    XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2));
+
+    // x.x,y.x,z.x,w.x
+    XMVECTOR r0 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0));
+    // x.y,y.y,z.y,w.y
+    XMVECTOR r1 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1));
+    // x.z,y.z,z.z,w.z
+    XMVECTOR r2 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0));
+
+    _mm_storeu_ps(&pDestination->m[0][0], r0);
+    _mm_storeu_ps(&pDestination->m[1][0], r1);
+    _mm_storeu_ps(&pDestination->m[2][0], r2);
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline void XM_CALLCONV XMStoreFloat3x4A
+(
+    XMFLOAT3X4A* pDestination,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pDestination);
+    assert((reinterpret_cast<uintptr_t>(pDestination) & 0xF) == 0);
+#if defined(_XM_NO_INTRINSICS_)
+
+    pDestination->m[0][0] = M.r[0].vector4_f32[0];
+    pDestination->m[0][1] = M.r[1].vector4_f32[0];
+    pDestination->m[0][2] = M.r[2].vector4_f32[0];
+    pDestination->m[0][3] = M.r[3].vector4_f32[0];
+
+    pDestination->m[1][0] = M.r[0].vector4_f32[1];
+    pDestination->m[1][1] = M.r[1].vector4_f32[1];
+    pDestination->m[1][2] = M.r[2].vector4_f32[1];
+    pDestination->m[1][3] = M.r[3].vector4_f32[1];
+
+    pDestination->m[2][0] = M.r[0].vector4_f32[2];
+    pDestination->m[2][1] = M.r[1].vector4_f32[2];
+    pDestination->m[2][2] = M.r[2].vector4_f32[2];
+    pDestination->m[2][3] = M.r[3].vector4_f32[2];
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4x2_t P0 = vzipq_f32(M.r[0], M.r[2]);
+    float32x4x2_t P1 = vzipq_f32(M.r[1], M.r[3]);
+
+    float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]);
+    float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]);
+
+#ifdef _MSC_VER
+    vst1q_f32_ex(&pDestination->m[0][0], T0.val[0], 128);
+    vst1q_f32_ex(&pDestination->m[1][0], T0.val[1], 128);
+    vst1q_f32_ex(&pDestination->m[2][0], T1.val[0], 128);
+#else
+    vst1q_f32(&pDestination->m[0][0], T0.val[0]);
+    vst1q_f32(&pDestination->m[1][0], T0.val[1]);
+    vst1q_f32(&pDestination->m[2][0], T1.val[0]);
+#endif
+#elif defined(_XM_SSE_INTRINSICS_)
+    // x.x,x.y,y.x,y.y
+    XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0));
+    // x.z,x.w,y.z,y.w
+    XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2));
+    // z.x,z.y,w.x,w.y
+    XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0));
+    // z.z,z.w,w.z,w.w
+    XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2));
+
+    // x.x,y.x,z.x,w.x
+    XMVECTOR r0 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0));
+    // x.y,y.y,z.y,w.y
+    XMVECTOR r1 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1));
+    // x.z,y.z,z.z,w.z
+    XMVECTOR r2 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0));
+
+    _mm_store_ps(&pDestination->m[0][0], r0);
+    _mm_store_ps(&pDestination->m[1][0], r1);
+    _mm_store_ps(&pDestination->m[2][0], r2);
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline void XM_CALLCONV XMStoreFloat4x4
+(
+    XMFLOAT4X4* pDestination,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pDestination);
+#if defined(_XM_NO_INTRINSICS_)
+
+    pDestination->m[0][0] = M.r[0].vector4_f32[0];
+    pDestination->m[0][1] = M.r[0].vector4_f32[1];
+    pDestination->m[0][2] = M.r[0].vector4_f32[2];
+    pDestination->m[0][3] = M.r[0].vector4_f32[3];
+
+    pDestination->m[1][0] = M.r[1].vector4_f32[0];
+    pDestination->m[1][1] = M.r[1].vector4_f32[1];
+    pDestination->m[1][2] = M.r[1].vector4_f32[2];
+    pDestination->m[1][3] = M.r[1].vector4_f32[3];
+
+    pDestination->m[2][0] = M.r[2].vector4_f32[0];
+    pDestination->m[2][1] = M.r[2].vector4_f32[1];
+    pDestination->m[2][2] = M.r[2].vector4_f32[2];
+    pDestination->m[2][3] = M.r[2].vector4_f32[3];
+
+    pDestination->m[3][0] = M.r[3].vector4_f32[0];
+    pDestination->m[3][1] = M.r[3].vector4_f32[1];
+    pDestination->m[3][2] = M.r[3].vector4_f32[2];
+    pDestination->m[3][3] = M.r[3].vector4_f32[3];
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_11), M.r[0]);
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_21), M.r[1]);
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_31), M.r[2]);
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_41), M.r[3]);
+#elif defined(_XM_SSE_INTRINSICS_)
+    _mm_storeu_ps(&pDestination->_11, M.r[0]);
+    _mm_storeu_ps(&pDestination->_21, M.r[1]);
+    _mm_storeu_ps(&pDestination->_31, M.r[2]);
+    _mm_storeu_ps(&pDestination->_41, M.r[3]);
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline void XM_CALLCONV XMStoreFloat4x4A
+(
+    XMFLOAT4X4A* pDestination,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pDestination);
+    assert((reinterpret_cast<uintptr_t>(pDestination) & 0xF) == 0);
+#if defined(_XM_NO_INTRINSICS_)
+
+    pDestination->m[0][0] = M.r[0].vector4_f32[0];
+    pDestination->m[0][1] = M.r[0].vector4_f32[1];
+    pDestination->m[0][2] = M.r[0].vector4_f32[2];
+    pDestination->m[0][3] = M.r[0].vector4_f32[3];
+
+    pDestination->m[1][0] = M.r[1].vector4_f32[0];
+    pDestination->m[1][1] = M.r[1].vector4_f32[1];
+    pDestination->m[1][2] = M.r[1].vector4_f32[2];
+    pDestination->m[1][3] = M.r[1].vector4_f32[3];
+
+    pDestination->m[2][0] = M.r[2].vector4_f32[0];
+    pDestination->m[2][1] = M.r[2].vector4_f32[1];
+    pDestination->m[2][2] = M.r[2].vector4_f32[2];
+    pDestination->m[2][3] = M.r[2].vector4_f32[3];
+
+    pDestination->m[3][0] = M.r[3].vector4_f32[0];
+    pDestination->m[3][1] = M.r[3].vector4_f32[1];
+    pDestination->m[3][2] = M.r[3].vector4_f32[2];
+    pDestination->m[3][3] = M.r[3].vector4_f32[3];
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+#ifdef _MSC_VER
+    vst1q_f32_ex(reinterpret_cast<float*>(&pDestination->_11), M.r[0], 128);
+    vst1q_f32_ex(reinterpret_cast<float*>(&pDestination->_21), M.r[1], 128);
+    vst1q_f32_ex(reinterpret_cast<float*>(&pDestination->_31), M.r[2], 128);
+    vst1q_f32_ex(reinterpret_cast<float*>(&pDestination->_41), M.r[3], 128);
+#else
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_11), M.r[0]);
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_21), M.r[1]);
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_31), M.r[2]);
+    vst1q_f32(reinterpret_cast<float*>(&pDestination->_41), M.r[3]);
+#endif
+#elif defined(_XM_SSE_INTRINSICS_)
+    _mm_store_ps(&pDestination->_11, M.r[0]);
+    _mm_store_ps(&pDestination->_21, M.r[1]);
+    _mm_store_ps(&pDestination->_31, M.r[2]);
+    _mm_store_ps(&pDestination->_41, M.r[3]);
+#endif
+}
+
diff --git a/include/directxmath/directxmathmatrix.inl b/include/directxmath/directxmathmatrix.inl
new file mode 100644
index 0000000..606a5c6
--- /dev/null
+++ b/include/directxmath/directxmathmatrix.inl
@@ -0,0 +1,3413 @@
+//-------------------------------------------------------------------------------------
+// DirectXMathMatrix.inl -- SIMD C++ Math library
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+//
+// http://go.microsoft.com/fwlink/?LinkID=615560
+//-------------------------------------------------------------------------------------
+
+#pragma once
+
+/****************************************************************************
+ *
+ * Matrix
+ *
+ ****************************************************************************/
+
+ //------------------------------------------------------------------------------
+ // Comparison operations
+ //------------------------------------------------------------------------------
+
+ //------------------------------------------------------------------------------
+
+#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma float_control(push)
+#pragma float_control(precise, on)
+#endif
+
+// Return true if any entry in the matrix is NaN
+inline bool XM_CALLCONV XMMatrixIsNaN(FXMMATRIX M) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    size_t i = 16;
+    auto pWork = reinterpret_cast<const uint32_t*>(&M.m[0][0]);
+    do {
+        // Fetch value into integer unit
+        uint32_t uTest = pWork[0];
+        // Remove sign
+        uTest &= 0x7FFFFFFFU;
+        // NaN is 0x7F800001 through 0x7FFFFFFF inclusive
+        uTest -= 0x7F800001U;
+        if (uTest < 0x007FFFFFU)
+        {
+            break;      // NaN found
+        }
+        ++pWork;        // Next entry
+    } while (--i);
+    return (i != 0);    // i == 0 if nothing matched
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Load in registers
+    XMVECTOR vX = M.r[0];
+    XMVECTOR vY = M.r[1];
+    XMVECTOR vZ = M.r[2];
+    XMVECTOR vW = M.r[3];
+    // Test themselves to check for NaN
+    vX = vmvnq_u32(vceqq_f32(vX, vX));
+    vY = vmvnq_u32(vceqq_f32(vY, vY));
+    vZ = vmvnq_u32(vceqq_f32(vZ, vZ));
+    vW = vmvnq_u32(vceqq_f32(vW, vW));
+    // Or all the results
+    vX = vorrq_u32(vX, vZ);
+    vY = vorrq_u32(vY, vW);
+    vX = vorrq_u32(vX, vY);
+    // If any tested true, return true
+    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vX), vget_high_u8(vX));
+    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+    uint32_t r = vget_lane_u32(vTemp2.val[1], 1);
+    return (r != 0);
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Load in registers
+    XMVECTOR vX = M.r[0];
+    XMVECTOR vY = M.r[1];
+    XMVECTOR vZ = M.r[2];
+    XMVECTOR vW = M.r[3];
+    // Test themselves to check for NaN
+    vX = _mm_cmpneq_ps(vX, vX);
+    vY = _mm_cmpneq_ps(vY, vY);
+    vZ = _mm_cmpneq_ps(vZ, vZ);
+    vW = _mm_cmpneq_ps(vW, vW);
+    // Or all the results
+    vX = _mm_or_ps(vX, vZ);
+    vY = _mm_or_ps(vY, vW);
+    vX = _mm_or_ps(vX, vY);
+    // If any tested true, return true
+    return (_mm_movemask_ps(vX) != 0);
+#else
+#endif
+}
+
+#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma float_control(pop)
+#endif
+
+//------------------------------------------------------------------------------
+
+// Return true if any entry in the matrix is +/-INF
+inline bool XM_CALLCONV XMMatrixIsInfinite(FXMMATRIX M) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    size_t i = 16;
+    auto pWork = reinterpret_cast<const uint32_t*>(&M.m[0][0]);
+    do {
+        // Fetch value into integer unit
+        uint32_t uTest = pWork[0];
+        // Remove sign
+        uTest &= 0x7FFFFFFFU;
+        // INF is 0x7F800000
+        if (uTest == 0x7F800000U)
+        {
+            break;      // INF found
+        }
+        ++pWork;        // Next entry
+    } while (--i);
+    return (i != 0);    // i == 0 if nothing matched
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Mask off the sign bits
+    XMVECTOR vTemp1 = vandq_u32(M.r[0], g_XMAbsMask);
+    XMVECTOR vTemp2 = vandq_u32(M.r[1], g_XMAbsMask);
+    XMVECTOR vTemp3 = vandq_u32(M.r[2], g_XMAbsMask);
+    XMVECTOR vTemp4 = vandq_u32(M.r[3], g_XMAbsMask);
+    // Compare to infinity
+    vTemp1 = vceqq_f32(vTemp1, g_XMInfinity);
+    vTemp2 = vceqq_f32(vTemp2, g_XMInfinity);
+    vTemp3 = vceqq_f32(vTemp3, g_XMInfinity);
+    vTemp4 = vceqq_f32(vTemp4, g_XMInfinity);
+    // Or the answers together
+    vTemp1 = vorrq_u32(vTemp1, vTemp2);
+    vTemp3 = vorrq_u32(vTemp3, vTemp4);
+    vTemp1 = vorrq_u32(vTemp1, vTemp3);
+    // If any are infinity, the signs are true.
+    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1));
+    uint16x4x2_t vTemp5 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+    uint32_t r = vget_lane_u32(vTemp5.val[1], 1);
+    return (r != 0);
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Mask off the sign bits
+    XMVECTOR vTemp1 = _mm_and_ps(M.r[0], g_XMAbsMask);
+    XMVECTOR vTemp2 = _mm_and_ps(M.r[1], g_XMAbsMask);
+    XMVECTOR vTemp3 = _mm_and_ps(M.r[2], g_XMAbsMask);
+    XMVECTOR vTemp4 = _mm_and_ps(M.r[3], g_XMAbsMask);
+    // Compare to infinity
+    vTemp1 = _mm_cmpeq_ps(vTemp1, g_XMInfinity);
+    vTemp2 = _mm_cmpeq_ps(vTemp2, g_XMInfinity);
+    vTemp3 = _mm_cmpeq_ps(vTemp3, g_XMInfinity);
+    vTemp4 = _mm_cmpeq_ps(vTemp4, g_XMInfinity);
+    // Or the answers together
+    vTemp1 = _mm_or_ps(vTemp1, vTemp2);
+    vTemp3 = _mm_or_ps(vTemp3, vTemp4);
+    vTemp1 = _mm_or_ps(vTemp1, vTemp3);
+    // If any are infinity, the signs are true.
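+    // (Note, added for clarity: each lane that compared equal to +/-INF is
+    // all-ones, and _mm_movemask_ps gathers the sign bit of every lane, so
+    // any nonzero mask below means at least one infinite entry.)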
+    return (_mm_movemask_ps(vTemp1) != 0);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Return true if the XMMatrix is equal to identity
+inline bool XM_CALLCONV XMMatrixIsIdentity(FXMMATRIX M) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    // Use the integer pipeline to reduce branching to a minimum
+    auto pWork = reinterpret_cast<const uint32_t*>(&M.m[0][0]);
+    // Convert 1.0f to zero and or them together
+    uint32_t uOne = pWork[0] ^ 0x3F800000U;
+    // Or all the 0.0f entries together
+    uint32_t uZero = pWork[1];
+    uZero |= pWork[2];
+    uZero |= pWork[3];
+    // 2nd row
+    uZero |= pWork[4];
+    uOne |= pWork[5] ^ 0x3F800000U;
+    uZero |= pWork[6];
+    uZero |= pWork[7];
+    // 3rd row
+    uZero |= pWork[8];
+    uZero |= pWork[9];
+    uOne |= pWork[10] ^ 0x3F800000U;
+    uZero |= pWork[11];
+    // 4th row
+    uZero |= pWork[12];
+    uZero |= pWork[13];
+    uZero |= pWork[14];
+    uOne |= pWork[15] ^ 0x3F800000U;
+    // If all zero entries are zero, then uZero==0
+    uZero &= 0x7FFFFFFF;    // Allow -0.0f
+    // If all 1.0f entries are 1.0f, then uOne==0
+    uOne |= uZero;
+    return (uOne == 0);
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    XMVECTOR vTemp1 = vceqq_f32(M.r[0], g_XMIdentityR0);
+    XMVECTOR vTemp2 = vceqq_f32(M.r[1], g_XMIdentityR1);
+    XMVECTOR vTemp3 = vceqq_f32(M.r[2], g_XMIdentityR2);
+    XMVECTOR vTemp4 = vceqq_f32(M.r[3], g_XMIdentityR3);
+    vTemp1 = vandq_u32(vTemp1, vTemp2);
+    vTemp3 = vandq_u32(vTemp3, vTemp4);
+    vTemp1 = vandq_u32(vTemp1, vTemp3);
+    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1));
+    uint16x4x2_t vTemp5 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+    uint32_t r = vget_lane_u32(vTemp5.val[1], 1);
+    return (r == 0xFFFFFFFFU);
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vTemp1 = _mm_cmpeq_ps(M.r[0], g_XMIdentityR0);
+    XMVECTOR vTemp2 = _mm_cmpeq_ps(M.r[1], g_XMIdentityR1);
+    XMVECTOR vTemp3 = _mm_cmpeq_ps(M.r[2], g_XMIdentityR2);
+    XMVECTOR vTemp4 = _mm_cmpeq_ps(M.r[3], g_XMIdentityR3);
+    vTemp1 = _mm_and_ps(vTemp1, vTemp2);
+    vTemp3 = _mm_and_ps(vTemp3, vTemp4);
+    vTemp1 = _mm_and_ps(vTemp1, vTemp3);
+    return (_mm_movemask_ps(vTemp1) == 0x0f);
+#endif
+}
+
+//------------------------------------------------------------------------------
+// Computation operations
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// Perform a 4x4 matrix multiply by a 4x4 matrix
+inline XMMATRIX XM_CALLCONV XMMatrixMultiply
+(
+    FXMMATRIX M1,
+    CXMMATRIX M2
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMMATRIX mResult;
+    // Cache the invariants in registers
+    float x = M1.m[0][0];
+    float y = M1.m[0][1];
+    float z = M1.m[0][2];
+    float w = M1.m[0][3];
+    // Perform the operation on the first row
+    mResult.m[0][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w);
+    mResult.m[0][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w);
+    mResult.m[0][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w);
+    mResult.m[0][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w);
+    // Repeat for all the other rows
+    x = M1.m[1][0];
+    y = M1.m[1][1];
+    z = M1.m[1][2];
+    w = M1.m[1][3];
+    mResult.m[1][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w);
+    mResult.m[1][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w);
+    mResult.m[1][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w);
+    mResult.m[1][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w);
+    x = M1.m[2][0];
+    y = M1.m[2][1];
+    z = M1.m[2][2];
+    w = M1.m[2][3];
+    mResult.m[2][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w);
+    mResult.m[2][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w);
+    mResult.m[2][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w);
+    mResult.m[2][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w);
+    x = M1.m[3][0];
+    y = M1.m[3][1];
+    z = M1.m[3][2];
+    w = M1.m[3][3];
+    mResult.m[3][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w);
+    mResult.m[3][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w);
+    mResult.m[3][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w);
+    mResult.m[3][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w);
+    return mResult;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    XMMATRIX mResult;
+    float32x2_t VL = vget_low_f32(M1.r[0]);
+    float32x2_t VH = vget_high_f32(M1.r[0]);
+    // Perform the operation on the first row
+    XMVECTOR vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    XMVECTOR vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    XMVECTOR vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    XMVECTOR vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    mResult.r[0] = vaddq_f32(vZ, vW);
+    // Repeat for the other 3 rows
+    VL = vget_low_f32(M1.r[1]);
+    VH = vget_high_f32(M1.r[1]);
+    vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    mResult.r[1] = vaddq_f32(vZ, vW);
+    VL = vget_low_f32(M1.r[2]);
+    VH = vget_high_f32(M1.r[2]);
+    vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    mResult.r[2] = vaddq_f32(vZ, vW);
+    VL = vget_low_f32(M1.r[3]);
+    VH = vget_high_f32(M1.r[3]);
+    vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    mResult.r[3] = vaddq_f32(vZ, vW);
+    return mResult;
+#elif defined(_XM_AVX2_INTRINSICS_)
+    __m256 t0 = _mm256_castps128_ps256(M1.r[0]);
+    t0 = _mm256_insertf128_ps(t0, M1.r[1], 1);
+    __m256 t1 = _mm256_castps128_ps256(M1.r[2]);
+    t1 = _mm256_insertf128_ps(t1, M1.r[3], 1);
+
+    __m256 u0 = _mm256_castps128_ps256(M2.r[0]);
+    u0 = _mm256_insertf128_ps(u0, M2.r[1], 1);
+    __m256 u1 = _mm256_castps128_ps256(M2.r[2]);
+    u1 = _mm256_insertf128_ps(u1, M2.r[3], 1);
+
+    __m256 a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(0, 0, 0, 0));
+    __m256 a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
+    __m256 b0 = _mm256_permute2f128_ps(u0, u0, 0x00);
+    __m256 c0 = _mm256_mul_ps(a0, b0);
+    __m256 c1 = _mm256_mul_ps(a1, b0);
+
+    a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(1, 1, 1, 1));
+    a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
+    b0 = _mm256_permute2f128_ps(u0, u0, 0x11);
+    __m256 c2 = _mm256_fmadd_ps(a0, b0, c0);
+    __m256 c3 = _mm256_fmadd_ps(a1, b0, c1);
+
+    a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(2, 2, 2, 2));
+    a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
+    __m256 b1 = _mm256_permute2f128_ps(u1, u1, 0x00);
+    __m256 c4 = _mm256_mul_ps(a0, b1);
+    __m256 c5 = _mm256_mul_ps(a1, b1);
+
+    a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(3, 3, 3, 3));
+    a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
+    b1 = _mm256_permute2f128_ps(u1, u1, 0x11);
+    __m256 c6 = _mm256_fmadd_ps(a0, b1, c4);
+    __m256 c7 = _mm256_fmadd_ps(a1, b1, c5);
+
+    t0 = _mm256_add_ps(c2, c6);
+    t1 = _mm256_add_ps(c3, c7);
+
+    XMMATRIX mResult;
+    mResult.r[0] = _mm256_castps256_ps128(t0);
+    mResult.r[1] = _mm256_extractf128_ps(t0, 1);
+    mResult.r[2] = _mm256_castps256_ps128(t1);
+    mResult.r[3] = _mm256_extractf128_ps(t1, 1);
+    return mResult;
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMMATRIX mResult;
+    // Splat the component X,Y,Z then W
+#if defined(_XM_AVX_INTRINSICS_)
+    XMVECTOR vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 0);
+    XMVECTOR vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 1);
+    XMVECTOR vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 2);
+    XMVECTOR vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 3);
+#else
+    // Use vW to hold the original row
+    XMVECTOR vW = M1.r[0];
+    XMVECTOR vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    XMVECTOR vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    XMVECTOR vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    // Perform the operation on the first row
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    // Perform a binary add to reduce cumulative errors
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    mResult.r[0] = vX;
+    // Repeat for the other 3 rows
+#if defined(_XM_AVX_INTRINSICS_)
+    vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 0);
+    vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 1);
+    vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 2);
+    vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 3);
+#else
+    vW = M1.r[1];
+    vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    mResult.r[1] = vX;
+#if defined(_XM_AVX_INTRINSICS_)
+    vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 0);
+    vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 1);
+    vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 2);
+    vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 3);
+#else
+    vW = M1.r[2];
+    vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    mResult.r[2] = vX;
+#if defined(_XM_AVX_INTRINSICS_)
+    vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 0);
+    vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 1);
+    vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 2);
+    vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 3);
+#else
+    vW = M1.r[3];
+    vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    mResult.r[3] = vX;
+    return mResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixMultiplyTranspose
+(
+    FXMMATRIX M1,
+    CXMMATRIX M2
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMMATRIX mResult;
+    // Cache the invariants in registers
+    float x = M2.m[0][0];
+    float y = M2.m[1][0];
+    float z = M2.m[2][0];
+    float w = M2.m[3][0];
+    // Perform the operation on the first row
+    mResult.m[0][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w);
+    mResult.m[0][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w);
+    mResult.m[0][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w);
+    mResult.m[0][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w);
+    // Repeat for all the other rows
+    x = M2.m[0][1];
+    y = M2.m[1][1];
+    z = M2.m[2][1];
+    w = M2.m[3][1];
+    mResult.m[1][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w);
+    mResult.m[1][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w);
+    mResult.m[1][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w);
+    mResult.m[1][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w);
+    x = M2.m[0][2];
+    y = M2.m[1][2];
+    z = M2.m[2][2];
+    w = M2.m[3][2];
+    mResult.m[2][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w);
+    mResult.m[2][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w);
+    mResult.m[2][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w);
+    mResult.m[2][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w);
+    x = M2.m[0][3];
+    y = M2.m[1][3];
+    z = M2.m[2][3];
+    w = M2.m[3][3];
+    mResult.m[3][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w);
+    mResult.m[3][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w);
+    mResult.m[3][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w);
+    mResult.m[3][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w);
+    return mResult;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x2_t VL = vget_low_f32(M1.r[0]);
+    float32x2_t VH = vget_high_f32(M1.r[0]);
+    // Perform the operation on the first row
+    XMVECTOR vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    XMVECTOR vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    XMVECTOR vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    XMVECTOR vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    float32x4_t r0 = vaddq_f32(vZ, vW);
+    // Repeat for the other 3 rows
+    VL = vget_low_f32(M1.r[1]);
+    VH = vget_high_f32(M1.r[1]);
+    vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    float32x4_t r1 = vaddq_f32(vZ, vW);
+    VL = vget_low_f32(M1.r[2]);
+    VH = vget_high_f32(M1.r[2]);
+    vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    float32x4_t r2 = vaddq_f32(vZ, vW);
+    VL = vget_low_f32(M1.r[3]);
+    VH = vget_high_f32(M1.r[3]);
+    vX = vmulq_lane_f32(M2.r[0], VL, 0);
+    vY = vmulq_lane_f32(M2.r[1], VL, 1);
+    vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0);
+    vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1);
+    float32x4_t r3 = vaddq_f32(vZ, vW);
+
+    // Transpose result
+    float32x4x2_t P0 = vzipq_f32(r0, r2);
+    float32x4x2_t P1 = vzipq_f32(r1, r3);
+
+    float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]);
+    float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]);
+
+    XMMATRIX mResult;
+    mResult.r[0] = T0.val[0];
+    mResult.r[1] = T0.val[1];
+    mResult.r[2] = T1.val[0];
+    mResult.r[3] = T1.val[1];
+    return mResult;
+#elif defined(_XM_AVX2_INTRINSICS_)
+    __m256 t0 = _mm256_castps128_ps256(M1.r[0]);
+    t0 = _mm256_insertf128_ps(t0, M1.r[1], 1);
+    __m256 t1 = _mm256_castps128_ps256(M1.r[2]);
+    t1 = _mm256_insertf128_ps(t1, M1.r[3], 1);
+
+    __m256 u0 = _mm256_castps128_ps256(M2.r[0]);
+    u0 = _mm256_insertf128_ps(u0, M2.r[1], 1);
+    __m256 u1 = _mm256_castps128_ps256(M2.r[2]);
+    u1 = _mm256_insertf128_ps(u1, M2.r[3], 1);
+
+    __m256 a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(0, 0, 0, 0));
+    __m256 a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
+    __m256 b0 = _mm256_permute2f128_ps(u0, u0, 0x00);
+    __m256 c0 = _mm256_mul_ps(a0, b0);
+    __m256 c1 = _mm256_mul_ps(a1, b0);
+
+    a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(1, 1, 1, 1));
+    a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
+    b0 = _mm256_permute2f128_ps(u0, u0, 0x11);
+    __m256 c2 = _mm256_fmadd_ps(a0, b0, c0);
+    __m256 c3 = _mm256_fmadd_ps(a1, b0, c1);
+
+    a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(2, 2, 2, 2));
+    a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
+    __m256 b1 = _mm256_permute2f128_ps(u1, u1, 0x00);
+    __m256 c4 = _mm256_mul_ps(a0, b1);
+    __m256 c5 = _mm256_mul_ps(a1, b1);
+
+    a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(3, 3, 3, 3));
+    a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
+    b1 = _mm256_permute2f128_ps(u1, u1, 0x11);
+    __m256 c6 = _mm256_fmadd_ps(a0, b1, c4);
+    __m256 c7 = _mm256_fmadd_ps(a1, b1, c5);
+
+    t0 = _mm256_add_ps(c2, c6);
+    t1 = _mm256_add_ps(c3, c7);
+
+    // Transpose result
+    __m256 vTemp = _mm256_unpacklo_ps(t0, t1);
+    __m256 vTemp2 = _mm256_unpackhi_ps(t0, t1);
+    __m256 vTemp3 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20);
+    __m256 vTemp4 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31);
+    vTemp = _mm256_unpacklo_ps(vTemp3, vTemp4);
+    vTemp2 = _mm256_unpackhi_ps(vTemp3, vTemp4);
+    t0 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20);
+    t1 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31);
+
+    XMMATRIX mResult;
+    mResult.r[0] = _mm256_castps256_ps128(t0);
+    mResult.r[1] = _mm256_extractf128_ps(t0, 1);
+    mResult.r[2] = _mm256_castps256_ps128(t1);
+    mResult.r[3] = _mm256_extractf128_ps(t1, 1);
+    return mResult;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Splat the component X,Y,Z then W
+#if defined(_XM_AVX_INTRINSICS_)
+    XMVECTOR vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 0);
+    XMVECTOR vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 1);
+    XMVECTOR vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 2);
+    XMVECTOR vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[0]) + 3);
+#else
+    // Use vW to hold the original row
+    XMVECTOR vW = M1.r[0];
+    XMVECTOR vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    XMVECTOR vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    XMVECTOR vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    // Perform the operation on the first row
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    // Perform a binary add to reduce cumulative errors
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    XMVECTOR r0 = vX;
+    // Repeat for the other 3 rows
+#if defined(_XM_AVX_INTRINSICS_)
+    vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 0);
+    vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 1);
+    vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 2);
+    vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[1]) + 3);
+#else
+    vW = M1.r[1];
+    vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    XMVECTOR r1 = vX;
+#if defined(_XM_AVX_INTRINSICS_)
+    vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 0);
+    vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 1);
+    vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 2);
+    vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[2]) + 3);
+#else
+    vW = M1.r[2];
+    vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    XMVECTOR r2 = vX;
+#if defined(_XM_AVX_INTRINSICS_)
+    vX = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 0);
+    vY = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 1);
+    vZ = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 2);
+    vW = _mm_broadcast_ss(reinterpret_cast<const float*>(&M1.r[3]) + 3);
+#else
+    vW = M1.r[3];
+    vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0));
+    vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1));
+    vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2));
+    vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3));
+#endif
+    vX = _mm_mul_ps(vX, M2.r[0]);
+    vY = _mm_mul_ps(vY, M2.r[1]);
+    vZ = _mm_mul_ps(vZ, M2.r[2]);
+    vW = _mm_mul_ps(vW, M2.r[3]);
+    vX = _mm_add_ps(vX, vZ);
+    vY = _mm_add_ps(vY, vW);
+    vX = _mm_add_ps(vX, vY);
+    XMVECTOR r3 = vX;
+
+    // Transpose result
+    // x.x,x.y,y.x,y.y
+    XMVECTOR vTemp1 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(1, 0, 1, 0));
+    // x.z,x.w,y.z,y.w
+    XMVECTOR vTemp3 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(3, 2, 3, 2));
+    // z.x,z.y,w.x,w.y
+    XMVECTOR vTemp2 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(1, 0, 1, 0));
+    // z.z,z.w,w.z,w.w
+    XMVECTOR vTemp4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(3, 2, 3, 2));
+
+    XMMATRIX mResult;
+    // x.x,y.x,z.x,w.x
+    mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0));
+    // x.y,y.y,z.y,w.y
+    mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1));
+    // x.z,y.z,z.z,w.z
+    mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0));
+    // x.w,y.w,z.w,w.w
+    mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1));
+    return mResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixTranspose(FXMMATRIX M) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    // Original matrix:
+    //
+    //     m00m01m02m03
+    //     m10m11m12m13
+    //     m20m21m22m23
+    //     m30m31m32m33
+
+    XMMATRIX P;
+    P.r[0] = XMVectorMergeXY(M.r[0], M.r[2]); // m00m20m01m21
+    P.r[1] = XMVectorMergeXY(M.r[1], M.r[3]); // m10m30m11m31
+    P.r[2] = XMVectorMergeZW(M.r[0], M.r[2]); // m02m22m03m23
+    P.r[3] =
XMVectorMergeZW(M.r[1], M.r[3]); // m12m32m13m33 + + XMMATRIX MT; + MT.r[0] = XMVectorMergeXY(P.r[0], P.r[1]); // m00m10m20m30 + MT.r[1] = XMVectorMergeZW(P.r[0], P.r[1]); // m01m11m21m31 + MT.r[2] = XMVectorMergeXY(P.r[2], P.r[3]); // m02m12m22m32 + MT.r[3] = XMVectorMergeZW(P.r[2], P.r[3]); // m03m13m23m33 + return MT; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4x2_t P0 = vzipq_f32(M.r[0], M.r[2]); + float32x4x2_t P1 = vzipq_f32(M.r[1], M.r[3]); + + float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]); + float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]); + + XMMATRIX mResult; + mResult.r[0] = T0.val[0]; + mResult.r[1] = T0.val[1]; + mResult.r[2] = T1.val[0]; + mResult.r[3] = T1.val[1]; + return mResult; +#elif defined(_XM_AVX2_INTRINSICS_) + __m256 t0 = _mm256_castps128_ps256(M.r[0]); + t0 = _mm256_insertf128_ps(t0, M.r[1], 1); + __m256 t1 = _mm256_castps128_ps256(M.r[2]); + t1 = _mm256_insertf128_ps(t1, M.r[3], 1); + + __m256 vTemp = _mm256_unpacklo_ps(t0, t1); + __m256 vTemp2 = _mm256_unpackhi_ps(t0, t1); + __m256 vTemp3 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20); + __m256 vTemp4 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31); + vTemp = _mm256_unpacklo_ps(vTemp3, vTemp4); + vTemp2 = _mm256_unpackhi_ps(vTemp3, vTemp4); + t0 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20); + t1 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31); + + XMMATRIX mResult; + mResult.r[0] = _mm256_castps256_ps128(t0); + mResult.r[1] = _mm256_extractf128_ps(t0, 1); + mResult.r[2] = _mm256_castps256_ps128(t1); + mResult.r[3] = _mm256_extractf128_ps(t1, 1); + return mResult; +#elif defined(_XM_SSE_INTRINSICS_) + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + + XMMATRIX mResult; + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ +// Return the inverse and the determinant of a 4x4 matrix +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMMatrixInverse +( + XMVECTOR* pDeterminant, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMMATRIX MT = XMMatrixTranspose(M); + + XMVECTOR V0[4], V1[4]; + V0[0] = XMVectorSwizzle(MT.r[2]); + V1[0] = XMVectorSwizzle(MT.r[3]); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorSwizzle(MT.r[1]); + V0[2] = XMVectorPermute(MT.r[2], MT.r[0]); + V1[2] = XMVectorPermute(MT.r[3], MT.r[1]); + + XMVECTOR D0 = XMVectorMultiply(V0[0], V1[0]); + XMVECTOR D1 = XMVectorMultiply(V0[1], V1[1]); + XMVECTOR D2 = XMVectorMultiply(V0[2], V1[2]); + + V0[0] = XMVectorSwizzle(MT.r[2]); + V1[0] = XMVectorSwizzle(MT.r[3]); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorSwizzle(MT.r[1]); + V0[2] = XMVectorPermute(MT.r[2], MT.r[0]); + V1[2] = XMVectorPermute(MT.r[3], MT.r[1]); + + D0 = XMVectorNegativeMultiplySubtract(V0[0], V1[0], D0); + D1 = 
XMVectorNegativeMultiplySubtract(V0[1], V1[1], D1); + D2 = XMVectorNegativeMultiplySubtract(V0[2], V1[2], D2); + + V0[0] = XMVectorSwizzle(MT.r[1]); + V1[0] = XMVectorPermute(D0, D2); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorPermute(D0, D2); + V0[2] = XMVectorSwizzle(MT.r[3]); + V1[2] = XMVectorPermute(D1, D2); + V0[3] = XMVectorSwizzle(MT.r[2]); + V1[3] = XMVectorPermute(D1, D2); + + XMVECTOR C0 = XMVectorMultiply(V0[0], V1[0]); + XMVECTOR C2 = XMVectorMultiply(V0[1], V1[1]); + XMVECTOR C4 = XMVectorMultiply(V0[2], V1[2]); + XMVECTOR C6 = XMVectorMultiply(V0[3], V1[3]); + + V0[0] = XMVectorSwizzle(MT.r[1]); + V1[0] = XMVectorPermute(D0, D2); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorPermute(D0, D2); + V0[2] = XMVectorSwizzle(MT.r[3]); + V1[2] = XMVectorPermute(D1, D2); + V0[3] = XMVectorSwizzle(MT.r[2]); + V1[3] = XMVectorPermute(D1, D2); + + C0 = XMVectorNegativeMultiplySubtract(V0[0], V1[0], C0); + C2 = XMVectorNegativeMultiplySubtract(V0[1], V1[1], C2); + C4 = XMVectorNegativeMultiplySubtract(V0[2], V1[2], C4); + C6 = XMVectorNegativeMultiplySubtract(V0[3], V1[3], C6); + + V0[0] = XMVectorSwizzle(MT.r[1]); + V1[0] = XMVectorPermute(D0, D2); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorPermute(D0, D2); + V0[2] = XMVectorSwizzle(MT.r[3]); + V1[2] = XMVectorPermute(D1, D2); + V0[3] = XMVectorSwizzle(MT.r[2]); + V1[3] = XMVectorPermute(D1, D2); + + XMVECTOR C1 = XMVectorNegativeMultiplySubtract(V0[0], V1[0], C0); + C0 = XMVectorMultiplyAdd(V0[0], V1[0], C0); + XMVECTOR C3 = XMVectorMultiplyAdd(V0[1], V1[1], C2); + C2 = XMVectorNegativeMultiplySubtract(V0[1], V1[1], C2); + XMVECTOR C5 = XMVectorNegativeMultiplySubtract(V0[2], V1[2], C4); + C4 = XMVectorMultiplyAdd(V0[2], V1[2], C4); + XMVECTOR C7 = XMVectorMultiplyAdd(V0[3], V1[3], C6); + C6 = XMVectorNegativeMultiplySubtract(V0[3], V1[3], C6); + + XMMATRIX R; + R.r[0] = XMVectorSelect(C0, C1, g_XMSelect0101.v); + R.r[1] = XMVectorSelect(C2, C3, g_XMSelect0101.v); + R.r[2] = XMVectorSelect(C4, C5, g_XMSelect0101.v); + R.r[3] = XMVectorSelect(C6, C7, g_XMSelect0101.v); + + XMVECTOR Determinant = XMVector4Dot(R.r[0], MT.r[0]); + + if (pDeterminant != nullptr) + *pDeterminant = Determinant; + + XMVECTOR Reciprocal = XMVectorReciprocal(Determinant); + + XMMATRIX Result; + Result.r[0] = XMVectorMultiply(R.r[0], Reciprocal); + Result.r[1] = XMVectorMultiply(R.r[1], Reciprocal); + Result.r[2] = XMVectorMultiply(R.r[2], Reciprocal); + Result.r[3] = XMVectorMultiply(R.r[3], Reciprocal); + return Result; + +#elif defined(_XM_SSE_INTRINSICS_) + // Transpose matrix + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + + XMMATRIX MT; + MT.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + MT.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + MT.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + MT.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + + XMVECTOR V00 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(1, 1, 0, 0)); + XMVECTOR V10 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + XMVECTOR V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(1, 1, 0, 0)); + XMVECTOR V11 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + XMVECTOR V02 = _mm_shuffle_ps(MT.r[2], MT.r[0], _MM_SHUFFLE(2, 0, 2, 0)); + XMVECTOR V12 = 
_mm_shuffle_ps(MT.r[3], MT.r[1], _MM_SHUFFLE(3, 1, 3, 1)); + + XMVECTOR D0 = _mm_mul_ps(V00, V10); + XMVECTOR D1 = _mm_mul_ps(V01, V11); + XMVECTOR D2 = _mm_mul_ps(V02, V12); + + V00 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(3, 2, 3, 2)); + V10 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(1, 1, 0, 0)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(3, 2, 3, 2)); + V11 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(1, 1, 0, 0)); + V02 = _mm_shuffle_ps(MT.r[2], MT.r[0], _MM_SHUFFLE(3, 1, 3, 1)); + V12 = _mm_shuffle_ps(MT.r[3], MT.r[1], _MM_SHUFFLE(2, 0, 2, 0)); + + D0 = XM_FNMADD_PS(V00, V10, D0); + D1 = XM_FNMADD_PS(V01, V11, D1); + D2 = XM_FNMADD_PS(V02, V12, D2); + // V11 = D0Y,D0W,D2Y,D2Y + V11 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(1, 1, 3, 1)); + V00 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(1, 0, 2, 1)); + V10 = _mm_shuffle_ps(V11, D0, _MM_SHUFFLE(0, 3, 0, 2)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(0, 1, 0, 2)); + V11 = _mm_shuffle_ps(V11, D0, _MM_SHUFFLE(2, 1, 2, 1)); + // V13 = D1Y,D1W,D2W,D2W + XMVECTOR V13 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(3, 3, 3, 1)); + V02 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(1, 0, 2, 1)); + V12 = _mm_shuffle_ps(V13, D1, _MM_SHUFFLE(0, 3, 0, 2)); + XMVECTOR V03 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(0, 1, 0, 2)); + V13 = _mm_shuffle_ps(V13, D1, _MM_SHUFFLE(2, 1, 2, 1)); + + XMVECTOR C0 = _mm_mul_ps(V00, V10); + XMVECTOR C2 = _mm_mul_ps(V01, V11); + XMVECTOR C4 = _mm_mul_ps(V02, V12); + XMVECTOR C6 = _mm_mul_ps(V03, V13); + + // V11 = D0X,D0Y,D2X,D2X + V11 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(0, 0, 1, 0)); + V00 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(2, 1, 3, 2)); + V10 = _mm_shuffle_ps(D0, V11, _MM_SHUFFLE(2, 1, 0, 3)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(1, 3, 2, 3)); + V11 = _mm_shuffle_ps(D0, V11, _MM_SHUFFLE(0, 2, 1, 2)); + // V13 = D1X,D1Y,D2Z,D2Z + V13 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(2, 2, 1, 0)); + V02 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(2, 1, 3, 2)); + V12 = _mm_shuffle_ps(D1, V13, _MM_SHUFFLE(2, 1, 0, 3)); + V03 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(1, 3, 2, 3)); + V13 = _mm_shuffle_ps(D1, V13, _MM_SHUFFLE(0, 2, 1, 2)); + + C0 = XM_FNMADD_PS(V00, V10, C0); + C2 = XM_FNMADD_PS(V01, V11, C2); + C4 = XM_FNMADD_PS(V02, V12, C4); + C6 = XM_FNMADD_PS(V03, V13, C6); + + V00 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(0, 3, 0, 3)); + // V10 = D0Z,D0Z,D2X,D2Y + V10 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(1, 0, 2, 2)); + V10 = XM_PERMUTE_PS(V10, _MM_SHUFFLE(0, 2, 3, 0)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(2, 0, 3, 1)); + // V11 = D0X,D0W,D2X,D2Y + V11 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(1, 0, 3, 0)); + V11 = XM_PERMUTE_PS(V11, _MM_SHUFFLE(2, 1, 0, 3)); + V02 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(0, 3, 0, 3)); + // V12 = D1Z,D1Z,D2Z,D2W + V12 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(3, 2, 2, 2)); + V12 = XM_PERMUTE_PS(V12, _MM_SHUFFLE(0, 2, 3, 0)); + V03 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(2, 0, 3, 1)); + // V13 = D1X,D1W,D2Z,D2W + V13 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(3, 2, 3, 0)); + V13 = XM_PERMUTE_PS(V13, _MM_SHUFFLE(2, 1, 0, 3)); + + V00 = _mm_mul_ps(V00, V10); + V01 = _mm_mul_ps(V01, V11); + V02 = _mm_mul_ps(V02, V12); + V03 = _mm_mul_ps(V03, V13); + XMVECTOR C1 = _mm_sub_ps(C0, V00); + C0 = _mm_add_ps(C0, V00); + XMVECTOR C3 = _mm_add_ps(C2, V01); + C2 = _mm_sub_ps(C2, V01); + XMVECTOR C5 = _mm_sub_ps(C4, V02); + C4 = _mm_add_ps(C4, V02); + XMVECTOR C7 = _mm_add_ps(C6, V03); + C6 = _mm_sub_ps(C6, V03); + + C0 = _mm_shuffle_ps(C0, C1, _MM_SHUFFLE(3, 1, 2, 0)); + C2 = _mm_shuffle_ps(C2, C3, _MM_SHUFFLE(3, 1, 2, 0)); + C4 = _mm_shuffle_ps(C4, C5, _MM_SHUFFLE(3, 1, 2, 0)); + C6 = 
_mm_shuffle_ps(C6, C7, _MM_SHUFFLE(3, 1, 2, 0)); + C0 = XM_PERMUTE_PS(C0, _MM_SHUFFLE(3, 1, 2, 0)); + C2 = XM_PERMUTE_PS(C2, _MM_SHUFFLE(3, 1, 2, 0)); + C4 = XM_PERMUTE_PS(C4, _MM_SHUFFLE(3, 1, 2, 0)); + C6 = XM_PERMUTE_PS(C6, _MM_SHUFFLE(3, 1, 2, 0)); + // Get the determinant + XMVECTOR vTemp = XMVector4Dot(C0, MT.r[0]); + if (pDeterminant != nullptr) + *pDeterminant = vTemp; + vTemp = _mm_div_ps(g_XMOne, vTemp); + XMMATRIX mResult; + mResult.r[0] = _mm_mul_ps(C0, vTemp); + mResult.r[1] = _mm_mul_ps(C2, vTemp); + mResult.r[2] = _mm_mul_ps(C4, vTemp); + mResult.r[3] = _mm_mul_ps(C6, vTemp); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixVectorTensorProduct +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + XMMATRIX mResult; + mResult.r[0] = XMVectorMultiply(XMVectorSwizzle<0, 0, 0, 0>(V1), V2); + mResult.r[1] = XMVectorMultiply(XMVectorSwizzle<1, 1, 1, 1>(V1), V2); + mResult.r[2] = XMVectorMultiply(XMVectorSwizzle<2, 2, 2, 2>(V1), V2); + mResult.r[3] = XMVectorMultiply(XMVectorSwizzle<3, 3, 3, 3>(V1), V2); + return mResult; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMMatrixDeterminant(FXMMATRIX M) noexcept +{ + static const XMVECTORF32 Sign = { { { 1.0f, -1.0f, 1.0f, -1.0f } } }; + + XMVECTOR V0 = XMVectorSwizzle(M.r[2]); + XMVECTOR V1 = XMVectorSwizzle(M.r[3]); + XMVECTOR V2 = XMVectorSwizzle(M.r[2]); + XMVECTOR V3 = XMVectorSwizzle(M.r[3]); + XMVECTOR V4 = XMVectorSwizzle(M.r[2]); + XMVECTOR V5 = XMVectorSwizzle(M.r[3]); + + XMVECTOR P0 = XMVectorMultiply(V0, V1); + XMVECTOR P1 = XMVectorMultiply(V2, V3); + XMVECTOR P2 = XMVectorMultiply(V4, V5); + + V0 = XMVectorSwizzle(M.r[2]); + V1 = XMVectorSwizzle(M.r[3]); + V2 = XMVectorSwizzle(M.r[2]); + V3 = XMVectorSwizzle(M.r[3]); + V4 = XMVectorSwizzle(M.r[2]); + V5 = XMVectorSwizzle(M.r[3]); + + P0 = XMVectorNegativeMultiplySubtract(V0, V1, P0); + P1 = XMVectorNegativeMultiplySubtract(V2, V3, P1); + P2 = XMVectorNegativeMultiplySubtract(V4, V5, P2); + + V0 = XMVectorSwizzle(M.r[1]); + V1 = XMVectorSwizzle(M.r[1]); + V2 = XMVectorSwizzle(M.r[1]); + + XMVECTOR S = XMVectorMultiply(M.r[0], Sign.v); + XMVECTOR R = XMVectorMultiply(V0, P0); + R = XMVectorNegativeMultiplySubtract(V1, P1, R); + R = XMVectorMultiplyAdd(V2, P2, R); + + return XMVector4Dot(S, R); +} + +#define XM3RANKDECOMPOSE(a, b, c, x, y, z) \ + if((x) < (y)) \ + { \ + if((y) < (z)) \ + { \ + (a) = 2; \ + (b) = 1; \ + (c) = 0; \ + } \ + else \ + { \ + (a) = 1; \ + \ + if((x) < (z)) \ + { \ + (b) = 2; \ + (c) = 0; \ + } \ + else \ + { \ + (b) = 0; \ + (c) = 2; \ + } \ + } \ + } \ + else \ + { \ + if((x) < (z)) \ + { \ + (a) = 2; \ + (b) = 0; \ + (c) = 1; \ + } \ + else \ + { \ + (a) = 0; \ + \ + if((y) < (z)) \ + { \ + (b) = 2; \ + (c) = 1; \ + } \ + else \ + { \ + (b) = 1; \ + (c) = 2; \ + } \ + } \ + } + +#define XM3_DECOMP_EPSILON 0.0001f + +_Use_decl_annotations_ +inline bool XM_CALLCONV XMMatrixDecompose +( + XMVECTOR* outScale, + XMVECTOR* outRotQuat, + XMVECTOR* outTrans, + FXMMATRIX M +) noexcept +{ + static const XMVECTOR* pvCanonicalBasis[3] = { + &g_XMIdentityR0.v, + &g_XMIdentityR1.v, + &g_XMIdentityR2.v + }; + + assert(outScale != nullptr); + assert(outRotQuat != nullptr); + assert(outTrans != nullptr); + + // Get the translation + outTrans[0] = M.r[3]; + + XMVECTOR* ppvBasis[3]; + XMMATRIX matTemp; + ppvBasis[0] = &matTemp.r[0]; + ppvBasis[1] = &matTemp.r[1]; + ppvBasis[2] = &matTemp.r[2]; + 
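+    // (Note, added for clarity: the decomposition below works in four steps --
+    // copy the 3x3 basis, take each row's length as that axis' scale,
+    // renormalize (rebuilding near-zero rows from cross products), flip one
+    // axis if the determinant is negative, then read the remaining pure
+    // rotation back out as a quaternion.)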
+    matTemp.r[0] = M.r[0];
+    matTemp.r[1] = M.r[1];
+    matTemp.r[2] = M.r[2];
+    matTemp.r[3] = g_XMIdentityR3.v;
+
+    auto pfScales = reinterpret_cast<float*>(outScale);
+
+    size_t a, b, c;
+    XMVectorGetXPtr(&pfScales[0], XMVector3Length(ppvBasis[0][0]));
+    XMVectorGetXPtr(&pfScales[1], XMVector3Length(ppvBasis[1][0]));
+    XMVectorGetXPtr(&pfScales[2], XMVector3Length(ppvBasis[2][0]));
+    pfScales[3] = 0.f;
+
+    XM3RANKDECOMPOSE(a, b, c, pfScales[0], pfScales[1], pfScales[2])
+
+    if (pfScales[a] < XM3_DECOMP_EPSILON)
+    {
+        ppvBasis[a][0] = pvCanonicalBasis[a][0];
+    }
+    ppvBasis[a][0] = XMVector3Normalize(ppvBasis[a][0]);
+
+    if (pfScales[b] < XM3_DECOMP_EPSILON)
+    {
+        size_t aa, bb, cc;
+        float fAbsX, fAbsY, fAbsZ;
+
+        fAbsX = fabsf(XMVectorGetX(ppvBasis[a][0]));
+        fAbsY = fabsf(XMVectorGetY(ppvBasis[a][0]));
+        fAbsZ = fabsf(XMVectorGetZ(ppvBasis[a][0]));
+
+        XM3RANKDECOMPOSE(aa, bb, cc, fAbsX, fAbsY, fAbsZ)
+
+        ppvBasis[b][0] = XMVector3Cross(ppvBasis[a][0], pvCanonicalBasis[cc][0]);
+    }
+
+    ppvBasis[b][0] = XMVector3Normalize(ppvBasis[b][0]);
+
+    if (pfScales[c] < XM3_DECOMP_EPSILON)
+    {
+        ppvBasis[c][0] = XMVector3Cross(ppvBasis[a][0], ppvBasis[b][0]);
+    }
+
+    ppvBasis[c][0] = XMVector3Normalize(ppvBasis[c][0]);
+
+    float fDet = XMVectorGetX(XMMatrixDeterminant(matTemp));
+
+    // use Cramer's rule to check for handedness of coordinate system
+    if (fDet < 0.0f)
+    {
+        // switch coordinate system by negating the scale and inverting the basis vector on the x-axis
+        pfScales[a] = -pfScales[a];
+        ppvBasis[a][0] = XMVectorNegate(ppvBasis[a][0]);
+
+        fDet = -fDet;
+    }
+
+    fDet -= 1.0f;
+    fDet *= fDet;
+
+    if (XM3_DECOMP_EPSILON < fDet)
+    {
+        // Non-SRT matrix encountered
+        return false;
+    }
+
+    // generate the quaternion from the matrix
+    outRotQuat[0] = XMQuaternionRotationMatrix(matTemp);
+    return true;
+}
+
+#undef XM3_DECOMP_EPSILON
+#undef XM3RANKDECOMPOSE
+
+//------------------------------------------------------------------------------
+// Transformation operations
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixIdentity() noexcept
+{
+    XMMATRIX M;
+    M.r[0] = g_XMIdentityR0.v;
+    M.r[1] = g_XMIdentityR1.v;
+    M.r[2] = g_XMIdentityR2.v;
+    M.r[3] = g_XMIdentityR3.v;
+    return M;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixSet
+(
+    float m00, float m01, float m02, float m03,
+    float m10, float m11, float m12, float m13,
+    float m20, float m21, float m22, float m23,
+    float m30, float m31, float m32, float m33
+) noexcept
+{
+    XMMATRIX M;
+#if defined(_XM_NO_INTRINSICS_)
+    M.m[0][0] = m00; M.m[0][1] = m01; M.m[0][2] = m02; M.m[0][3] = m03;
+    M.m[1][0] = m10; M.m[1][1] = m11; M.m[1][2] = m12; M.m[1][3] = m13;
+    M.m[2][0] = m20; M.m[2][1] = m21; M.m[2][2] = m22; M.m[2][3] = m23;
+    M.m[3][0] = m30; M.m[3][1] = m31; M.m[3][2] = m32; M.m[3][3] = m33;
+#else
+    M.r[0] = XMVectorSet(m00, m01, m02, m03);
+    M.r[1] = XMVectorSet(m10, m11, m12, m13);
+    M.r[2] = XMVectorSet(m20, m21, m22, m23);
+    M.r[3] = XMVectorSet(m30, m31, m32, m33);
+#endif
+    return M;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixTranslation
+(
+    float OffsetX,
+    float OffsetY,
+    float OffsetZ
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMMATRIX M;
+    M.m[0][0] = 1.0f;
+    M.m[0][1] = 0.0f;
+    M.m[0][2] = 0.0f;
+    M.m[0][3] = 0.0f;
+
+ M.m[1][0] = 0.0f; + M.m[1][1] = 1.0f; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = 1.0f; + M.m[2][3] = 0.0f; + + M.m[3][0] = OffsetX; + M.m[3][1] = OffsetY; + M.m[3][2] = OffsetZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = g_XMIdentityR2.v; + M.r[3] = XMVectorSet(OffsetX, OffsetY, OffsetZ, 1.f); + return M; +#endif +} + + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTranslationFromVector(FXMVECTOR Offset) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = 1.0f; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 1.0f; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = 1.0f; + M.m[2][3] = 0.0f; + + M.m[3][0] = Offset.vector4_f32[0]; + M.m[3][1] = Offset.vector4_f32[1]; + M.m[3][2] = Offset.vector4_f32[2]; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = g_XMIdentityR2.v; + M.r[3] = XMVectorSelect(g_XMIdentityR3.v, Offset, g_XMSelect1110.v); + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixScaling +( + float ScaleX, + float ScaleY, + float ScaleZ +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = ScaleX; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = ScaleY; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = ScaleZ; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(ScaleX, Zero, 0); + M.r[1] = vsetq_lane_f32(ScaleY, Zero, 1); + M.r[2] = vsetq_lane_f32(ScaleZ, Zero, 2); + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_set_ps(0, 0, 0, ScaleX); + M.r[1] = _mm_set_ps(0, 0, ScaleY, 0); + M.r[2] = _mm_set_ps(0, ScaleZ, 0, 0); + M.r[3] = g_XMIdentityR3.v; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixScalingFromVector(FXMVECTOR Scale) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = Scale.vector4_f32[0]; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = Scale.vector4_f32[1]; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = Scale.vector4_f32[2]; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = vandq_u32(Scale, g_XMMaskX); + M.r[1] = vandq_u32(Scale, g_XMMaskY); + M.r[2] = vandq_u32(Scale, g_XMMaskZ); + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_and_ps(Scale, g_XMMaskX); + M.r[1] = _mm_and_ps(Scale, g_XMMaskY); + M.r[2] = _mm_and_ps(Scale, g_XMMaskZ); + M.r[3] = g_XMIdentityR3.v; + return M; +#endif +} + 
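+
+// (Editorial sketch, not part of the upstream header: with the helpers above,
+// a typical replacement for the old D3DXMatrix* scale/rotate/translate
+// composition is, in row-vector order -- the variable name and literals are
+// illustrative only:
+//
+//     XMMATRIX world = XMMatrixScaling(2.0f, 2.0f, 2.0f)
+//                    * XMMatrixRotationY(XM_PIDIV4)
+//                    * XMMatrixTranslation(0.0f, 1.0f, -5.0f);
+//
+// i.e. v' = v * S * R * T: scale first, rotate, then translate.)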
+//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationX(float Angle) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMMATRIX M; + M.m[0][0] = 1.0f; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = fCosAngle; + M.m[1][2] = fSinAngle; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = -fSinAngle; + M.m[2][2] = fCosAngle; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + const XMVECTOR Zero = vdupq_n_f32(0); + + XMVECTOR T1 = vsetq_lane_f32(fCosAngle, Zero, 1); + T1 = vsetq_lane_f32(fSinAngle, T1, 2); + + XMVECTOR T2 = vsetq_lane_f32(-fSinAngle, Zero, 1); + T2 = vsetq_lane_f32(fCosAngle, T2, 2); + + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = T1; + M.r[2] = T2; + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinAngle; + float CosAngle; + XMScalarSinCos(&SinAngle, &CosAngle, Angle); + + XMVECTOR vSin = _mm_set_ss(SinAngle); + XMVECTOR vCos = _mm_set_ss(CosAngle); + // x = 0,y = cos,z = sin, w = 0 + vCos = _mm_shuffle_ps(vCos, vSin, _MM_SHUFFLE(3, 0, 0, 3)); + XMMATRIX M; + M.r[0] = g_XMIdentityR0; + M.r[1] = vCos; + // x = 0,y = sin,z = cos, w = 0 + vCos = XM_PERMUTE_PS(vCos, _MM_SHUFFLE(3, 1, 2, 0)); + // x = 0,y = -sin,z = cos, w = 0 + vCos = _mm_mul_ps(vCos, g_XMNegateY); + M.r[2] = vCos; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationY(float Angle) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMMATRIX M; + M.m[0][0] = fCosAngle; + M.m[0][1] = 0.0f; + M.m[0][2] = -fSinAngle; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 1.0f; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = fSinAngle; + M.m[2][1] = 0.0f; + M.m[2][2] = fCosAngle; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + const XMVECTOR Zero = vdupq_n_f32(0); + + XMVECTOR T0 = vsetq_lane_f32(fCosAngle, Zero, 0); + T0 = vsetq_lane_f32(-fSinAngle, T0, 2); + + XMVECTOR T2 = vsetq_lane_f32(fSinAngle, Zero, 0); + T2 = vsetq_lane_f32(fCosAngle, T2, 2); + + XMMATRIX M; + M.r[0] = T0; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = T2; + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinAngle; + float CosAngle; + XMScalarSinCos(&SinAngle, &CosAngle, Angle); + + XMVECTOR vSin = _mm_set_ss(SinAngle); + XMVECTOR vCos = _mm_set_ss(CosAngle); + // x = sin,y = 0,z = cos, w = 0 + vSin = _mm_shuffle_ps(vSin, vCos, _MM_SHUFFLE(3, 0, 3, 0)); + XMMATRIX M; + M.r[2] = vSin; + M.r[1] = g_XMIdentityR1; + // x = cos,y = 0,z = sin, w = 0 + vSin = XM_PERMUTE_PS(vSin, _MM_SHUFFLE(3, 0, 1, 2)); + // x = cos,y = 0,z = -sin, w = 0 + vSin = _mm_mul_ps(vSin, g_XMNegateZ); + M.r[0] = vSin; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV 
XMMatrixRotationZ(float Angle) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    float fSinAngle;
+    float fCosAngle;
+    XMScalarSinCos(&fSinAngle, &fCosAngle, Angle);
+
+    XMMATRIX M;
+    M.m[0][0] = fCosAngle;
+    M.m[0][1] = fSinAngle;
+    M.m[0][2] = 0.0f;
+    M.m[0][3] = 0.0f;
+
+    M.m[1][0] = -fSinAngle;
+    M.m[1][1] = fCosAngle;
+    M.m[1][2] = 0.0f;
+    M.m[1][3] = 0.0f;
+
+    M.m[2][0] = 0.0f;
+    M.m[2][1] = 0.0f;
+    M.m[2][2] = 1.0f;
+    M.m[2][3] = 0.0f;
+
+    M.m[3][0] = 0.0f;
+    M.m[3][1] = 0.0f;
+    M.m[3][2] = 0.0f;
+    M.m[3][3] = 1.0f;
+    return M;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float fSinAngle;
+    float fCosAngle;
+    XMScalarSinCos(&fSinAngle, &fCosAngle, Angle);
+
+    const XMVECTOR Zero = vdupq_n_f32(0);
+
+    XMVECTOR T0 = vsetq_lane_f32(fCosAngle, Zero, 0);
+    T0 = vsetq_lane_f32(fSinAngle, T0, 1);
+
+    XMVECTOR T1 = vsetq_lane_f32(-fSinAngle, Zero, 0);
+    T1 = vsetq_lane_f32(fCosAngle, T1, 1);
+
+    XMMATRIX M;
+    M.r[0] = T0;
+    M.r[1] = T1;
+    M.r[2] = g_XMIdentityR2.v;
+    M.r[3] = g_XMIdentityR3.v;
+    return M;
+#elif defined(_XM_SSE_INTRINSICS_)
+    float SinAngle;
+    float CosAngle;
+    XMScalarSinCos(&SinAngle, &CosAngle, Angle);
+
+    XMVECTOR vSin = _mm_set_ss(SinAngle);
+    XMVECTOR vCos = _mm_set_ss(CosAngle);
+    // x = cos,y = sin,z = 0, w = 0
+    vCos = _mm_unpacklo_ps(vCos, vSin);
+    XMMATRIX M;
+    M.r[0] = vCos;
+    // x = sin,y = cos,z = 0, w = 0
+    vCos = XM_PERMUTE_PS(vCos, _MM_SHUFFLE(3, 2, 0, 1));
+    // x = -sin,y = cos,z = 0, w = 0
+    vCos = _mm_mul_ps(vCos, g_XMNegateX);
+    M.r[1] = vCos;
+    M.r[2] = g_XMIdentityR2;
+    M.r[3] = g_XMIdentityR3;
+    return M;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYaw
+(
+    float Pitch,
+    float Yaw,
+    float Roll
+) noexcept
+{
+    XMVECTOR Angles = XMVectorSet(Pitch, Yaw, Roll, 0.0f);
+    return XMMatrixRotationRollPitchYawFromVector(Angles);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYawFromVector
+(
+    FXMVECTOR Angles // <Pitch, Yaw, Roll, undefined>
+) noexcept
+{
+    XMVECTOR Q = XMQuaternionRotationRollPitchYawFromVector(Angles);
+    return XMMatrixRotationQuaternion(Q);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixRotationNormal
+(
+    FXMVECTOR NormalAxis,
+    float Angle
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_)
+
+    float fSinAngle;
+    float fCosAngle;
+    XMScalarSinCos(&fSinAngle, &fCosAngle, Angle);
+
+    XMVECTOR A = XMVectorSet(fSinAngle, fCosAngle, 1.0f - fCosAngle, 0.0f);
+
+    XMVECTOR C2 = XMVectorSplatZ(A);
+    XMVECTOR C1 = XMVectorSplatY(A);
+    XMVECTOR C0 = XMVectorSplatX(A);
+
+    XMVECTOR N0 = XMVectorSwizzle<XM_SWIZZLE_Y, XM_SWIZZLE_Z, XM_SWIZZLE_X, XM_SWIZZLE_W>(NormalAxis);
+    XMVECTOR N1 = XMVectorSwizzle<XM_SWIZZLE_Z, XM_SWIZZLE_X, XM_SWIZZLE_Y, XM_SWIZZLE_W>(NormalAxis);
+
+    XMVECTOR V0 = XMVectorMultiply(C2, N0);
+    V0 = XMVectorMultiply(V0, N1);
+
+    XMVECTOR R0 = XMVectorMultiply(C2, NormalAxis);
+    R0 = XMVectorMultiplyAdd(R0, NormalAxis, C1);
+
+    XMVECTOR R1 = XMVectorMultiplyAdd(C0, NormalAxis, V0);
+    XMVECTOR R2 = XMVectorNegativeMultiplySubtract(C0, NormalAxis, V0);
+
+    V0 = XMVectorSelect(A, R0, g_XMSelect1110.v);
+    XMVECTOR V1 = XMVectorPermute<XM_PERMUTE_0Z, XM_PERMUTE_1Y, XM_PERMUTE_1Z, XM_PERMUTE_0X>(R1, R2);
+    XMVECTOR V2 = XMVectorPermute<XM_PERMUTE_0Y, XM_PERMUTE_1X, XM_PERMUTE_0Y, XM_PERMUTE_1X>(R1, R2);
+
+    XMMATRIX M;
+    M.r[0] = XMVectorPermute<XM_PERMUTE_0X, XM_PERMUTE_1X, XM_PERMUTE_1Y, XM_PERMUTE_0W>(V0, V1);
+    M.r[1] = XMVectorPermute<XM_PERMUTE_1Z, XM_PERMUTE_0Y, XM_PERMUTE_1W, XM_PERMUTE_0W>(V0, V1);
+    M.r[2] = XMVectorPermute<XM_PERMUTE_1X, XM_PERMUTE_1Y, XM_PERMUTE_0Z, XM_PERMUTE_0W>(V0, V2);
+    M.r[3] = g_XMIdentityR3.v;
+    return M;
+
+#elif defined(_XM_SSE_INTRINSICS_)
+    float fSinAngle;
+    float fCosAngle;
+    XMScalarSinCos(&fSinAngle, &fCosAngle, Angle);
+
+    XMVECTOR C2 = _mm_set_ps1(1.0f - fCosAngle);
+    XMVECTOR C1 = _mm_set_ps1(fCosAngle);
+    XMVECTOR C0 = _mm_set_ps1(fSinAngle);
+
+    XMVECTOR N0 = XM_PERMUTE_PS(NormalAxis, _MM_SHUFFLE(3, 0, 2, 1));
+    XMVECTOR N1 = XM_PERMUTE_PS(NormalAxis, _MM_SHUFFLE(3, 1, 0, 2));
+
+    XMVECTOR V0 = _mm_mul_ps(C2, N0);
+    V0 = _mm_mul_ps(V0, N1);
+
+    XMVECTOR R0 = _mm_mul_ps(C2, NormalAxis);
+    R0 = _mm_mul_ps(R0, NormalAxis);
+    R0 = _mm_add_ps(R0, C1);
+
+    XMVECTOR R1 = _mm_mul_ps(C0, NormalAxis);
+    R1 = _mm_add_ps(R1, V0);
+    XMVECTOR R2 = _mm_mul_ps(C0, NormalAxis);
+    R2 = _mm_sub_ps(V0, R2);
+
+    V0 = _mm_and_ps(R0, g_XMMask3);
+    XMVECTOR V1 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(2, 1, 2, 0));
+    V1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 3, 2, 1));
+    XMVECTOR V2 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(0, 0, 1, 1));
+    V2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 0, 2, 0));
+
+    R2 = _mm_shuffle_ps(V0, V1, _MM_SHUFFLE(1, 0, 3, 0));
+    R2 = XM_PERMUTE_PS(R2, _MM_SHUFFLE(1, 3, 2, 0));
+
+    XMMATRIX M;
+    M.r[0] = R2;
+
+    R2 = _mm_shuffle_ps(V0, V1, _MM_SHUFFLE(3, 2, 3, 1));
+    R2 = XM_PERMUTE_PS(R2, _MM_SHUFFLE(1, 3, 0, 2));
+    M.r[1] = R2;
+
+    V2 = _mm_shuffle_ps(V2, V0, _MM_SHUFFLE(3, 2, 1, 0));
+    M.r[2] = V2;
+    M.r[3] = g_XMIdentityR3.v;
+    return M;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixRotationAxis
+(
+    FXMVECTOR Axis,
+    float Angle
+) noexcept
+{
+    assert(!XMVector3Equal(Axis, XMVectorZero()));
+    assert(!XMVector3IsInfinite(Axis));
+
+    XMVECTOR Normal = XMVector3Normalize(Axis);
+    return XMMatrixRotationNormal(Normal, Angle);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XM_CALLCONV XMMatrixRotationQuaternion(FXMVECTOR Quaternion) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_)
+
+    static const XMVECTORF32 Constant1110 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } };
+
+    XMVECTOR Q0 = XMVectorAdd(Quaternion, Quaternion);
+    XMVECTOR Q1 = XMVectorMultiply(Quaternion, Q0);
+
+    XMVECTOR V0 = XMVectorPermute<XM_PERMUTE_0Y, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_1W>(Q1, Constant1110.v);
+    XMVECTOR V1 = XMVectorPermute<XM_PERMUTE_0Z, XM_PERMUTE_0Z, XM_PERMUTE_0Y, XM_PERMUTE_1W>(Q1, Constant1110.v);
+    XMVECTOR R0 = XMVectorSubtract(Constant1110, V0);
+    R0 = XMVectorSubtract(R0, V1);
+
+    V0 = XMVectorSwizzle<XM_SWIZZLE_X, XM_SWIZZLE_X, XM_SWIZZLE_Y, XM_SWIZZLE_W>(Quaternion);
+    V1 = XMVectorSwizzle<XM_SWIZZLE_Z, XM_SWIZZLE_Y, XM_SWIZZLE_Z, XM_SWIZZLE_W>(Q0);
+    V0 = XMVectorMultiply(V0, V1);
+
+    V1 = XMVectorSplatW(Quaternion);
+    XMVECTOR V2 = XMVectorSwizzle<XM_SWIZZLE_Y, XM_SWIZZLE_Z, XM_SWIZZLE_X, XM_SWIZZLE_W>(Q0);
+    V1 = XMVectorMultiply(V1, V2);
+
+    XMVECTOR R1 = XMVectorAdd(V0, V1);
+    XMVECTOR R2 = XMVectorSubtract(V0, V1);
+
+    V0 = XMVectorPermute<XM_PERMUTE_0Y, XM_PERMUTE_1X, XM_PERMUTE_1Y, XM_PERMUTE_0Z>(R1, R2);
+    V1 = XMVectorPermute<XM_PERMUTE_0X, XM_PERMUTE_1Z, XM_PERMUTE_0X, XM_PERMUTE_1Z>(R1, R2);
+
+    XMMATRIX M;
+    M.r[0] = XMVectorPermute<XM_PERMUTE_0X, XM_PERMUTE_1X, XM_PERMUTE_1Y, XM_PERMUTE_0W>(R0, V0);
+    M.r[1] = XMVectorPermute<XM_PERMUTE_1Z, XM_PERMUTE_0Y, XM_PERMUTE_1W, XM_PERMUTE_0W>(R0, V0);
+    M.r[2] = XMVectorPermute<XM_PERMUTE_1X, XM_PERMUTE_1Y, XM_PERMUTE_0Z, XM_PERMUTE_0W>(R0, V1);
+    M.r[3] = g_XMIdentityR3.v;
+    return M;
+
+#elif defined(_XM_SSE_INTRINSICS_)
+    static const XMVECTORF32 Constant1110 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } };
+
+    XMVECTOR Q0 = _mm_add_ps(Quaternion, Quaternion);
+    XMVECTOR Q1 = _mm_mul_ps(Quaternion, Q0);
+
+    XMVECTOR V0 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(3, 0, 0, 1));
+    V0 = _mm_and_ps(V0, g_XMMask3);
+    XMVECTOR V1 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(3, 1, 2, 2));
+    V1 = _mm_and_ps(V1, g_XMMask3);
+    XMVECTOR R0 = _mm_sub_ps(Constant1110, V0);
+    R0 = _mm_sub_ps(R0, V1);
+
+    V0 = XM_PERMUTE_PS(Quaternion, _MM_SHUFFLE(3, 1, 0, 0));
+    V1 = XM_PERMUTE_PS(Q0, _MM_SHUFFLE(3, 2, 1, 2));
+    V0 = _mm_mul_ps(V0, V1);
+
+    V1 = XM_PERMUTE_PS(Quaternion, _MM_SHUFFLE(3, 3, 3, 3));
+    XMVECTOR V2 = XM_PERMUTE_PS(Q0, _MM_SHUFFLE(3, 0, 2, 1));
+    V1 = _mm_mul_ps(V1, V2);
+
+    XMVECTOR R1 =
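+    // Editor's note: at this point V0 = (2xz, 2xy, 2yz, ...) and
+    // V1 = (2yw, 2zw, 2xw, ...); their sum R1 and difference R2 below
+    // supply the six off-diagonal entries of the rotation matrix.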
_mm_add_ps(V0, V1); + XMVECTOR R2 = _mm_sub_ps(V0, V1); + + V0 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(1, 0, 2, 1)); + V0 = XM_PERMUTE_PS(V0, _MM_SHUFFLE(1, 3, 2, 0)); + V1 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(2, 2, 0, 0)); + V1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 0, 2, 0)); + + Q1 = _mm_shuffle_ps(R0, V0, _MM_SHUFFLE(1, 0, 3, 0)); + Q1 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(1, 3, 2, 0)); + + XMMATRIX M; + M.r[0] = Q1; + + Q1 = _mm_shuffle_ps(R0, V0, _MM_SHUFFLE(3, 2, 3, 1)); + Q1 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(1, 3, 0, 2)); + M.r[1] = Q1; + + Q1 = _mm_shuffle_ps(V1, R0, _MM_SHUFFLE(3, 2, 1, 0)); + M.r[2] = Q1; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTransformation2D +( + FXMVECTOR ScalingOrigin, + float ScalingOrientation, + FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, + float Rotation, + GXMVECTOR Translation +) noexcept +{ + // M = Inverse(MScalingOrigin) * Transpose(MScalingOrientation) * MScaling * MScalingOrientation * + // MScalingOrigin * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMVECTOR VScalingOrigin = XMVectorSelect(g_XMSelect1100.v, ScalingOrigin, g_XMSelect1100.v); + XMVECTOR NegScalingOrigin = XMVectorNegate(VScalingOrigin); + + XMMATRIX MScalingOriginI = XMMatrixTranslationFromVector(NegScalingOrigin); + XMMATRIX MScalingOrientation = XMMatrixRotationZ(ScalingOrientation); + XMMATRIX MScalingOrientationT = XMMatrixTranspose(MScalingOrientation); + XMVECTOR VScaling = XMVectorSelect(g_XMOne.v, Scaling, g_XMSelect1100.v); + XMMATRIX MScaling = XMMatrixScalingFromVector(VScaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1100.v, RotationOrigin, g_XMSelect1100.v); + XMMATRIX MRotation = XMMatrixRotationZ(Rotation); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1100.v, Translation, g_XMSelect1100.v); + + XMMATRIX M = XMMatrixMultiply(MScalingOriginI, MScalingOrientationT); + M = XMMatrixMultiply(M, MScaling); + M = XMMatrixMultiply(M, MScalingOrientation); + M.r[3] = XMVectorAdd(M.r[3], VScalingOrigin); + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTransformation +( + FXMVECTOR ScalingOrigin, + FXMVECTOR ScalingOrientationQuaternion, + FXMVECTOR Scaling, + GXMVECTOR RotationOrigin, + HXMVECTOR RotationQuaternion, + HXMVECTOR Translation +) noexcept +{ + // M = Inverse(MScalingOrigin) * Transpose(MScalingOrientation) * MScaling * MScalingOrientation * + // MScalingOrigin * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMVECTOR VScalingOrigin = XMVectorSelect(g_XMSelect1110.v, ScalingOrigin, g_XMSelect1110.v); + XMVECTOR NegScalingOrigin = XMVectorNegate(ScalingOrigin); + + XMMATRIX MScalingOriginI = XMMatrixTranslationFromVector(NegScalingOrigin); + XMMATRIX MScalingOrientation = XMMatrixRotationQuaternion(ScalingOrientationQuaternion); + XMMATRIX MScalingOrientationT = XMMatrixTranspose(MScalingOrientation); + XMMATRIX MScaling = XMMatrixScalingFromVector(Scaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1110.v, RotationOrigin, g_XMSelect1110.v); + XMMATRIX MRotation = XMMatrixRotationQuaternion(RotationQuaternion); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1110.v, Translation, 
g_XMSelect1110.v); + + XMMATRIX M; + M = XMMatrixMultiply(MScalingOriginI, MScalingOrientationT); + M = XMMatrixMultiply(M, MScaling); + M = XMMatrixMultiply(M, MScalingOrientation); + M.r[3] = XMVectorAdd(M.r[3], VScalingOrigin); + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixAffineTransformation2D +( + FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, + float Rotation, + FXMVECTOR Translation +) noexcept +{ + // M = MScaling * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMVECTOR VScaling = XMVectorSelect(g_XMOne.v, Scaling, g_XMSelect1100.v); + XMMATRIX MScaling = XMMatrixScalingFromVector(VScaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1100.v, RotationOrigin, g_XMSelect1100.v); + XMMATRIX MRotation = XMMatrixRotationZ(Rotation); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1100.v, Translation, g_XMSelect1100.v); + + XMMATRIX M; + M = MScaling; + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixAffineTransformation +( + FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, + FXMVECTOR RotationQuaternion, + GXMVECTOR Translation +) noexcept +{ + // M = MScaling * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMMATRIX MScaling = XMMatrixScalingFromVector(Scaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1110.v, RotationOrigin, g_XMSelect1110.v); + XMMATRIX MRotation = XMMatrixRotationQuaternion(RotationQuaternion); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1110.v, Translation, g_XMSelect1110.v); + + XMMATRIX M; + M = MScaling; + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixReflect(FXMVECTOR ReflectionPlane) noexcept +{ + assert(!XMVector3Equal(ReflectionPlane, XMVectorZero())); + assert(!XMPlaneIsInfinite(ReflectionPlane)); + + static const XMVECTORF32 NegativeTwo = { { { -2.0f, -2.0f, -2.0f, 0.0f } } }; + + XMVECTOR P = XMPlaneNormalize(ReflectionPlane); + XMVECTOR S = XMVectorMultiply(P, NegativeTwo); + + XMVECTOR A = XMVectorSplatX(P); + XMVECTOR B = XMVectorSplatY(P); + XMVECTOR C = XMVectorSplatZ(P); + XMVECTOR D = XMVectorSplatW(P); + + XMMATRIX M; + M.r[0] = XMVectorMultiplyAdd(A, S, g_XMIdentityR0.v); + M.r[1] = XMVectorMultiplyAdd(B, S, g_XMIdentityR1.v); + M.r[2] = XMVectorMultiplyAdd(C, S, g_XMIdentityR2.v); + M.r[3] = XMVectorMultiplyAdd(D, S, g_XMIdentityR3.v); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixShadow +( + FXMVECTOR ShadowPlane, + FXMVECTOR LightPosition +) noexcept +{ + static const XMVECTORU32 Select0001 = { { { XM_SELECT_0, XM_SELECT_0, XM_SELECT_0, XM_SELECT_1 } } }; + + assert(!XMVector3Equal(ShadowPlane, XMVectorZero())); + 
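+    // Editor's note -- usage sketch, not part of the upstream header: this
+    // builds a matrix that flattens geometry onto ShadowPlane as seen from
+    // LightPosition (w = 0 selects a directional light, w = 1 a point
+    // light), e.g. for a ground plane y = 0:
+    //   XMMATRIX shadow = XMMatrixShadow(
+    //       XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f),      // plane (normal, d)
+    //       XMVectorSet(10.0f, 20.0f, -10.0f, 1.0f)); // point light position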
assert(!XMPlaneIsInfinite(ShadowPlane)); + + XMVECTOR P = XMPlaneNormalize(ShadowPlane); + XMVECTOR Dot = XMPlaneDot(P, LightPosition); + P = XMVectorNegate(P); + XMVECTOR D = XMVectorSplatW(P); + XMVECTOR C = XMVectorSplatZ(P); + XMVECTOR B = XMVectorSplatY(P); + XMVECTOR A = XMVectorSplatX(P); + Dot = XMVectorSelect(Select0001.v, Dot, Select0001.v); + + XMMATRIX M; + M.r[3] = XMVectorMultiplyAdd(D, LightPosition, Dot); + Dot = XMVectorRotateLeft(Dot, 1); + M.r[2] = XMVectorMultiplyAdd(C, LightPosition, Dot); + Dot = XMVectorRotateLeft(Dot, 1); + M.r[1] = XMVectorMultiplyAdd(B, LightPosition, Dot); + Dot = XMVectorRotateLeft(Dot, 1); + M.r[0] = XMVectorMultiplyAdd(A, LightPosition, Dot); + return M; +} + +//------------------------------------------------------------------------------ +// View and projection initialization operations +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookAtLH +( + FXMVECTOR EyePosition, + FXMVECTOR FocusPosition, + FXMVECTOR UpDirection +) noexcept +{ + XMVECTOR EyeDirection = XMVectorSubtract(FocusPosition, EyePosition); + return XMMatrixLookToLH(EyePosition, EyeDirection, UpDirection); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookAtRH +( + FXMVECTOR EyePosition, + FXMVECTOR FocusPosition, + FXMVECTOR UpDirection +) noexcept +{ + XMVECTOR NegEyeDirection = XMVectorSubtract(EyePosition, FocusPosition); + return XMMatrixLookToLH(EyePosition, NegEyeDirection, UpDirection); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookToLH +( + FXMVECTOR EyePosition, + FXMVECTOR EyeDirection, + FXMVECTOR UpDirection +) noexcept +{ + assert(!XMVector3Equal(EyeDirection, XMVectorZero())); + assert(!XMVector3IsInfinite(EyeDirection)); + assert(!XMVector3Equal(UpDirection, XMVectorZero())); + assert(!XMVector3IsInfinite(UpDirection)); + + XMVECTOR R2 = XMVector3Normalize(EyeDirection); + + XMVECTOR R0 = XMVector3Cross(UpDirection, R2); + R0 = XMVector3Normalize(R0); + + XMVECTOR R1 = XMVector3Cross(R2, R0); + + XMVECTOR NegEyePosition = XMVectorNegate(EyePosition); + + XMVECTOR D0 = XMVector3Dot(R0, NegEyePosition); + XMVECTOR D1 = XMVector3Dot(R1, NegEyePosition); + XMVECTOR D2 = XMVector3Dot(R2, NegEyePosition); + + XMMATRIX M; + M.r[0] = XMVectorSelect(D0, R0, g_XMSelect1110.v); + M.r[1] = XMVectorSelect(D1, R1, g_XMSelect1110.v); + M.r[2] = XMVectorSelect(D2, R2, g_XMSelect1110.v); + M.r[3] = g_XMIdentityR3.v; + + M = XMMatrixTranspose(M); + + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookToRH +( + FXMVECTOR EyePosition, + FXMVECTOR EyeDirection, + FXMVECTOR UpDirection +) noexcept +{ + XMVECTOR NegEyeDirection = XMVectorNegate(EyeDirection); + return XMMatrixLookToLH(EyePosition, NegEyeDirection, UpDirection); +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable:28931, "PREfast noise: Esp:1266") +#endif + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveLH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if 
defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (FarZ - NearZ); + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ / ViewWidth, + TwoNearZ / ViewHeight, + fRange, + -fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,1.0f + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,1.0f + vTemp = _mm_setzero_ps(); + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,0 + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveRH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = -1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (NearZ - FarZ); + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMNegIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ / ViewWidth, + TwoNearZ / ViewHeight, + fRange, + fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR 
vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,-1.0f + vValues = _mm_shuffle_ps(vValues, g_XMNegIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,-1.0f + vTemp = _mm_setzero_ps(); + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,0 + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovLH +( + float FovAngleY, + float AspectRatio, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(FovAngleY, 0.0f, 0.00001f * 2.0f)); + assert(!XMScalarNearEqual(AspectRatio, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + float fRange = FarZ / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = Width; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = Height; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float fRange = FarZ / (FarZ - NearZ); + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(Width, Zero, 0); + M.r[1] = vsetq_lane_f32(Height, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float fRange = FarZ / (FarZ - NearZ); + // Note: This is recorded on the stack + float Height = CosFov / SinFov; + XMVECTOR rMem = { + Height / AspectRatio, + Height, + fRange, + -fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // CosFov / SinFov,0,0,0 + XMMATRIX M; + M.r[0] = vTemp; + // 0,Height / AspectRatio,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,1.0f + vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovRH +( + float FovAngleY, + float AspectRatio, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + 
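+    // Editor's note -- usage sketch, not part of the upstream header: the LH
+    // variant above is the usual Direct3D projection setup, e.g.:
+    //   XMMATRIX proj = XMMatrixPerspectiveFovLH(
+    //       XM_PIDIV4,          // vertical field of view in radians
+    //       1280.0f / 720.0f,   // aspect ratio = width / height
+    //       0.1f,               // near plane (must be > 0)
+    //       100.0f);            // far plane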
assert(!XMScalarNearEqual(FovAngleY, 0.0f, 0.00001f * 2.0f)); + assert(!XMScalarNearEqual(AspectRatio, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + float fRange = FarZ / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = Width; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = Height; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = -1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + float fRange = FarZ / (NearZ - FarZ); + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(Width, Zero, 0); + M.r[1] = vsetq_lane_f32(Height, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMNegIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + float fRange = FarZ / (NearZ - FarZ); + // Note: This is recorded on the stack + float Height = CosFov / SinFov; + XMVECTOR rMem = { + Height / AspectRatio, + Height, + fRange, + fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // CosFov / SinFov,0,0,0 + XMMATRIX M; + M.r[0] = vTemp; + // 0,Height / AspectRatio,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,-1.0f + vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMNegIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,-1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,fRange * NearZ,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterLH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ * ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ * ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = -(ViewLeft + ViewRight) * ReciprocalWidth; + M.m[2][1] = -(ViewTop + ViewBottom) * ReciprocalHeight; + M.m[2][2] = fRange; + M.m[2][3] = 1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif 
defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (FarZ - NearZ); + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ * ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ * ReciprocalHeight, Zero, 1); + M.r[2] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + 1.0f); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ * ReciprocalWidth, + TwoNearZ * ReciprocalHeight, + -fRange * NearZ, + 0 + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ*ReciprocalWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ*ReciprocalHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // 0,0,fRange,1.0f + M.r[2] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + 1.0f); + // 0,0,-fRange * NearZ,0.0f + vValues = _mm_and_ps(vValues, g_XMMaskZ); + M.r[3] = vValues; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterRH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ * ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ * ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = (ViewLeft + ViewRight) * ReciprocalWidth; + M.m[2][1] = (ViewTop + ViewBottom) * ReciprocalHeight; + M.m[2][2] = fRange; + M.m[2][3] = -1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (NearZ - FarZ); + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ * ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ * ReciprocalHeight, Zero, 1); + M.r[2] = XMVectorSet((ViewLeft + ViewRight) * ReciprocalWidth, + (ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + -1.0f); + M.r[3] = vsetq_lane_f32(fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight 
= 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ * ReciprocalWidth, + TwoNearZ * ReciprocalHeight, + fRange * NearZ, + 0 + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ*ReciprocalWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ*ReciprocalHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // 0,0,fRange,1.0f + M.r[2] = XMVectorSet((ViewLeft + ViewRight) * ReciprocalWidth, + (ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + -1.0f); + // 0,0,-fRange * NearZ,0.0f + vValues = _mm_and_ps(vValues, g_XMMaskZ); + M.r[3] = vValues; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicLH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float fRange = 1.0f / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = 2.0f / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 2.0f / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fRange = 1.0f / (FarZ - NearZ); + + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(2.0f / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(2.0f / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, g_XMIdentityR3.v, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fRange = 1.0f / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + 2.0f / ViewWidth, + 2.0f / ViewHeight, + fRange, + -fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // 2.0f / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,2.0f / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,1.0f + vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicRH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float fRange = 1.0f / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = 2.0f / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + 
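+    // Editor's note: the RH variants use fRange = 1 / (NearZ - FarZ) where
+    // the LH version above uses 1 / (FarZ - NearZ), so depth still maps to
+    // [0, 1] when the camera looks down -z.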
M.m[1][0] = 0.0f; + M.m[1][1] = 2.0f / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fRange = 1.0f / (NearZ - FarZ); + + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(2.0f / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(2.0f / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = vsetq_lane_f32(fRange * NearZ, g_XMIdentityR3.v, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fRange = 1.0f / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + 2.0f / ViewWidth, + 2.0f / ViewHeight, + fRange, + fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // 2.0f / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,2.0f / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=fRange * NearZ,0,1.0f + vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,fRange * NearZ,1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterLH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = ReciprocalWidth + ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = ReciprocalHeight + ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.m[3][0] = -(ViewLeft + ViewRight) * ReciprocalWidth; + M.m[3][1] = -(ViewTop + ViewBottom) * ReciprocalHeight; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (FarZ - NearZ); + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(ReciprocalWidth + ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(ReciprocalHeight + ReciprocalHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + -fRange * NearZ, + 1.0f); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float fReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR 
rMem = { + fReciprocalWidth, + fReciprocalHeight, + fRange, + 1.0f + }; + XMVECTOR rMem2 = { + -(ViewLeft + ViewRight), + -(ViewTop + ViewBottom), + -NearZ, + 1.0f + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // fReciprocalWidth*2,0,0,0 + vTemp = _mm_add_ss(vTemp, vTemp); + M.r[0] = vTemp; + // 0,fReciprocalHeight*2,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + vTemp = _mm_add_ps(vTemp, vTemp); + M.r[1] = vTemp; + // 0,0,fRange,0.0f + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskZ); + M.r[2] = vTemp; + // -(ViewLeft + ViewRight)*fReciprocalWidth,-(ViewTop + ViewBottom)*fReciprocalHeight,fRange*-NearZ,1.0f + vValues = _mm_mul_ps(vValues, rMem2); + M.r[3] = vValues; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterRH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = ReciprocalWidth + ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = ReciprocalHeight + ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.r[3] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange * NearZ, + 1.0f); + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (NearZ - FarZ); + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(ReciprocalWidth + ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(ReciprocalHeight + ReciprocalHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange * NearZ, + 1.0f); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float fReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + fReciprocalWidth, + fReciprocalHeight, + fRange, + 1.0f + }; + XMVECTOR rMem2 = { + -(ViewLeft + ViewRight), + -(ViewTop + ViewBottom), + NearZ, + 1.0f + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // fReciprocalWidth*2,0,0,0 + vTemp = _mm_add_ss(vTemp, vTemp); + M.r[0] = vTemp; + // 0,fReciprocalHeight*2,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + vTemp = _mm_add_ps(vTemp, vTemp); + M.r[1] = vTemp; + // 0,0,fRange,0.0f + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskZ); + M.r[2] = vTemp; + // -(ViewLeft + ViewRight)*fReciprocalWidth,-(ViewTop + 
ViewBottom)*fReciprocalHeight,fRange*-NearZ,1.0f
+    vValues = _mm_mul_ps(vValues, rMem2);
+    M.r[3] = vValues;
+    return M;
+#endif
+}
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+/****************************************************************************
+ *
+ * XMMATRIX operators and methods
+ *
+ ****************************************************************************/
+
+ //------------------------------------------------------------------------------
+
+inline XMMATRIX::XMMATRIX
+(
+    float m00, float m01, float m02, float m03,
+    float m10, float m11, float m12, float m13,
+    float m20, float m21, float m22, float m23,
+    float m30, float m31, float m32, float m33
+) noexcept
+{
+    r[0] = XMVectorSet(m00, m01, m02, m03);
+    r[1] = XMVectorSet(m10, m11, m12, m13);
+    r[2] = XMVectorSet(m20, m21, m22, m23);
+    r[3] = XMVectorSet(m30, m31, m32, m33);
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMMATRIX::XMMATRIX(const float* pArray) noexcept
+{
+    assert(pArray != nullptr);
+    r[0] = XMLoadFloat4(reinterpret_cast<const XMFLOAT4*>(pArray));
+    r[1] = XMLoadFloat4(reinterpret_cast<const XMFLOAT4*>(pArray + 4));
+    r[2] = XMLoadFloat4(reinterpret_cast<const XMFLOAT4*>(pArray + 8));
+    r[3] = XMLoadFloat4(reinterpret_cast<const XMFLOAT4*>(pArray + 12));
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX XMMATRIX::operator- () const noexcept
+{
+    XMMATRIX R;
+    R.r[0] = XMVectorNegate(r[0]);
+    R.r[1] = XMVectorNegate(r[1]);
+    R.r[2] = XMVectorNegate(r[2]);
+    R.r[3] = XMVectorNegate(r[3]);
+    return R;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX& XM_CALLCONV XMMATRIX::operator+= (FXMMATRIX M) noexcept
+{
+    r[0] = XMVectorAdd(r[0], M.r[0]);
+    r[1] = XMVectorAdd(r[1], M.r[1]);
+    r[2] = XMVectorAdd(r[2], M.r[2]);
+    r[3] = XMVectorAdd(r[3], M.r[3]);
+    return *this;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX& XM_CALLCONV XMMATRIX::operator-= (FXMMATRIX M) noexcept
+{
+    r[0] = XMVectorSubtract(r[0], M.r[0]);
+    r[1] = XMVectorSubtract(r[1], M.r[1]);
+    r[2] = XMVectorSubtract(r[2], M.r[2]);
+    r[3] = XMVectorSubtract(r[3], M.r[3]);
+    return *this;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX& XM_CALLCONV XMMATRIX::operator*=(FXMMATRIX M) noexcept
+{
+    *this = XMMatrixMultiply(*this, M);
+    return *this;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX& XMMATRIX::operator*= (float S) noexcept
+{
+    r[0] = XMVectorScale(r[0], S);
+    r[1] = XMVectorScale(r[1], S);
+    r[2] = XMVectorScale(r[2], S);
+    r[3] = XMVectorScale(r[3], S);
+    return *this;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMMATRIX& XMMATRIX::operator/= (float S) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTOR vS = XMVectorReplicate(S);
+    r[0] = XMVectorDivide(r[0], vS);
+    r[1] = XMVectorDivide(r[1], vS);
+    r[2] = XMVectorDivide(r[2], vS);
+    r[3] = XMVectorDivide(r[3], vS);
+    return *this;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
+    float32x4_t vS = vdupq_n_f32(S);
+    r[0] = vdivq_f32(r[0], vS);
+    r[1] = vdivq_f32(r[1], vS);
+    r[2] = vdivq_f32(r[2], vS);
+    r[3] = vdivq_f32(r[3], vS);
+#else
+    // 2 iterations of Newton-Raphson refinement of reciprocal
+    float32x2_t vS = vdup_n_f32(S);
+    float32x2_t R0 = vrecpe_f32(vS);
+
float32x2_t S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + float32x4_t Reciprocal = vcombine_u32(R0, R0); + r[0] = vmulq_f32(r[0], Reciprocal); + r[1] = vmulq_f32(r[1], Reciprocal); + r[2] = vmulq_f32(r[2], Reciprocal); + r[3] = vmulq_f32(r[3], Reciprocal); +#endif + return *this; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 vS = _mm_set_ps1(S); + r[0] = _mm_div_ps(r[0], vS); + r[1] = _mm_div_ps(r[1], vS); + r[2] = _mm_div_ps(r[2], vS); + r[3] = _mm_div_ps(r[3], vS); + return *this; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMATRIX::operator+ (FXMMATRIX M) const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorAdd(r[0], M.r[0]); + R.r[1] = XMVectorAdd(r[1], M.r[1]); + R.r[2] = XMVectorAdd(r[2], M.r[2]); + R.r[3] = XMVectorAdd(r[3], M.r[3]); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMATRIX::operator- (FXMMATRIX M) const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorSubtract(r[0], M.r[0]); + R.r[1] = XMVectorSubtract(r[1], M.r[1]); + R.r[2] = XMVectorSubtract(r[2], M.r[2]); + R.r[3] = XMVectorSubtract(r[3], M.r[3]); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMATRIX::operator*(FXMMATRIX M) const noexcept +{ + return XMMatrixMultiply(*this, M); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XMMATRIX::operator* (float S) const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorScale(r[0], S); + R.r[1] = XMVectorScale(r[1], S); + R.r[2] = XMVectorScale(r[2], S); + R.r[3] = XMVectorScale(r[3], S); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XMMATRIX::operator/ (float S) const noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR vS = XMVectorReplicate(S); + XMMATRIX R; + R.r[0] = XMVectorDivide(r[0], vS); + R.r[1] = XMVectorDivide(r[1], vS); + R.r[2] = XMVectorDivide(r[2], vS); + R.r[3] = XMVectorDivide(r[3], vS); + return R; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + float32x4_t vS = vdupq_n_f32(S); + XMMATRIX R; + R.r[0] = vdivq_f32(r[0], vS); + R.r[1] = vdivq_f32(r[1], vS); + R.r[2] = vdivq_f32(r[2], vS); + R.r[3] = vdivq_f32(r[3], vS); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x2_t vS = vdup_n_f32(S); + float32x2_t R0 = vrecpe_f32(vS); + float32x2_t S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + float32x4_t Reciprocal = vcombine_u32(R0, R0); + XMMATRIX R; + R.r[0] = vmulq_f32(r[0], Reciprocal); + R.r[1] = vmulq_f32(r[1], Reciprocal); + R.r[2] = vmulq_f32(r[2], Reciprocal); + R.r[3] = vmulq_f32(r[3], Reciprocal); +#endif + return R; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 vS = _mm_set_ps1(S); + XMMATRIX R; + R.r[0] = _mm_div_ps(r[0], vS); + R.r[1] = _mm_div_ps(r[1], vS); + R.r[2] = _mm_div_ps(r[2], vS); + R.r[3] = _mm_div_ps(r[3], vS); + return R; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV operator* +( + float S, + FXMMATRIX M +) noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorScale(M.r[0], S); + R.r[1] = XMVectorScale(M.r[1], S); + R.r[2] = XMVectorScale(M.r[2], S); + R.r[3] = XMVectorScale(M.r[3], S); + return R; 
+} + +/**************************************************************************** + * + * XMFLOAT3X3 operators + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT3X3::XMFLOAT3X3(const float* pArray) noexcept +{ + assert(pArray != nullptr); + for (size_t Row = 0; Row < 3; Row++) + { + for (size_t Column = 0; Column < 3; Column++) + { + m[Row][Column] = pArray[Row * 3 + Column]; + } + } +} + +/**************************************************************************** + * + * XMFLOAT4X3 operators + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4X3::XMFLOAT4X3(const float* pArray) noexcept +{ + assert(pArray != nullptr); + + m[0][0] = pArray[0]; + m[0][1] = pArray[1]; + m[0][2] = pArray[2]; + + m[1][0] = pArray[3]; + m[1][1] = pArray[4]; + m[1][2] = pArray[5]; + + m[2][0] = pArray[6]; + m[2][1] = pArray[7]; + m[2][2] = pArray[8]; + + m[3][0] = pArray[9]; + m[3][1] = pArray[10]; + m[3][2] = pArray[11]; +} + +/**************************************************************************** +* +* XMFLOAT3X4 operators +* +****************************************************************************/ + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT3X4::XMFLOAT3X4(const float* pArray) noexcept +{ + assert(pArray != nullptr); + + m[0][0] = pArray[0]; + m[0][1] = pArray[1]; + m[0][2] = pArray[2]; + m[0][3] = pArray[3]; + + m[1][0] = pArray[4]; + m[1][1] = pArray[5]; + m[1][2] = pArray[6]; + m[1][3] = pArray[7]; + + m[2][0] = pArray[8]; + m[2][1] = pArray[9]; + m[2][2] = pArray[10]; + m[2][3] = pArray[11]; +} + +/**************************************************************************** + * + * XMFLOAT4X4 operators + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4X4::XMFLOAT4X4(const float* pArray) noexcept +{ + assert(pArray != nullptr); + + m[0][0] = pArray[0]; + m[0][1] = pArray[1]; + m[0][2] = pArray[2]; + m[0][3] = pArray[3]; + + m[1][0] = pArray[4]; + m[1][1] = pArray[5]; + m[1][2] = pArray[6]; + m[1][3] = pArray[7]; + + m[2][0] = pArray[8]; + m[2][1] = pArray[9]; + m[2][2] = pArray[10]; + m[2][3] = pArray[11]; + + m[3][0] = pArray[12]; + m[3][1] = pArray[13]; + m[3][2] = pArray[14]; + m[3][3] = pArray[15]; +} + diff --git a/include/directxmath/directxmathmisc.inl b/include/directxmath/directxmathmisc.inl new file mode 100644 index 0000000..aca863b --- /dev/null +++ b/include/directxmath/directxmathmisc.inl @@ -0,0 +1,2425 @@ +//------------------------------------------------------------------------------------- +// DirectXMathMisc.inl -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+// +// http://go.microsoft.com/fwlink/?LinkID=615560 +//------------------------------------------------------------------------------------- + +#pragma once + +/**************************************************************************** + * + * Quaternion + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMQuaternionEqual +( + FXMVECTOR Q1, + FXMVECTOR Q2 +) noexcept +{ + return XMVector4Equal(Q1, Q2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMQuaternionNotEqual +( + FXMVECTOR Q1, + FXMVECTOR Q2 +) noexcept +{ + return XMVector4NotEqual(Q1, Q2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMQuaternionIsNaN(FXMVECTOR Q) noexcept +{ + return XMVector4IsNaN(Q); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMQuaternionIsInfinite(FXMVECTOR Q) noexcept +{ + return XMVector4IsInfinite(Q); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMQuaternionIsIdentity(FXMVECTOR Q) noexcept +{ + return XMVector4Equal(Q, g_XMIdentityR3.v); +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionDot +( + FXMVECTOR Q1, + FXMVECTOR Q2 +) noexcept +{ + return XMVector4Dot(Q1, Q2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionMultiply +( + FXMVECTOR Q1, + FXMVECTOR Q2 +) noexcept +{ + // Returns the product Q2*Q1 (which is the concatenation of a rotation Q1 followed by the rotation Q2) + + // [ (Q2.w * Q1.x) + (Q2.x * Q1.w) + (Q2.y * Q1.z) - (Q2.z * Q1.y), + // (Q2.w * Q1.y) - (Q2.x * Q1.z) + (Q2.y * Q1.w) + (Q2.z * Q1.x), + // (Q2.w * Q1.z) + (Q2.x * Q1.y) - (Q2.y * Q1.x) + (Q2.z * Q1.w), + // (Q2.w * Q1.w) - (Q2.x * Q1.x) - (Q2.y * Q1.y) - (Q2.z * Q1.z) ] + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + (Q2.vector4_f32[3] * Q1.vector4_f32[0]) + (Q2.vector4_f32[0] * Q1.vector4_f32[3]) + (Q2.vector4_f32[1] * Q1.vector4_f32[2]) - (Q2.vector4_f32[2] * Q1.vector4_f32[1]), + (Q2.vector4_f32[3] * Q1.vector4_f32[1]) - (Q2.vector4_f32[0] * Q1.vector4_f32[2]) + (Q2.vector4_f32[1] * Q1.vector4_f32[3]) + (Q2.vector4_f32[2] * Q1.vector4_f32[0]), + (Q2.vector4_f32[3] * Q1.vector4_f32[2]) + (Q2.vector4_f32[0] * Q1.vector4_f32[1]) - (Q2.vector4_f32[1] * Q1.vector4_f32[0]) + (Q2.vector4_f32[2] * Q1.vector4_f32[3]), + (Q2.vector4_f32[3] * Q1.vector4_f32[3]) - (Q2.vector4_f32[0] * Q1.vector4_f32[0]) - (Q2.vector4_f32[1] * Q1.vector4_f32[1]) - (Q2.vector4_f32[2] * Q1.vector4_f32[2]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 ControlWZYX = { { { 1.0f, -1.0f, 1.0f, -1.0f } } }; + static const XMVECTORF32 ControlZWXY = { { { 1.0f, 1.0f, -1.0f, -1.0f } } }; + static const XMVECTORF32 ControlYXWZ = { { { -1.0f, 
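+    // Editor's note: the three Control constants apply the +/- sign pattern
+    // of the product expansion in the comment above to the WZYX, ZWXY and
+    // YXWZ rearrangements of Q1.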
1.0f, 1.0f, -1.0f } } }; + + float32x2_t Q2L = vget_low_f32(Q2); + float32x2_t Q2H = vget_high_f32(Q2); + + float32x4_t Q2X = vdupq_lane_f32(Q2L, 0); + float32x4_t Q2Y = vdupq_lane_f32(Q2L, 1); + float32x4_t Q2Z = vdupq_lane_f32(Q2H, 0); + XMVECTOR vResult = vmulq_lane_f32(Q1, Q2H, 1); + + // Mul by Q1WZYX + float32x4_t vTemp = vrev64q_f32(Q1); + vTemp = vcombine_f32(vget_high_f32(vTemp), vget_low_f32(vTemp)); + Q2X = vmulq_f32(Q2X, vTemp); + vResult = vmlaq_f32(vResult, Q2X, ControlWZYX); + + // Mul by Q1ZWXY + vTemp = vrev64q_u32(vTemp); + Q2Y = vmulq_f32(Q2Y, vTemp); + vResult = vmlaq_f32(vResult, Q2Y, ControlZWXY); + + // Mul by Q1YXWZ + vTemp = vrev64q_u32(vTemp); + vTemp = vcombine_f32(vget_high_f32(vTemp), vget_low_f32(vTemp)); + Q2Z = vmulq_f32(Q2Z, vTemp); + vResult = vmlaq_f32(vResult, Q2Z, ControlYXWZ); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 ControlWZYX = { { { 1.0f, -1.0f, 1.0f, -1.0f } } }; + static const XMVECTORF32 ControlZWXY = { { { 1.0f, 1.0f, -1.0f, -1.0f } } }; + static const XMVECTORF32 ControlYXWZ = { { { -1.0f, 1.0f, 1.0f, -1.0f } } }; + // Copy to SSE registers and use as few as possible for x86 + XMVECTOR Q2X = Q2; + XMVECTOR Q2Y = Q2; + XMVECTOR Q2Z = Q2; + XMVECTOR vResult = Q2; + // Splat with one instruction + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 3, 3, 3)); + Q2X = XM_PERMUTE_PS(Q2X, _MM_SHUFFLE(0, 0, 0, 0)); + Q2Y = XM_PERMUTE_PS(Q2Y, _MM_SHUFFLE(1, 1, 1, 1)); + Q2Z = XM_PERMUTE_PS(Q2Z, _MM_SHUFFLE(2, 2, 2, 2)); + // Retire Q1 and perform Q1*Q2W + vResult = _mm_mul_ps(vResult, Q1); + XMVECTOR Q1Shuffle = Q1; + // Shuffle the copies of Q1 + Q1Shuffle = XM_PERMUTE_PS(Q1Shuffle, _MM_SHUFFLE(0, 1, 2, 3)); + // Mul by Q1WZYX + Q2X = _mm_mul_ps(Q2X, Q1Shuffle); + Q1Shuffle = XM_PERMUTE_PS(Q1Shuffle, _MM_SHUFFLE(2, 3, 0, 1)); + // Flip the signs on y and z + vResult = XM_FMADD_PS(Q2X, ControlWZYX, vResult); + // Mul by Q1ZWXY + Q2Y = _mm_mul_ps(Q2Y, Q1Shuffle); + Q1Shuffle = XM_PERMUTE_PS(Q1Shuffle, _MM_SHUFFLE(0, 1, 2, 3)); + // Flip the signs on z and w + Q2Y = _mm_mul_ps(Q2Y, ControlZWXY); + // Mul by Q1YXWZ + Q2Z = _mm_mul_ps(Q2Z, Q1Shuffle); + // Flip the signs on x and w + Q2Y = XM_FMADD_PS(Q2Z, ControlYXWZ, Q2Y); + vResult = _mm_add_ps(vResult, Q2Y); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionLengthSq(FXMVECTOR Q) noexcept +{ + return XMVector4LengthSq(Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionReciprocalLength(FXMVECTOR Q) noexcept +{ + return XMVector4ReciprocalLength(Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionLength(FXMVECTOR Q) noexcept +{ + return XMVector4Length(Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionNormalizeEst(FXMVECTOR Q) noexcept +{ + return XMVector4NormalizeEst(Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionNormalize(FXMVECTOR Q) noexcept +{ + return XMVector4Normalize(Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionConjugate(FXMVECTOR Q) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + -Q.vector4_f32[0], + 
-Q.vector4_f32[1], + -Q.vector4_f32[2], + Q.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 NegativeOne3 = { { { -1.0f, -1.0f, -1.0f, 1.0f } } }; + return vmulq_f32(Q, NegativeOne3.v); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 NegativeOne3 = { { { -1.0f, -1.0f, -1.0f, 1.0f } } }; + return _mm_mul_ps(Q, NegativeOne3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionInverse(FXMVECTOR Q) noexcept +{ + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR L = XMVector4LengthSq(Q); + XMVECTOR Conjugate = XMQuaternionConjugate(Q); + + XMVECTOR Control = XMVectorLessOrEqual(L, g_XMEpsilon.v); + + XMVECTOR Result = XMVectorDivide(Conjugate, L); + + Result = XMVectorSelect(Result, Zero, Control); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionLn(FXMVECTOR Q) noexcept +{ + static const XMVECTORF32 OneMinusEpsilon = { { { 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f } } }; + + XMVECTOR QW = XMVectorSplatW(Q); + XMVECTOR Q0 = XMVectorSelect(g_XMSelect1110.v, Q, g_XMSelect1110.v); + + XMVECTOR ControlW = XMVectorInBounds(QW, OneMinusEpsilon.v); + + XMVECTOR Theta = XMVectorACos(QW); + XMVECTOR SinTheta = XMVectorSin(Theta); + + XMVECTOR S = XMVectorDivide(Theta, SinTheta); + + XMVECTOR Result = XMVectorMultiply(Q0, S); + Result = XMVectorSelect(Q0, Result, ControlW); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionExp(FXMVECTOR Q) noexcept +{ + XMVECTOR Theta = XMVector3Length(Q); + + XMVECTOR SinTheta, CosTheta; + XMVectorSinCos(&SinTheta, &CosTheta, Theta); + + XMVECTOR S = XMVectorDivide(SinTheta, Theta); + + XMVECTOR Result = XMVectorMultiply(Q, S); + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR Control = XMVectorNearEqual(Theta, Zero, g_XMEpsilon.v); + Result = XMVectorSelect(Result, Q, Control); + + Result = XMVectorSelect(CosTheta, Result, g_XMSelect1110.v); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionSlerp +( + FXMVECTOR Q0, + FXMVECTOR Q1, + float t +) noexcept +{ + XMVECTOR T = XMVectorReplicate(t); + return XMQuaternionSlerpV(Q0, Q1, T); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionSlerpV +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR T +) noexcept +{ + assert((XMVectorGetY(T) == XMVectorGetX(T)) && (XMVectorGetZ(T) == XMVectorGetX(T)) && (XMVectorGetW(T) == XMVectorGetX(T))); + + // Result = Q0 * sin((1.0 - t) * Omega) / sin(Omega) + Q1 * sin(t * Omega) / sin(Omega) + +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + const XMVECTORF32 OneMinusEpsilon = { { { 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f } } }; + + XMVECTOR CosOmega = XMQuaternionDot(Q0, Q1); + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR Control = XMVectorLess(CosOmega, Zero); + XMVECTOR Sign = XMVectorSelect(g_XMOne.v, g_XMNegativeOne.v, Control); + + CosOmega = XMVectorMultiply(CosOmega, Sign); + + Control = XMVectorLess(CosOmega, OneMinusEpsilon); + + XMVECTOR SinOmega = XMVectorNegativeMultiplySubtract(CosOmega, CosOmega, g_XMOne.v); + SinOmega = XMVectorSqrt(SinOmega); + + XMVECTOR Omega 
= XMVectorATan2(SinOmega, CosOmega); + + XMVECTOR SignMask = XMVectorSplatSignMask(); + XMVECTOR V01 = XMVectorShiftLeft(T, Zero, 2); + SignMask = XMVectorShiftLeft(SignMask, Zero, 3); + V01 = XMVectorXorInt(V01, SignMask); + V01 = XMVectorAdd(g_XMIdentityR0.v, V01); + + XMVECTOR InvSinOmega = XMVectorReciprocal(SinOmega); + + XMVECTOR S0 = XMVectorMultiply(V01, Omega); + S0 = XMVectorSin(S0); + S0 = XMVectorMultiply(S0, InvSinOmega); + + S0 = XMVectorSelect(V01, S0, Control); + + XMVECTOR S1 = XMVectorSplatY(S0); + S0 = XMVectorSplatX(S0); + + S1 = XMVectorMultiply(S1, Sign); + + XMVECTOR Result = XMVectorMultiply(Q0, S0); + Result = XMVectorMultiplyAdd(Q1, S1, Result); + + return Result; + +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 OneMinusEpsilon = { { { 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f } } }; + static const XMVECTORU32 SignMask2 = { { { 0x80000000, 0x00000000, 0x00000000, 0x00000000 } } }; + + XMVECTOR CosOmega = XMQuaternionDot(Q0, Q1); + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR Control = XMVectorLess(CosOmega, Zero); + XMVECTOR Sign = XMVectorSelect(g_XMOne, g_XMNegativeOne, Control); + + CosOmega = _mm_mul_ps(CosOmega, Sign); + + Control = XMVectorLess(CosOmega, OneMinusEpsilon); + + XMVECTOR SinOmega = _mm_mul_ps(CosOmega, CosOmega); + SinOmega = _mm_sub_ps(g_XMOne, SinOmega); + SinOmega = _mm_sqrt_ps(SinOmega); + + XMVECTOR Omega = XMVectorATan2(SinOmega, CosOmega); + + XMVECTOR V01 = XM_PERMUTE_PS(T, _MM_SHUFFLE(2, 3, 0, 1)); + V01 = _mm_and_ps(V01, g_XMMaskXY); + V01 = _mm_xor_ps(V01, SignMask2); + V01 = _mm_add_ps(g_XMIdentityR0, V01); + + XMVECTOR S0 = _mm_mul_ps(V01, Omega); + S0 = XMVectorSin(S0); + S0 = _mm_div_ps(S0, SinOmega); + + S0 = XMVectorSelect(V01, S0, Control); + + XMVECTOR S1 = XMVectorSplatY(S0); + S0 = XMVectorSplatX(S0); + + S1 = _mm_mul_ps(S1, Sign); + XMVECTOR Result = _mm_mul_ps(Q0, S0); + S1 = _mm_mul_ps(S1, Q1); + Result = _mm_add_ps(Result, S1); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionSquad +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + GXMVECTOR Q3, + float t +) noexcept +{ + XMVECTOR T = XMVectorReplicate(t); + return XMQuaternionSquadV(Q0, Q1, Q2, Q3, T); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionSquadV +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + GXMVECTOR Q3, + HXMVECTOR T +) noexcept +{ + assert((XMVectorGetY(T) == XMVectorGetX(T)) && (XMVectorGetZ(T) == XMVectorGetX(T)) && (XMVectorGetW(T) == XMVectorGetX(T))); + + XMVECTOR TP = T; + const XMVECTOR Two = XMVectorSplatConstant(2, 0); + + XMVECTOR Q03 = XMQuaternionSlerpV(Q0, Q3, T); + XMVECTOR Q12 = XMQuaternionSlerpV(Q1, Q2, T); + + TP = XMVectorNegativeMultiplySubtract(TP, TP, TP); + TP = XMVectorMultiply(TP, Two); + + XMVECTOR Result = XMQuaternionSlerpV(Q03, Q12, TP); + + return Result; +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMQuaternionSquadSetup +( + XMVECTOR* pA, + XMVECTOR* pB, + XMVECTOR* pC, + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + GXMVECTOR Q3 +) noexcept +{ + assert(pA); + assert(pB); + assert(pC); + + XMVECTOR LS12 = XMQuaternionLengthSq(XMVectorAdd(Q1, Q2)); + XMVECTOR LD12 = XMQuaternionLengthSq(XMVectorSubtract(Q1, Q2)); + XMVECTOR SQ2 = XMVectorNegate(Q2); + + XMVECTOR Control1 = XMVectorLess(LS12, 
LD12); + SQ2 = XMVectorSelect(Q2, SQ2, Control1); + + XMVECTOR LS01 = XMQuaternionLengthSq(XMVectorAdd(Q0, Q1)); + XMVECTOR LD01 = XMQuaternionLengthSq(XMVectorSubtract(Q0, Q1)); + XMVECTOR SQ0 = XMVectorNegate(Q0); + + XMVECTOR LS23 = XMQuaternionLengthSq(XMVectorAdd(SQ2, Q3)); + XMVECTOR LD23 = XMQuaternionLengthSq(XMVectorSubtract(SQ2, Q3)); + XMVECTOR SQ3 = XMVectorNegate(Q3); + + XMVECTOR Control0 = XMVectorLess(LS01, LD01); + XMVECTOR Control2 = XMVectorLess(LS23, LD23); + + SQ0 = XMVectorSelect(Q0, SQ0, Control0); + SQ3 = XMVectorSelect(Q3, SQ3, Control2); + + XMVECTOR InvQ1 = XMQuaternionInverse(Q1); + XMVECTOR InvQ2 = XMQuaternionInverse(SQ2); + + XMVECTOR LnQ0 = XMQuaternionLn(XMQuaternionMultiply(InvQ1, SQ0)); + XMVECTOR LnQ2 = XMQuaternionLn(XMQuaternionMultiply(InvQ1, SQ2)); + XMVECTOR LnQ1 = XMQuaternionLn(XMQuaternionMultiply(InvQ2, Q1)); + XMVECTOR LnQ3 = XMQuaternionLn(XMQuaternionMultiply(InvQ2, SQ3)); + + const XMVECTOR NegativeOneQuarter = XMVectorSplatConstant(-1, 2); + + XMVECTOR ExpQ02 = XMVectorMultiply(XMVectorAdd(LnQ0, LnQ2), NegativeOneQuarter); + XMVECTOR ExpQ13 = XMVectorMultiply(XMVectorAdd(LnQ1, LnQ3), NegativeOneQuarter); + ExpQ02 = XMQuaternionExp(ExpQ02); + ExpQ13 = XMQuaternionExp(ExpQ13); + + *pA = XMQuaternionMultiply(Q1, ExpQ02); + *pB = XMQuaternionMultiply(SQ2, ExpQ13); + *pC = SQ2; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionBaryCentric +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + float f, + float g +) noexcept +{ + float s = f + g; + + XMVECTOR Result; + if ((s < 0.00001f) && (s > -0.00001f)) + { + Result = Q0; + } + else + { + XMVECTOR Q01 = XMQuaternionSlerp(Q0, Q1, s); + XMVECTOR Q02 = XMQuaternionSlerp(Q0, Q2, s); + + Result = XMQuaternionSlerp(Q01, Q02, g / s); + } + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionBaryCentricV +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + GXMVECTOR F, + HXMVECTOR G +) noexcept +{ + assert((XMVectorGetY(F) == XMVectorGetX(F)) && (XMVectorGetZ(F) == XMVectorGetX(F)) && (XMVectorGetW(F) == XMVectorGetX(F))); + assert((XMVectorGetY(G) == XMVectorGetX(G)) && (XMVectorGetZ(G) == XMVectorGetX(G)) && (XMVectorGetW(G) == XMVectorGetX(G))); + + const XMVECTOR Epsilon = XMVectorSplatConstant(1, 16); + + XMVECTOR S = XMVectorAdd(F, G); + + XMVECTOR Result; + if (XMVector4InBounds(S, Epsilon)) + { + Result = Q0; + } + else + { + XMVECTOR Q01 = XMQuaternionSlerpV(Q0, Q1, S); + XMVECTOR Q02 = XMQuaternionSlerpV(Q0, Q2, S); + XMVECTOR GS = XMVectorReciprocal(S); + GS = XMVectorMultiply(G, GS); + + Result = XMQuaternionSlerpV(Q01, Q02, GS); + } + + return Result; +} + +//------------------------------------------------------------------------------ +// Transformation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionIdentity() noexcept +{ + return g_XMIdentityR3.v; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYaw +( + float Pitch, + float Yaw, + float Roll +) noexcept +{ + XMVECTOR Angles = XMVectorSet(Pitch, Yaw, Roll, 0.0f); + XMVECTOR Q = XMQuaternionRotationRollPitchYawFromVector(Angles); + return Q; +} + 
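+// Editorial note (not part of the upstream header): a hedged usage sketch for
+// the quaternion helpers above; the variable names are illustrative only.
+// XMQuaternionMultiply(A, B) returns the product B*A, i.e. rotation A is
+// applied first, then B:
+//
+//     XMVECTOR pitchUp = XMQuaternionRotationRollPitchYaw(XM_PIDIV4, 0.0f, 0.0f);
+//     XMVECTOR yawHalf = XMQuaternionRotationRollPitchYaw(0.0f, XM_PI, 0.0f);
+//     XMVECTOR both    = XMQuaternionMultiply(pitchUp, yawHalf); // pitch, then yaw
+//     both = XMQuaternionNormalize(both); // guard against drift when chaining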
+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYawFromVector +( + FXMVECTOR Angles // +) noexcept +{ + static const XMVECTORF32 Sign = { { { 1.0f, -1.0f, -1.0f, 1.0f } } }; + + XMVECTOR HalfAngles = XMVectorMultiply(Angles, g_XMOneHalf.v); + + XMVECTOR SinAngles, CosAngles; + XMVectorSinCos(&SinAngles, &CosAngles, HalfAngles); + + XMVECTOR P0 = XMVectorPermute(SinAngles, CosAngles); + XMVECTOR Y0 = XMVectorPermute(SinAngles, CosAngles); + XMVECTOR R0 = XMVectorPermute(SinAngles, CosAngles); + XMVECTOR P1 = XMVectorPermute(CosAngles, SinAngles); + XMVECTOR Y1 = XMVectorPermute(CosAngles, SinAngles); + XMVECTOR R1 = XMVectorPermute(CosAngles, SinAngles); + + XMVECTOR Q1 = XMVectorMultiply(P1, Sign.v); + XMVECTOR Q0 = XMVectorMultiply(P0, Y0); + Q1 = XMVectorMultiply(Q1, Y1); + Q0 = XMVectorMultiply(Q0, R0); + XMVECTOR Q = XMVectorMultiplyAdd(Q1, R1, Q0); + + return Q; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationNormal +( + FXMVECTOR NormalAxis, + float Angle +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR N = XMVectorSelect(g_XMOne.v, NormalAxis, g_XMSelect1110.v); + + float SinV, CosV; + XMScalarSinCos(&SinV, &CosV, 0.5f * Angle); + + XMVECTOR Scale = XMVectorSet(SinV, SinV, SinV, CosV); + return XMVectorMultiply(N, Scale); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR N = _mm_and_ps(NormalAxis, g_XMMask3); + N = _mm_or_ps(N, g_XMIdentityR3); + XMVECTOR Scale = _mm_set_ps1(0.5f * Angle); + XMVECTOR vSine; + XMVECTOR vCosine; + XMVectorSinCos(&vSine, &vCosine, Scale); + Scale = _mm_and_ps(vSine, g_XMMask3); + vCosine = _mm_and_ps(vCosine, g_XMMaskW); + Scale = _mm_or_ps(Scale, vCosine); + N = _mm_mul_ps(N, Scale); + return N; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationAxis +( + FXMVECTOR Axis, + float Angle +) noexcept +{ + assert(!XMVector3Equal(Axis, XMVectorZero())); + assert(!XMVector3IsInfinite(Axis)); + + XMVECTOR Normal = XMVector3Normalize(Axis); + XMVECTOR Q = XMQuaternionRotationNormal(Normal, Angle); + return Q; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationMatrix(FXMMATRIX M) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 q; + float r22 = M.m[2][2]; + if (r22 <= 0.f) // x^2 + y^2 >= z^2 + w^2 + { + float dif10 = M.m[1][1] - M.m[0][0]; + float omr22 = 1.f - r22; + if (dif10 <= 0.f) // x^2 >= y^2 + { + float fourXSqr = omr22 - dif10; + float inv4x = 0.5f / sqrtf(fourXSqr); + q.f[0] = fourXSqr * inv4x; + q.f[1] = (M.m[0][1] + M.m[1][0]) * inv4x; + q.f[2] = (M.m[0][2] + M.m[2][0]) * inv4x; + q.f[3] = (M.m[1][2] - M.m[2][1]) * inv4x; + } + else // y^2 >= x^2 + { + float fourYSqr = omr22 + dif10; + float inv4y = 0.5f / sqrtf(fourYSqr); + q.f[0] = (M.m[0][1] + M.m[1][0]) * inv4y; + q.f[1] = fourYSqr * inv4y; + q.f[2] = (M.m[1][2] + M.m[2][1]) * inv4y; + q.f[3] = (M.m[2][0] - M.m[0][2]) * inv4y; + } + } + else // z^2 + w^2 >= x^2 + y^2 + { + float sum10 = M.m[1][1] + M.m[0][0]; + float opr22 = 1.f + r22; + if (sum10 <= 0.f) // z^2 >= w^2 + { + float fourZSqr = opr22 - sum10; + float inv4z = 0.5f / sqrtf(fourZSqr); + q.f[0] = (M.m[0][2] + M.m[2][0]) * inv4z; + q.f[1] = (M.m[1][2] + M.m[2][1]) * inv4z; + q.f[2] = fourZSqr * inv4z; + q.f[3] = 
(M.m[0][1] - M.m[1][0]) * inv4z; + } + else // w^2 >= z^2 + { + float fourWSqr = opr22 + sum10; + float inv4w = 0.5f / sqrtf(fourWSqr); + q.f[0] = (M.m[1][2] - M.m[2][1]) * inv4w; + q.f[1] = (M.m[2][0] - M.m[0][2]) * inv4w; + q.f[2] = (M.m[0][1] - M.m[1][0]) * inv4w; + q.f[3] = fourWSqr * inv4w; + } + } + return q.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 XMPMMP = { { { +1.0f, -1.0f, -1.0f, +1.0f } } }; + static const XMVECTORF32 XMMPMP = { { { -1.0f, +1.0f, -1.0f, +1.0f } } }; + static const XMVECTORF32 XMMMPP = { { { -1.0f, -1.0f, +1.0f, +1.0f } } }; + static const XMVECTORU32 Select0110 = { { { XM_SELECT_0, XM_SELECT_1, XM_SELECT_1, XM_SELECT_0 } } }; + static const XMVECTORU32 Select0010 = { { { XM_SELECT_0, XM_SELECT_0, XM_SELECT_1, XM_SELECT_0 } } }; + + XMVECTOR r0 = M.r[0]; + XMVECTOR r1 = M.r[1]; + XMVECTOR r2 = M.r[2]; + + XMVECTOR r00 = vdupq_lane_f32(vget_low_f32(r0), 0); + XMVECTOR r11 = vdupq_lane_f32(vget_low_f32(r1), 1); + XMVECTOR r22 = vdupq_lane_f32(vget_high_f32(r2), 0); + + // x^2 >= y^2 equivalent to r11 - r00 <= 0 + XMVECTOR r11mr00 = vsubq_f32(r11, r00); + XMVECTOR x2gey2 = vcleq_f32(r11mr00, g_XMZero); + + // z^2 >= w^2 equivalent to r11 + r00 <= 0 + XMVECTOR r11pr00 = vaddq_f32(r11, r00); + XMVECTOR z2gew2 = vcleq_f32(r11pr00, g_XMZero); + + // x^2 + y^2 >= z^2 + w^2 equivalent to r22 <= 0 + XMVECTOR x2py2gez2pw2 = vcleq_f32(r22, g_XMZero); + + // (4*x^2, 4*y^2, 4*z^2, 4*w^2) + XMVECTOR t0 = vmulq_f32(XMPMMP, r00); + XMVECTOR x2y2z2w2 = vmlaq_f32(t0, XMMPMP, r11); + x2y2z2w2 = vmlaq_f32(x2y2z2w2, XMMMPP, r22); + x2y2z2w2 = vaddq_f32(x2y2z2w2, g_XMOne); + + // (r01, r02, r12, r11) + t0 = vextq_f32(r0, r0, 1); + XMVECTOR t1 = vextq_f32(r1, r1, 1); + t0 = vcombine_f32(vget_low_f32(t0), vrev64_f32(vget_low_f32(t1))); + + // (r10, r20, r21, r10) + t1 = vextq_f32(r2, r2, 3); + XMVECTOR r10 = vdupq_lane_f32(vget_low_f32(r1), 0); + t1 = vbslq_f32(Select0110, t1, r10); + + // (4*x*y, 4*x*z, 4*y*z, unused) + XMVECTOR xyxzyz = vaddq_f32(t0, t1); + + // (r21, r20, r10, r10) + t0 = vcombine_f32(vrev64_f32(vget_low_f32(r2)), vget_low_f32(r10)); + + // (r12, r02, r01, r12) + XMVECTOR t2 = vcombine_f32(vrev64_f32(vget_high_f32(r0)), vrev64_f32(vget_low_f32(r0))); + XMVECTOR t3 = vdupq_lane_f32(vget_high_f32(r1), 0); + t1 = vbslq_f32(Select0110, t2, t3); + + // (4*x*w, 4*y*w, 4*z*w, unused) + XMVECTOR xwywzw = vsubq_f32(t0, t1); + xwywzw = vmulq_f32(XMMPMP, xwywzw); + + // (4*x*x, 4*x*y, 4*x*z, 4*x*w) + t0 = vextq_f32(xyxzyz, xyxzyz, 3); + t1 = vbslq_f32(Select0110, t0, x2y2z2w2); + t2 = vdupq_lane_f32(vget_low_f32(xwywzw), 0); + XMVECTOR tensor0 = vbslq_f32(g_XMSelect1110, t1, t2); + + // (4*y*x, 4*y*y, 4*y*z, 4*y*w) + t0 = vbslq_f32(g_XMSelect1011, xyxzyz, x2y2z2w2); + t1 = vdupq_lane_f32(vget_low_f32(xwywzw), 1); + XMVECTOR tensor1 = vbslq_f32(g_XMSelect1110, t0, t1); + + // (4*z*x, 4*z*y, 4*z*z, 4*z*w) + t0 = vextq_f32(xyxzyz, xyxzyz, 1); + t1 = vcombine_f32(vget_low_f32(t0), vrev64_f32(vget_high_f32(xwywzw))); + XMVECTOR tensor2 = vbslq_f32(Select0010, x2y2z2w2, t1); + + // (4*w*x, 4*w*y, 4*w*z, 4*w*w) + XMVECTOR tensor3 = vbslq_f32(g_XMSelect1110, xwywzw, x2y2z2w2); + + // Select the row of the tensor-product matrix that has the largest + // magnitude. + t0 = vbslq_f32(x2gey2, tensor0, tensor1); + t1 = vbslq_f32(z2gew2, tensor2, tensor3); + t2 = vbslq_f32(x2py2gez2pw2, t0, t1); + + // Normalize the row. No division by zero is possible because the + // quaternion is unit-length (and the row is a nonzero multiple of + // the quaternion). 
+ t0 = XMVector4Length(t2); + return XMVectorDivide(t2, t0); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 XMPMMP = { { { +1.0f, -1.0f, -1.0f, +1.0f } } }; + static const XMVECTORF32 XMMPMP = { { { -1.0f, +1.0f, -1.0f, +1.0f } } }; + static const XMVECTORF32 XMMMPP = { { { -1.0f, -1.0f, +1.0f, +1.0f } } }; + + XMVECTOR r0 = M.r[0]; // (r00, r01, r02, 0) + XMVECTOR r1 = M.r[1]; // (r10, r11, r12, 0) + XMVECTOR r2 = M.r[2]; // (r20, r21, r22, 0) + + // (r00, r00, r00, r00) + XMVECTOR r00 = XM_PERMUTE_PS(r0, _MM_SHUFFLE(0, 0, 0, 0)); + // (r11, r11, r11, r11) + XMVECTOR r11 = XM_PERMUTE_PS(r1, _MM_SHUFFLE(1, 1, 1, 1)); + // (r22, r22, r22, r22) + XMVECTOR r22 = XM_PERMUTE_PS(r2, _MM_SHUFFLE(2, 2, 2, 2)); + + // x^2 >= y^2 equivalent to r11 - r00 <= 0 + // (r11 - r00, r11 - r00, r11 - r00, r11 - r00) + XMVECTOR r11mr00 = _mm_sub_ps(r11, r00); + XMVECTOR x2gey2 = _mm_cmple_ps(r11mr00, g_XMZero); + + // z^2 >= w^2 equivalent to r11 + r00 <= 0 + // (r11 + r00, r11 + r00, r11 + r00, r11 + r00) + XMVECTOR r11pr00 = _mm_add_ps(r11, r00); + XMVECTOR z2gew2 = _mm_cmple_ps(r11pr00, g_XMZero); + + // x^2 + y^2 >= z^2 + w^2 equivalent to r22 <= 0 + XMVECTOR x2py2gez2pw2 = _mm_cmple_ps(r22, g_XMZero); + + // (4*x^2, 4*y^2, 4*z^2, 4*w^2) + XMVECTOR t0 = XM_FMADD_PS(XMPMMP, r00, g_XMOne); + XMVECTOR t1 = _mm_mul_ps(XMMPMP, r11); + XMVECTOR t2 = XM_FMADD_PS(XMMMPP, r22, t0); + XMVECTOR x2y2z2w2 = _mm_add_ps(t1, t2); + + // (r01, r02, r12, r11) + t0 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(1, 2, 2, 1)); + // (r10, r10, r20, r21) + t1 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(1, 0, 0, 0)); + // (r10, r20, r21, r10) + t1 = XM_PERMUTE_PS(t1, _MM_SHUFFLE(1, 3, 2, 0)); + // (4*x*y, 4*x*z, 4*y*z, unused) + XMVECTOR xyxzyz = _mm_add_ps(t0, t1); + + // (r21, r20, r10, r10) + t0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 1)); + // (r12, r12, r02, r01) + t1 = _mm_shuffle_ps(r1, r0, _MM_SHUFFLE(1, 2, 2, 2)); + // (r12, r02, r01, r12) + t1 = XM_PERMUTE_PS(t1, _MM_SHUFFLE(1, 3, 2, 0)); + // (4*x*w, 4*y*w, 4*z*w, unused) + XMVECTOR xwywzw = _mm_sub_ps(t0, t1); + xwywzw = _mm_mul_ps(XMMPMP, xwywzw); + + // (4*x^2, 4*y^2, 4*x*y, unused) + t0 = _mm_shuffle_ps(x2y2z2w2, xyxzyz, _MM_SHUFFLE(0, 0, 1, 0)); + // (4*z^2, 4*w^2, 4*z*w, unused) + t1 = _mm_shuffle_ps(x2y2z2w2, xwywzw, _MM_SHUFFLE(0, 2, 3, 2)); + // (4*x*z, 4*y*z, 4*x*w, 4*y*w) + t2 = _mm_shuffle_ps(xyxzyz, xwywzw, _MM_SHUFFLE(1, 0, 2, 1)); + + // (4*x*x, 4*x*y, 4*x*z, 4*x*w) + XMVECTOR tensor0 = _mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2, 0, 2, 0)); + // (4*y*x, 4*y*y, 4*y*z, 4*y*w) + XMVECTOR tensor1 = _mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3, 1, 1, 2)); + // (4*z*x, 4*z*y, 4*z*z, 4*z*w) + XMVECTOR tensor2 = _mm_shuffle_ps(t2, t1, _MM_SHUFFLE(2, 0, 1, 0)); + // (4*w*x, 4*w*y, 4*w*z, 4*w*w) + XMVECTOR tensor3 = _mm_shuffle_ps(t2, t1, _MM_SHUFFLE(1, 2, 3, 2)); + + // Select the row of the tensor-product matrix that has the largest + // magnitude. + t0 = _mm_and_ps(x2gey2, tensor0); + t1 = _mm_andnot_ps(x2gey2, tensor1); + t0 = _mm_or_ps(t0, t1); + t1 = _mm_and_ps(z2gew2, tensor2); + t2 = _mm_andnot_ps(z2gew2, tensor3); + t1 = _mm_or_ps(t1, t2); + t0 = _mm_and_ps(x2py2gez2pw2, t0); + t1 = _mm_andnot_ps(x2py2gez2pw2, t1); + t2 = _mm_or_ps(t0, t1); + + // Normalize the row. No division by zero is possible because the + // quaternion is unit-length (and the row is a nonzero multiple of + // the quaternion). 
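+    // Editorial note: XMVector4Length returns the length replicated into all
+    // four lanes, so the single divide below normalizes every component of the
+    // selected row at once.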
+ t0 = XMVector4Length(t2); + return _mm_div_ps(t2, t0); +#endif +} + +//------------------------------------------------------------------------------ +// Conversion operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMQuaternionToAxisAngle +( + XMVECTOR* pAxis, + float* pAngle, + FXMVECTOR Q +) noexcept +{ + assert(pAxis); + assert(pAngle); + + *pAxis = Q; + + *pAngle = 2.0f * XMScalarACos(XMVectorGetW(Q)); +} + +/**************************************************************************** + * + * Plane + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMPlaneEqual +( + FXMVECTOR P1, + FXMVECTOR P2 +) noexcept +{ + return XMVector4Equal(P1, P2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMPlaneNearEqual +( + FXMVECTOR P1, + FXMVECTOR P2, + FXMVECTOR Epsilon +) noexcept +{ + XMVECTOR NP1 = XMPlaneNormalize(P1); + XMVECTOR NP2 = XMPlaneNormalize(P2); + return XMVector4NearEqual(NP1, NP2, Epsilon); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMPlaneNotEqual +( + FXMVECTOR P1, + FXMVECTOR P2 +) noexcept +{ + return XMVector4NotEqual(P1, P2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMPlaneIsNaN(FXMVECTOR P) noexcept +{ + return XMVector4IsNaN(P); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMPlaneIsInfinite(FXMVECTOR P) noexcept +{ + return XMVector4IsInfinite(P); +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneDot +( + FXMVECTOR P, + FXMVECTOR V +) noexcept +{ + return XMVector4Dot(P, V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneDotCoord +( + FXMVECTOR P, + FXMVECTOR V +) noexcept +{ + // Result = P[0] * V[0] + P[1] * V[1] + P[2] * V[2] + P[3] + + XMVECTOR V3 = XMVectorSelect(g_XMOne.v, V, g_XMSelect1110.v); + XMVECTOR Result = XMVector4Dot(P, V3); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneDotNormal +( + FXMVECTOR P, + FXMVECTOR V +) noexcept +{ + return XMVector3Dot(P, V); +} + +//------------------------------------------------------------------------------ +// XMPlaneNormalizeEst uses a reciprocal estimate and +// returns QNaN on zero and infinite vectors. 
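+// Editorial note: prefer XMPlaneNormalize when accuracy matters; the estimate
+// path relies on a reciprocal square-root approximation (about 12 bits of
+// precision on the SSE path). A hedged sketch, with an illustrative plane:
+//
+//     XMVECTOR plane = XMVectorSet(0.0f, 3.0f, 4.0f, 5.0f); // |normal| = 5
+//     XMVECTOR fast  = XMPlaneNormalizeEst(plane); // ~(0, 0.6, 0.8, 1.0)
+//     XMVECTOR exact = XMPlaneNormalize(plane);    //  (0, 0.6, 0.8, 1.0)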
+ +inline XMVECTOR XM_CALLCONV XMPlaneNormalizeEst(FXMVECTOR P) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR Result = XMVector3ReciprocalLengthEst(P); + return XMVectorMultiply(P, Result); + +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(P, P, 0x7f); + XMVECTOR vResult = _mm_rsqrt_ps(vTemp); + return _mm_mul_ps(vResult, P); +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(P, P); + // x=Dot.y, y=Dot.z + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.x = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.z + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.x = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + vDot = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vDot = _mm_rsqrt_ps(vDot); + // Get the reciprocal + vDot = _mm_mul_ps(vDot, P); + return vDot; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneNormalize(FXMVECTOR P) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fLengthSq = sqrtf((P.vector4_f32[0] * P.vector4_f32[0]) + (P.vector4_f32[1] * P.vector4_f32[1]) + (P.vector4_f32[2] * P.vector4_f32[2])); + // Prevent divide by zero + if (fLengthSq > 0) + { + fLengthSq = 1.0f / fLengthSq; + } + XMVECTORF32 vResult = { { { + P.vector4_f32[0] * fLengthSq, + P.vector4_f32[1] * fLengthSq, + P.vector4_f32[2] * fLengthSq, + P.vector4_f32[3] * fLengthSq + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vLength = XMVector3ReciprocalLength(P); + return XMVectorMultiply(P, vLength); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vLengthSq = _mm_dp_ps(P, P, 0x7f); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(P, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vLengthSq); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z only + XMVECTOR vLengthSq = _mm_mul_ps(P, P); + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 1, 2, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(P, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vLengthSq); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneIntersectLine +( + FXMVECTOR P, + FXMVECTOR LinePoint1, + FXMVECTOR LinePoint2 +) noexcept +{ + XMVECTOR V1 = XMVector3Dot(P, LinePoint1); + XMVECTOR V2 = XMVector3Dot(P, LinePoint2); + XMVECTOR D = XMVectorSubtract(V1, V2); + + XMVECTOR VT = XMPlaneDotCoord(P, LinePoint1); + VT = XMVectorDivide(VT, D); + + XMVECTOR Point = XMVectorSubtract(LinePoint2, LinePoint1); + Point = 
XMVectorMultiplyAdd(Point, VT, LinePoint1); + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR Control = XMVectorNearEqual(D, Zero, g_XMEpsilon.v); + + return XMVectorSelect(Point, g_XMQNaN.v, Control); +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMPlaneIntersectPlane +( + XMVECTOR* pLinePoint1, + XMVECTOR* pLinePoint2, + FXMVECTOR P1, + FXMVECTOR P2 +) noexcept +{ + assert(pLinePoint1); + assert(pLinePoint2); + + XMVECTOR V1 = XMVector3Cross(P2, P1); + + XMVECTOR LengthSq = XMVector3LengthSq(V1); + + XMVECTOR V2 = XMVector3Cross(P2, V1); + + XMVECTOR P1W = XMVectorSplatW(P1); + XMVECTOR Point = XMVectorMultiply(V2, P1W); + + XMVECTOR V3 = XMVector3Cross(V1, P1); + + XMVECTOR P2W = XMVectorSplatW(P2); + Point = XMVectorMultiplyAdd(V3, P2W, Point); + + XMVECTOR LinePoint1 = XMVectorDivide(Point, LengthSq); + + XMVECTOR LinePoint2 = XMVectorAdd(LinePoint1, V1); + + XMVECTOR Control = XMVectorLessOrEqual(LengthSq, g_XMEpsilon.v); + *pLinePoint1 = XMVectorSelect(LinePoint1, g_XMQNaN.v, Control); + *pLinePoint2 = XMVectorSelect(LinePoint2, g_XMQNaN.v, Control); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneTransform +( + FXMVECTOR P, + FXMMATRIX M +) noexcept +{ + XMVECTOR W = XMVectorSplatW(P); + XMVECTOR Z = XMVectorSplatZ(P); + XMVECTOR Y = XMVectorSplatY(P); + XMVECTOR X = XMVectorSplatX(P); + + XMVECTOR Result = XMVectorMultiply(W, M.r[3]); + Result = XMVectorMultiplyAdd(Z, M.r[2], Result); + Result = XMVectorMultiplyAdd(Y, M.r[1], Result); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + return Result; +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4* XM_CALLCONV XMPlaneTransformStream +( + XMFLOAT4* pOutputStream, + size_t OutputStride, + const XMFLOAT4* pInputStream, + size_t InputStride, + size_t PlaneCount, + FXMMATRIX M +) noexcept +{ + return XMVector4TransformStream(pOutputStream, + OutputStride, + pInputStream, + InputStride, + PlaneCount, + M); +} + +//------------------------------------------------------------------------------ +// Conversion operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneFromPointNormal +( + FXMVECTOR Point, + FXMVECTOR Normal +) noexcept +{ + XMVECTOR W = XMVector3Dot(Point, Normal); + W = XMVectorNegate(W); + return XMVectorSelect(W, Normal, g_XMSelect1110.v); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneFromPoints +( + FXMVECTOR Point1, + FXMVECTOR Point2, + FXMVECTOR Point3 +) noexcept +{ + XMVECTOR V21 = XMVectorSubtract(Point1, Point2); + XMVECTOR V31 = XMVectorSubtract(Point1, Point3); + + XMVECTOR N = XMVector3Cross(V21, V31); + N = XMVector3Normalize(N); + + XMVECTOR D = XMPlaneDotNormal(N, Point1); + D = XMVectorNegate(D); + + XMVECTOR Result = XMVectorSelect(D, N, g_XMSelect1110.v); + + return Result; +} + +/**************************************************************************** + * + * Color + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + 
//------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4Equal(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorNotEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4NotEqual(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorGreater +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4Greater(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorGreaterOrEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4GreaterOrEqual(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorLess +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4Less(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorLessOrEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4LessOrEqual(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorIsNaN(FXMVECTOR C) noexcept +{ + return XMVector4IsNaN(C); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorIsInfinite(FXMVECTOR C) noexcept +{ + return XMVector4IsInfinite(C); +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorNegative(FXMVECTOR vColor) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + 1.0f - vColor.vector4_f32[0], + 1.0f - vColor.vector4_f32[1], + 1.0f - vColor.vector4_f32[2], + vColor.vector4_f32[3] + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vTemp = veorq_u32(vColor, g_XMNegate3); + return vaddq_f32(vTemp, g_XMOne3); +#elif defined(_XM_SSE_INTRINSICS_) + // Negate only x,y and z. 
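+    // Editorial note: g_XMNegate3 holds the sign bit in x, y and z only, so the
+    // xor below yields (-r, -g, -b, a); adding g_XMOne3 = (1, 1, 1, 0) then
+    // produces the complement (1-r, 1-g, 1-b, a) with alpha preserved.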
+ XMVECTOR vTemp = _mm_xor_ps(vColor, g_XMNegate3); + // Add 1,1,1,0 to -x,-y,-z,w + return _mm_add_ps(vTemp, g_XMOne3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorModulate +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVectorMultiply(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorAdjustSaturation +( + FXMVECTOR vColor, + float fSaturation +) noexcept +{ + // Luminance = 0.2125f * C[0] + 0.7154f * C[1] + 0.0721f * C[2]; + // Result = (C - Luminance) * Saturation + Luminance; + + const XMVECTORF32 gvLuminance = { { { 0.2125f, 0.7154f, 0.0721f, 0.0f } } }; +#if defined(_XM_NO_INTRINSICS_) + float fLuminance = (vColor.vector4_f32[0] * gvLuminance.f[0]) + (vColor.vector4_f32[1] * gvLuminance.f[1]) + (vColor.vector4_f32[2] * gvLuminance.f[2]); + XMVECTOR vResult; + vResult.vector4_f32[0] = ((vColor.vector4_f32[0] - fLuminance) * fSaturation) + fLuminance; + vResult.vector4_f32[1] = ((vColor.vector4_f32[1] - fLuminance) * fSaturation) + fLuminance; + vResult.vector4_f32[2] = ((vColor.vector4_f32[2] - fLuminance) * fSaturation) + fLuminance; + vResult.vector4_f32[3] = vColor.vector4_f32[3]; + return vResult; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vLuminance = XMVector3Dot(vColor, gvLuminance); + XMVECTOR vResult = vsubq_f32(vColor, vLuminance); + vResult = vmlaq_n_f32(vLuminance, vResult, fSaturation); + return vbslq_f32(g_XMSelect1110, vResult, vColor); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vLuminance = XMVector3Dot(vColor, gvLuminance); + // Splat fSaturation + XMVECTOR vSaturation = _mm_set_ps1(fSaturation); + // vResult = ((vColor-vLuminance)*vSaturation)+vLuminance; + XMVECTOR vResult = _mm_sub_ps(vColor, vLuminance); + vResult = XM_FMADD_PS(vResult, vSaturation, vLuminance); + // Retain w from the source color + vLuminance = _mm_shuffle_ps(vResult, vColor, _MM_SHUFFLE(3, 2, 2, 2)); // x = vResult.z,y = vResult.z,z = vColor.z,w=vColor.w + vResult = _mm_shuffle_ps(vResult, vLuminance, _MM_SHUFFLE(3, 0, 1, 0)); // x = vResult.x,y = vResult.y,z = vResult.z,w=vColor.w + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorAdjustContrast +( + FXMVECTOR vColor, + float fContrast +) noexcept +{ + // Result = (vColor - 0.5f) * fContrast + 0.5f; + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + ((vColor.vector4_f32[0] - 0.5f) * fContrast) + 0.5f, + ((vColor.vector4_f32[1] - 0.5f) * fContrast) + 0.5f, + ((vColor.vector4_f32[2] - 0.5f) * fContrast) + 0.5f, + vColor.vector4_f32[3] // Leave W untouched + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vResult = vsubq_f32(vColor, g_XMOneHalf.v); + vResult = vmlaq_n_f32(g_XMOneHalf.v, vResult, fContrast); + return vbslq_f32(g_XMSelect1110, vResult, vColor); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vScale = _mm_set_ps1(fContrast); // Splat the scale + XMVECTOR vResult = _mm_sub_ps(vColor, g_XMOneHalf); // Subtract 0.5f from the source (Saving source) + vResult = XM_FMADD_PS(vResult, vScale, g_XMOneHalf); +// Retain w from the source color + vScale = _mm_shuffle_ps(vResult, vColor, _MM_SHUFFLE(3, 2, 2, 2)); // x = vResult.z,y = vResult.z,z = vColor.z,w=vColor.w + vResult = _mm_shuffle_ps(vResult, vScale, _MM_SHUFFLE(3, 0, 1, 0)); // x = vResult.x,y = vResult.y,z = vResult.z,w=vColor.w + return vResult; 
+#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToHSL(FXMVECTOR rgb) noexcept +{ + XMVECTOR r = XMVectorSplatX(rgb); + XMVECTOR g = XMVectorSplatY(rgb); + XMVECTOR b = XMVectorSplatZ(rgb); + + XMVECTOR min = XMVectorMin(r, XMVectorMin(g, b)); + XMVECTOR max = XMVectorMax(r, XMVectorMax(g, b)); + + XMVECTOR l = XMVectorMultiply(XMVectorAdd(min, max), g_XMOneHalf); + + XMVECTOR d = XMVectorSubtract(max, min); + + XMVECTOR la = XMVectorSelect(rgb, l, g_XMSelect1110); + + if (XMVector3Less(d, g_XMEpsilon)) + { + // Achromatic, assume H and S of 0 + return XMVectorSelect(la, g_XMZero, g_XMSelect1100); + } + else + { + XMVECTOR s, h; + + XMVECTOR d2 = XMVectorAdd(min, max); + + if (XMVector3Greater(l, g_XMOneHalf)) + { + // d / (2-max-min) + s = XMVectorDivide(d, XMVectorSubtract(g_XMTwo, d2)); + } + else + { + // d / (max+min) + s = XMVectorDivide(d, d2); + } + + if (XMVector3Equal(r, max)) + { + // Red is max + h = XMVectorDivide(XMVectorSubtract(g, b), d); + } + else if (XMVector3Equal(g, max)) + { + // Green is max + h = XMVectorDivide(XMVectorSubtract(b, r), d); + h = XMVectorAdd(h, g_XMTwo); + } + else + { + // Blue is max + h = XMVectorDivide(XMVectorSubtract(r, g), d); + h = XMVectorAdd(h, g_XMFour); + } + + h = XMVectorDivide(h, g_XMSix); + + if (XMVector3Less(h, g_XMZero)) + h = XMVectorAdd(h, g_XMOne); + + XMVECTOR lha = XMVectorSelect(la, h, g_XMSelect1100); + return XMVectorSelect(s, lha, g_XMSelect1011); + } +} + +//------------------------------------------------------------------------------ + +namespace Internal +{ + + inline XMVECTOR XM_CALLCONV XMColorHue2Clr(FXMVECTOR p, FXMVECTOR q, FXMVECTOR h) noexcept + { + static const XMVECTORF32 oneSixth = { { { 1.0f / 6.0f, 1.0f / 6.0f, 1.0f / 6.0f, 1.0f / 6.0f } } }; + static const XMVECTORF32 twoThirds = { { { 2.0f / 3.0f, 2.0f / 3.0f, 2.0f / 3.0f, 2.0f / 3.0f } } }; + + XMVECTOR t = h; + + if (XMVector3Less(t, g_XMZero)) + t = XMVectorAdd(t, g_XMOne); + + if (XMVector3Greater(t, g_XMOne)) + t = XMVectorSubtract(t, g_XMOne); + + if (XMVector3Less(t, oneSixth)) + { + // p + (q - p) * 6 * t + XMVECTOR t1 = XMVectorSubtract(q, p); + XMVECTOR t2 = XMVectorMultiply(g_XMSix, t); + return XMVectorMultiplyAdd(t1, t2, p); + } + + if (XMVector3Less(t, g_XMOneHalf)) + return q; + + if (XMVector3Less(t, twoThirds)) + { + // p + (q - p) * 6 * (2/3 - t) + XMVECTOR t1 = XMVectorSubtract(q, p); + XMVECTOR t2 = XMVectorMultiply(g_XMSix, XMVectorSubtract(twoThirds, t)); + return XMVectorMultiplyAdd(t1, t2, p); + } + + return p; + } + +} // namespace Internal + +inline XMVECTOR XM_CALLCONV XMColorHSLToRGB(FXMVECTOR hsl) noexcept +{ + static const XMVECTORF32 oneThird = { { { 1.0f / 3.0f, 1.0f / 3.0f, 1.0f / 3.0f, 1.0f / 3.0f } } }; + + XMVECTOR s = XMVectorSplatY(hsl); + XMVECTOR l = XMVectorSplatZ(hsl); + + if (XMVector3NearEqual(s, g_XMZero, g_XMEpsilon)) + { + // Achromatic + return XMVectorSelect(hsl, l, g_XMSelect1110); + } + else + { + XMVECTOR h = XMVectorSplatX(hsl); + + XMVECTOR q; + if (XMVector3Less(l, g_XMOneHalf)) + { + q = XMVectorMultiply(l, XMVectorAdd(g_XMOne, s)); + } + else + { + q = XMVectorSubtract(XMVectorAdd(l, s), XMVectorMultiply(l, s)); + } + + XMVECTOR p = XMVectorSubtract(XMVectorMultiply(g_XMTwo, l), q); + + XMVECTOR r = DirectX::Internal::XMColorHue2Clr(p, q, XMVectorAdd(h, oneThird)); + XMVECTOR g = DirectX::Internal::XMColorHue2Clr(p, q, h); + XMVECTOR b = DirectX::Internal::XMColorHue2Clr(p, q, XMVectorSubtract(h, oneThird)); + + 
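+        // Editorial note: r, g and b are splatted vectors, so the selects below
+        // assemble (r, g, b, hsl.w): rg keeps r in x and g in y, ba keeps b in z
+        // and the original alpha in w, and the final select merges the two.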
XMVECTOR rg = XMVectorSelect(g, r, g_XMSelect1000);
+        XMVECTOR ba = XMVectorSelect(hsl, b, g_XMSelect1110);
+
+        return XMVectorSelect(ba, rg, g_XMSelect1100);
+    }
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMColorRGBToHSV(FXMVECTOR rgb) noexcept
+{
+    XMVECTOR r = XMVectorSplatX(rgb);
+    XMVECTOR g = XMVectorSplatY(rgb);
+    XMVECTOR b = XMVectorSplatZ(rgb);
+
+    XMVECTOR min = XMVectorMin(r, XMVectorMin(g, b));
+    XMVECTOR v = XMVectorMax(r, XMVectorMax(g, b));
+
+    XMVECTOR d = XMVectorSubtract(v, min);
+
+    XMVECTOR s = (XMVector3NearEqual(v, g_XMZero, g_XMEpsilon)) ? g_XMZero : XMVectorDivide(d, v);
+
+    if (XMVector3Less(d, g_XMEpsilon))
+    {
+        // Achromatic, assume H of 0
+        XMVECTOR hv = XMVectorSelect(v, g_XMZero, g_XMSelect1000);
+        XMVECTOR hva = XMVectorSelect(rgb, hv, g_XMSelect1110);
+        return XMVectorSelect(s, hva, g_XMSelect1011);
+    }
+    else
+    {
+        XMVECTOR h;
+
+        if (XMVector3Equal(r, v))
+        {
+            // Red is max
+            h = XMVectorDivide(XMVectorSubtract(g, b), d);
+
+            if (XMVector3Less(g, b))
+                h = XMVectorAdd(h, g_XMSix);
+        }
+        else if (XMVector3Equal(g, v))
+        {
+            // Green is max
+            h = XMVectorDivide(XMVectorSubtract(b, r), d);
+            h = XMVectorAdd(h, g_XMTwo);
+        }
+        else
+        {
+            // Blue is max
+            h = XMVectorDivide(XMVectorSubtract(r, g), d);
+            h = XMVectorAdd(h, g_XMFour);
+        }
+
+        h = XMVectorDivide(h, g_XMSix);
+
+        XMVECTOR hv = XMVectorSelect(v, h, g_XMSelect1000);
+        XMVECTOR hva = XMVectorSelect(rgb, hv, g_XMSelect1110);
+        return XMVectorSelect(s, hva, g_XMSelect1011);
+    }
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMColorHSVToRGB(FXMVECTOR hsv) noexcept
+{
+    XMVECTOR h = XMVectorSplatX(hsv);
+    XMVECTOR s = XMVectorSplatY(hsv);
+    XMVECTOR v = XMVectorSplatZ(hsv);
+
+    XMVECTOR h6 = XMVectorMultiply(h, g_XMSix);
+
+    XMVECTOR i = XMVectorFloor(h6);
+    XMVECTOR f = XMVectorSubtract(h6, i);
+
+    // p = v* (1-s)
+    XMVECTOR p = XMVectorMultiply(v, XMVectorSubtract(g_XMOne, s));
+
+    // q = v*(1-f*s)
+    XMVECTOR q = XMVectorMultiply(v, XMVectorSubtract(g_XMOne, XMVectorMultiply(f, s)));
+
+    // t = v*(1 - (1-f)*s)
+    XMVECTOR t = XMVectorMultiply(v, XMVectorSubtract(g_XMOne, XMVectorMultiply(XMVectorSubtract(g_XMOne, f), s)));
+
+    auto ii = static_cast<size_t>(XMVectorGetX(XMVectorMod(i, g_XMSix)));
+
+    XMVECTOR _rgb;
+
+    switch (ii)
+    {
+    case 0: // rgb = vtp
+    {
+        XMVECTOR vt = XMVectorSelect(t, v, g_XMSelect1000);
+        _rgb = XMVectorSelect(p, vt, g_XMSelect1100);
+    }
+    break;
+    case 1: // rgb = qvp
+    {
+        XMVECTOR qv = XMVectorSelect(v, q, g_XMSelect1000);
+        _rgb = XMVectorSelect(p, qv, g_XMSelect1100);
+    }
+    break;
+    case 2: // rgb = pvt
+    {
+        XMVECTOR pv = XMVectorSelect(v, p, g_XMSelect1000);
+        _rgb = XMVectorSelect(t, pv, g_XMSelect1100);
+    }
+    break;
+    case 3: // rgb = pqv
+    {
+        XMVECTOR pq = XMVectorSelect(q, p, g_XMSelect1000);
+        _rgb = XMVectorSelect(v, pq, g_XMSelect1100);
+    }
+    break;
+    case 4: // rgb = tpv
+    {
+        XMVECTOR tp = XMVectorSelect(p, t, g_XMSelect1000);
+        _rgb = XMVectorSelect(v, tp, g_XMSelect1100);
+    }
+    break;
+    default: // rgb = vpq
+    {
+        XMVECTOR vp = XMVectorSelect(p, v, g_XMSelect1000);
+        _rgb = XMVectorSelect(q, vp, g_XMSelect1100);
+    }
+    break;
+    }
+
+    return XMVectorSelect(hsv, _rgb, g_XMSelect1110);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMColorRGBToYUV(FXMVECTOR rgb) noexcept
+{
+    static const XMVECTORF32 Scale0 = { { { 0.299f, -0.147f, 0.615f, 0.0f } } };
+
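+    // Editorial note: Scale0..Scale2 are the columns of the usual BT.601-style
+    // RGB-to-YUV 3x3 matrix; XMVector3Transform below computes r*Scale0 +
+    // g*Scale1 + b*Scale2, leaving alpha to the final select.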
static const XMVECTORF32 Scale1 = { { { 0.587f, -0.289f, -0.515f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.114f, 0.436f, -0.100f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(rgb, M); + + return XMVectorSelect(rgb, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorYUVToRGB(FXMVECTOR yuv) noexcept +{ + static const XMVECTORF32 Scale1 = { { { 0.0f, -0.395f, 2.032f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 1.140f, -0.581f, 0.0f, 0.0f } } }; + + XMMATRIX M(g_XMOne, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(yuv, M); + + return XMVectorSelect(yuv, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToYUV_HD(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.2126f, -0.0997f, 0.6150f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.7152f, -0.3354f, -0.5586f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.0722f, 0.4351f, -0.0564f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(rgb, M); + + return XMVectorSelect(rgb, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorYUVToRGB_HD(FXMVECTOR yuv) noexcept +{ + static const XMVECTORF32 Scale1 = { { { 0.0f, -0.2153f, 2.1324f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 1.2803f, -0.3806f, 0.0f, 0.0f } } }; + + XMMATRIX M(g_XMOne, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(yuv, M); + + return XMVectorSelect(yuv, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToXYZ(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.4887180f, 0.1762044f, 0.0000000f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.3106803f, 0.8129847f, 0.0102048f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.2006017f, 0.0108109f, 0.9897952f, 0.0f } } }; + static const XMVECTORF32 Scale = { { { 1.f / 0.17697f, 1.f / 0.17697f, 1.f / 0.17697f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVectorMultiply(XMVector3Transform(rgb, M), Scale); + + return XMVectorSelect(rgb, clr, g_XMSelect1110); +} + +inline XMVECTOR XM_CALLCONV XMColorXYZToRGB(FXMVECTOR xyz) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 2.3706743f, -0.5138850f, 0.0052982f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { -0.9000405f, 1.4253036f, -0.0146949f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { -0.4706338f, 0.0885814f, 1.0093968f, 0.0f } } }; + static const XMVECTORF32 Scale = { { { 0.17697f, 0.17697f, 0.17697f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(XMVectorMultiply(xyz, Scale), M); + + return XMVectorSelect(xyz, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorXYZToSRGB(FXMVECTOR xyz) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 3.2406f, -0.9689f, 0.0557f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { -1.5372f, 1.8758f, -0.2040f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { -0.4986f, 0.0415f, 1.0570f, 0.0f } } }; + static const XMVECTORF32 Cutoff 
= { { { 0.0031308f, 0.0031308f, 0.0031308f, 0.0f } } }; + static const XMVECTORF32 Exp = { { { 1.0f / 2.4f, 1.0f / 2.4f, 1.0f / 2.4f, 1.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR lclr = XMVector3Transform(xyz, M); + + XMVECTOR sel = XMVectorGreater(lclr, Cutoff); + + // clr = 12.92 * lclr for lclr <= 0.0031308f + XMVECTOR smallC = XMVectorMultiply(lclr, g_XMsrgbScale); + + // clr = (1+a)*pow(lclr, 1/2.4) - a for lclr > 0.0031308 (where a = 0.055) + XMVECTOR largeC = XMVectorSubtract(XMVectorMultiply(g_XMsrgbA1, XMVectorPow(lclr, Exp)), g_XMsrgbA); + + XMVECTOR clr = XMVectorSelect(smallC, largeC, sel); + + return XMVectorSelect(xyz, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorSRGBToXYZ(FXMVECTOR srgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.4124f, 0.2126f, 0.0193f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.3576f, 0.7152f, 0.1192f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.1805f, 0.0722f, 0.9505f, 0.0f } } }; + static const XMVECTORF32 Cutoff = { { { 0.04045f, 0.04045f, 0.04045f, 0.0f } } }; + static const XMVECTORF32 Exp = { { { 2.4f, 2.4f, 2.4f, 1.0f } } }; + + XMVECTOR sel = XMVectorGreater(srgb, Cutoff); + + // lclr = clr / 12.92 + XMVECTOR smallC = XMVectorDivide(srgb, g_XMsrgbScale); + + // lclr = pow( (clr + a) / (1+a), 2.4 ) + XMVECTOR largeC = XMVectorPow(XMVectorDivide(XMVectorAdd(srgb, g_XMsrgbA), g_XMsrgbA1), Exp); + + XMVECTOR lclr = XMVectorSelect(smallC, largeC, sel); + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(lclr, M); + + return XMVectorSelect(srgb, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToSRGB(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Cutoff = { { { 0.0031308f, 0.0031308f, 0.0031308f, 1.f } } }; + static const XMVECTORF32 Linear = { { { 12.92f, 12.92f, 12.92f, 1.f } } }; + static const XMVECTORF32 Scale = { { { 1.055f, 1.055f, 1.055f, 1.f } } }; + static const XMVECTORF32 Bias = { { { 0.055f, 0.055f, 0.055f, 0.f } } }; + static const XMVECTORF32 InvGamma = { { { 1.0f / 2.4f, 1.0f / 2.4f, 1.0f / 2.4f, 1.f } } }; + + XMVECTOR V = XMVectorSaturate(rgb); + XMVECTOR V0 = XMVectorMultiply(V, Linear); + XMVECTOR V1 = XMVectorSubtract(XMVectorMultiply(Scale, XMVectorPow(V, InvGamma)), Bias); + XMVECTOR select = XMVectorLess(V, Cutoff); + V = XMVectorSelect(V1, V0, select); + return XMVectorSelect(rgb, V, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorSRGBToRGB(FXMVECTOR srgb) noexcept +{ + static const XMVECTORF32 Cutoff = { { { 0.04045f, 0.04045f, 0.04045f, 1.f } } }; + static const XMVECTORF32 ILinear = { { { 1.f / 12.92f, 1.f / 12.92f, 1.f / 12.92f, 1.f } } }; + static const XMVECTORF32 Scale = { { { 1.f / 1.055f, 1.f / 1.055f, 1.f / 1.055f, 1.f } } }; + static const XMVECTORF32 Bias = { { { 0.055f, 0.055f, 0.055f, 0.f } } }; + static const XMVECTORF32 Gamma = { { { 2.4f, 2.4f, 2.4f, 1.f } } }; + + XMVECTOR V = XMVectorSaturate(srgb); + XMVECTOR V0 = XMVectorMultiply(V, ILinear); + XMVECTOR V1 = XMVectorPow(XMVectorMultiply(XMVectorAdd(V, Bias), Scale), Gamma); + XMVECTOR select = XMVectorGreater(V, Cutoff); + V = XMVectorSelect(V0, V1, select); + return XMVectorSelect(srgb, V, g_XMSelect1110); +} + 
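+// Editorial note (not part of the upstream header): a hedged round-trip sketch
+// for the two sRGB helpers above; the variable names and the quoted values are
+// illustrative, computed from the standard sRGB constants used above:
+//
+//     XMVECTOR srgb   = XMVectorSet(0.5f, 0.25f, 1.0f, 1.0f); // gamma-encoded
+//     XMVECTOR linear = XMColorSRGBToRGB(srgb);   // ~(0.2140, 0.0508, 1.0, 1.0)
+//     XMVECTOR back   = XMColorRGBToSRGB(linear); // ~(0.5, 0.25, 1.0, 1.0)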
+/**************************************************************************** + * + * Miscellaneous + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + +inline bool XMVerifyCPUSupport() noexcept +{ +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + int CPUInfo[4] = { -1 }; +#if defined(__clang__) || defined(__GNUC__) + __cpuid(0, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]); +#else + __cpuid(CPUInfo, 0); +#endif + +#ifdef __AVX2__ + if (CPUInfo[0] < 7) + return false; +#else + if (CPUInfo[0] < 1) + return false; +#endif + +#if defined(__clang__) || defined(__GNUC__) + __cpuid(1, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]); +#else + __cpuid(CPUInfo, 1); +#endif + +#if defined(__AVX2__) || defined(_XM_AVX2_INTRINSICS_) + // The compiler can emit FMA3 instructions even without explicit intrinsics use + if ((CPUInfo[2] & 0x38081001) != 0x38081001) + return false; // No F16C/AVX/OSXSAVE/SSE4.1/FMA3/SSE3 support +#elif defined(_XM_FMA3_INTRINSICS_) && defined(_XM_F16C_INTRINSICS_) + if ((CPUInfo[2] & 0x38081001) != 0x38081001) + return false; // No F16C/AVX/OSXSAVE/SSE4.1/FMA3/SSE3 support +#elif defined(_XM_FMA3_INTRINSICS_) + if ((CPUInfo[2] & 0x18081001) != 0x18081001) + return false; // No AVX/OSXSAVE/SSE4.1/FMA3/SSE3 support +#elif defined(_XM_F16C_INTRINSICS_) + if ((CPUInfo[2] & 0x38080001) != 0x38080001) + return false; // No F16C/AVX/OSXSAVE/SSE4.1/SSE3 support +#elif defined(__AVX__) || defined(_XM_AVX_INTRINSICS_) + if ((CPUInfo[2] & 0x18080001) != 0x18080001) + return false; // No AVX/OSXSAVE/SSE4.1/SSE3 support +#elif defined(_XM_SSE4_INTRINSICS_) + if ((CPUInfo[2] & 0x80001) != 0x80001) + return false; // No SSE3/SSE4.1 support +#elif defined(_XM_SSE3_INTRINSICS_) + if (!(CPUInfo[2] & 0x1)) + return false; // No SSE3 support +#endif + + // The x64 processor model requires SSE2 support, but no harm in checking + if ((CPUInfo[3] & 0x6000000) != 0x6000000) + return false; // No SSE2/SSE support + +#if defined(__AVX2__) || defined(_XM_AVX2_INTRINSICS_) +#if defined(__clang__) || defined(__GNUC__) + __cpuid_count(7, 0, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]); +#else + __cpuidex(CPUInfo, 7, 0); +#endif + if (!(CPUInfo[1] & 0x20)) + return false; // No AVX2 support +#endif + + return true; +#elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + // ARM-NEON support is required for the Windows on ARM platform + return true; +#else + // No intrinsics path always supported + return true; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMFresnelTerm +( + FXMVECTOR CosIncidentAngle, + FXMVECTOR RefractionIndex +) noexcept +{ + assert(!XMVector4IsInfinite(CosIncidentAngle)); + + // Result = 0.5f * (g - c)^2 / (g + c)^2 * ((c * (g + c) - 1)^2 / (c * (g - c) + 1)^2 + 1) where + // c = CosIncidentAngle + // g = sqrt(c^2 + RefractionIndex^2 - 1) + +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR G = XMVectorMultiplyAdd(RefractionIndex, RefractionIndex, g_XMNegativeOne.v); + G = XMVectorMultiplyAdd(CosIncidentAngle, CosIncidentAngle, G); + G = XMVectorAbs(G); + G = XMVectorSqrt(G); + + XMVECTOR S = XMVectorAdd(G, CosIncidentAngle); + XMVECTOR D = XMVectorSubtract(G, CosIncidentAngle); + + XMVECTOR V0 = XMVectorMultiply(D, D); + XMVECTOR V1 = XMVectorMultiply(S, S); + V1 = XMVectorReciprocal(V1); + V0 = 
XMVectorMultiply(g_XMOneHalf.v, V0); + V0 = XMVectorMultiply(V0, V1); + + XMVECTOR V2 = XMVectorMultiplyAdd(CosIncidentAngle, S, g_XMNegativeOne.v); + XMVECTOR V3 = XMVectorMultiplyAdd(CosIncidentAngle, D, g_XMOne.v); + V2 = XMVectorMultiply(V2, V2); + V3 = XMVectorMultiply(V3, V3); + V3 = XMVectorReciprocal(V3); + V2 = XMVectorMultiplyAdd(V2, V3, g_XMOne.v); + + XMVECTOR Result = XMVectorMultiply(V0, V2); + + Result = XMVectorSaturate(Result); + + return Result; + +#elif defined(_XM_SSE_INTRINSICS_) + // G = sqrt(abs((RefractionIndex^2-1) + CosIncidentAngle^2)) + XMVECTOR G = _mm_mul_ps(RefractionIndex, RefractionIndex); + XMVECTOR vTemp = _mm_mul_ps(CosIncidentAngle, CosIncidentAngle); + G = _mm_sub_ps(G, g_XMOne); + vTemp = _mm_add_ps(vTemp, G); + // max((0-vTemp),vTemp) == abs(vTemp) + // The abs is needed to deal with refraction and cosine being zero + G = _mm_setzero_ps(); + G = _mm_sub_ps(G, vTemp); + G = _mm_max_ps(G, vTemp); + // Last operation, the sqrt() + G = _mm_sqrt_ps(G); + + // Calc G-C and G+C + XMVECTOR GAddC = _mm_add_ps(G, CosIncidentAngle); + XMVECTOR GSubC = _mm_sub_ps(G, CosIncidentAngle); + // Perform the term (0.5f *(g - c)^2) / (g + c)^2 + XMVECTOR vResult = _mm_mul_ps(GSubC, GSubC); + vTemp = _mm_mul_ps(GAddC, GAddC); + vResult = _mm_mul_ps(vResult, g_XMOneHalf); + vResult = _mm_div_ps(vResult, vTemp); + // Perform the term ((c * (g + c) - 1)^2 / (c * (g - c) + 1)^2 + 1) + GAddC = _mm_mul_ps(GAddC, CosIncidentAngle); + GSubC = _mm_mul_ps(GSubC, CosIncidentAngle); + GAddC = _mm_sub_ps(GAddC, g_XMOne); + GSubC = _mm_add_ps(GSubC, g_XMOne); + GAddC = _mm_mul_ps(GAddC, GAddC); + GSubC = _mm_mul_ps(GSubC, GSubC); + GAddC = _mm_div_ps(GAddC, GSubC); + GAddC = _mm_add_ps(GAddC, g_XMOne); + // Multiply the two term parts + vResult = _mm_mul_ps(vResult, GAddC); + // Clamp to 0.0 - 1.0f + vResult = _mm_max_ps(vResult, g_XMZero); + vResult = _mm_min_ps(vResult, g_XMOne); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XMScalarNearEqual +( + float S1, + float S2, + float Epsilon +) noexcept +{ + float Delta = S1 - S2; + return (fabsf(Delta) <= Epsilon); +} + +//------------------------------------------------------------------------------ +// Modulo the range of the given angle such that -XM_PI <= Angle < XM_PI +inline float XMScalarModAngle(float Angle) noexcept +{ + // Note: The modulo is performed with unsigned math only to work + // around a precision error on numbers that are close to PI + + // Normalize the range from 0.0f to XM_2PI + Angle = Angle + XM_PI; + // Perform the modulo, unsigned + float fTemp = fabsf(Angle); + fTemp = fTemp - (XM_2PI * static_cast<float>(static_cast<uint32_t>(fTemp / XM_2PI))); + // Restore the number to the range of -XM_PI to XM_PI-epsilon + fTemp = fTemp - XM_PI; + // If the modulo'd value was negative, restore negation + if (Angle < 0.0f) + { + fTemp = -fTemp; + } + return fTemp; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarSin(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast<float>(static_cast<int>(quotient + 0.5f)); + } + else + { + quotient = static_cast<float>(static_cast<int>(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). 
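+    // For example (illustrative): Value = 5.0f gives quotient = 1 after
+    // rounding, so y = 5.0 - 2*pi ~= -1.2832, which already lies inside
+    // [-pi/2, pi/2]; the polynomial below then evaluates sin(5.0) ~= -0.9589.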
+ if (y > XM_PIDIV2) + { + y = XM_PI - y; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + } + + // 11-degree minimax approximation + float y2 = y * y; + return (((((-2.3889859e-08f * y2 + 2.7525562e-06f) * y2 - 0.00019840874f) * y2 + 0.0083333310f) * y2 - 0.16666667f) * y2 + 1.0f) * y; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarSinEst(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast<float>(static_cast<int>(quotient + 0.5f)); + } + else + { + quotient = static_cast<float>(static_cast<int>(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). + if (y > XM_PIDIV2) + { + y = XM_PI - y; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + } + + // 7-degree minimax approximation + float y2 = y * y; + return (((-0.00018524670f * y2 + 0.0083139502f) * y2 - 0.16665852f) * y2 + 1.0f) * y; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarCos(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast<float>(static_cast<int>(quotient + 0.5f)); + } + else + { + quotient = static_cast<float>(static_cast<int>(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with cos(y) = sign*cos(x). + float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + // 10-degree minimax approximation + float y2 = y * y; + float p = ((((-2.6051615e-07f * y2 + 2.4760495e-05f) * y2 - 0.0013888378f) * y2 + 0.041666638f) * y2 - 0.5f) * y2 + 1.0f; + return sign * p; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarCosEst(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast<float>(static_cast<int>(quotient + 0.5f)); + } + else + { + quotient = static_cast<float>(static_cast<int>(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with cos(y) = sign*cos(x). + float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + // 6-degree minimax approximation + float y2 = y * y; + float p = ((-0.0012712436f * y2 + 0.041493919f) * y2 - 0.49992746f) * y2 + 1.0f; + return sign * p; +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XMScalarSinCos +( + float* pSin, + float* pCos, + float Value +) noexcept +{ + assert(pSin); + assert(pCos); + + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast<float>(static_cast<int>(quotient + 0.5f)); + } + else + { + quotient = static_cast<float>(static_cast<int>(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). 
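+    // Note: the fold below maps y -> pi - y (or -pi - y), which preserves the
+    // sine but negates the cosine, since cos(pi - y) = -cos(y); the sign
+    // computed here applies only to the cosine result.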
+ float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + float y2 = y * y; + + // 11-degree minimax approximation + *pSin = (((((-2.3889859e-08f * y2 + 2.7525562e-06f) * y2 - 0.00019840874f) * y2 + 0.0083333310f) * y2 - 0.16666667f) * y2 + 1.0f) * y; + + // 10-degree minimax approximation + float p = ((((-2.6051615e-07f * y2 + 2.4760495e-05f) * y2 - 0.0013888378f) * y2 + 0.041666638f) * y2 - 0.5f) * y2 + 1.0f; + *pCos = sign * p; +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XMScalarSinCosEst +( + float* pSin, + float* pCos, + float Value +) noexcept +{ + assert(pSin); + assert(pCos); + + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast<float>(static_cast<int>(quotient + 0.5f)); + } + else + { + quotient = static_cast<float>(static_cast<int>(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). + float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + float y2 = y * y; + + // 7-degree minimax approximation + *pSin = (((-0.00018524670f * y2 + 0.0083139502f) * y2 - 0.16665852f) * y2 + 1.0f) * y; + + // 6-degree minimax approximation + float p = ((-0.0012712436f * y2 + 0.041493919f) * y2 - 0.49992746f) * y2 + 1.0f; + *pCos = sign * p; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarASin(float Value) noexcept +{ + // Clamp input to [-1,1]. + bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 7-degree minimax approximation + float result = ((((((-0.0012624911f * x + 0.0066700901f) * x - 0.0170881256f) * x + 0.0308918810f) * x - 0.0501743046f) * x + 0.0889789874f) * x - 0.2145988016f) * x + 1.5707963050f; + result *= root; // acos(|x|) + + // acos(x) = pi - acos(-x) when x < 0, asin(x) = pi/2 - acos(x) + return (nonnegative ? XM_PIDIV2 - result : result - XM_PIDIV2); +} + +//------------------------------------------------------------------------------ + +inline float XMScalarASinEst(float Value) noexcept +{ + // Clamp input to [-1,1]. + bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 3-degree minimax approximation + float result = ((-0.0187293f * x + 0.0742610f) * x - 0.2121144f) * x + 1.5707288f; + result *= root; // acos(|x|) + + // acos(x) = pi - acos(-x) when x < 0, asin(x) = pi/2 - acos(x) + return (nonnegative ? XM_PIDIV2 - result : result - XM_PIDIV2); +} + +//------------------------------------------------------------------------------ + +inline float XMScalarACos(float Value) noexcept +{ + // Clamp input to [-1,1]. 
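+    // The polynomial below approximates acos(|x|) as p(|x|) * sqrt(1 - |x|).
+    // For example (illustrative), Value = 0.5f: p(0.5) ~= 1.48096 and
+    // sqrt(0.5) ~= 0.70711, so the result is ~1.04720, i.e. pi/3 = acos(0.5).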
+ bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 7-degree minimax approximation + float result = ((((((-0.0012624911f * x + 0.0066700901f) * x - 0.0170881256f) * x + 0.0308918810f) * x - 0.0501743046f) * x + 0.0889789874f) * x - 0.2145988016f) * x + 1.5707963050f; + result *= root; + + // acos(x) = pi - acos(-x) when x < 0 + return (nonnegative ? result : XM_PI - result); +} + +//------------------------------------------------------------------------------ + +inline float XMScalarACosEst(float Value) noexcept +{ + // Clamp input to [-1,1]. + bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 3-degree minimax approximation + float result = ((-0.0187293f * x + 0.0742610f) * x - 0.2121144f) * x + 1.5707288f; + result *= root; + + // acos(x) = pi - acos(-x) when x < 0 + return (nonnegative ? result : XM_PI - result); +} + diff --git a/include/directxmath/directxmathvector.inl b/include/directxmath/directxmathvector.inl new file mode 100644 index 0000000..f76d597 --- /dev/null +++ b/include/directxmath/directxmathvector.inl @@ -0,0 +1,14689 @@ +//------------------------------------------------------------------------------------- +// DirectXMathVector.inl -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// http://go.microsoft.com/fwlink/?LinkID=615560 +//------------------------------------------------------------------------------------- + +#pragma once + +#if defined(_XM_NO_INTRINSICS_) +#define XMISNAN(x) isnan(x) +#define XMISINF(x) isinf(x) +#endif + +#if defined(_XM_SSE_INTRINSICS_) + +#define XM3UNPACK3INTO4(l1, l2, l3) \ + XMVECTOR V3 = _mm_shuffle_ps(l2, l3, _MM_SHUFFLE(0, 0, 3, 2));\ + XMVECTOR V2 = _mm_shuffle_ps(l2, l1, _MM_SHUFFLE(3, 3, 1, 0));\ + V2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 0, 2));\ + XMVECTOR V4 = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(L3), 32 / 8)) + +#define XM3PACK4INTO3(v2x) \ + v2x = _mm_shuffle_ps(V2, V3, _MM_SHUFFLE(1, 0, 2, 1));\ + V2 = _mm_shuffle_ps(V2, V1, _MM_SHUFFLE(2, 2, 0, 0));\ + V1 = _mm_shuffle_ps(V1, V2, _MM_SHUFFLE(0, 2, 1, 0));\ + V3 = _mm_shuffle_ps(V3, V4, _MM_SHUFFLE(0, 0, 2, 2));\ + V3 = _mm_shuffle_ps(V3, V4, _MM_SHUFFLE(2, 1, 2, 0)) + +#endif + +/**************************************************************************** + * + * General Vector + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Assignment operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + // Return a vector with all elements equaling zero +inline XMVECTOR XM_CALLCONV XMVectorZero() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { 0.0f, 0.0f, 0.0f, 0.0f } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_f32(0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_setzero_ps(); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with four floating point values +inline XMVECTOR XM_CALLCONV XMVectorSet +( + float x, + float y, + float z, + float w +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + 
XMVECTORF32 vResult = { { { x, y, z, w } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t V0 = vcreate_f32( + static_cast(*reinterpret_cast(&x)) + | (static_cast(*reinterpret_cast(&y)) << 32)); + float32x2_t V1 = vcreate_f32( + static_cast(*reinterpret_cast(&z)) + | (static_cast(*reinterpret_cast(&w)) << 32)); + return vcombine_f32(V0, V1); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_set_ps(w, z, y, x); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with four integer values +inline XMVECTOR XM_CALLCONV XMVectorSetInt +( + uint32_t x, + uint32_t y, + uint32_t z, + uint32_t w +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult = { { { x, y, z, w } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t V0 = vcreate_u32(static_cast(x) | (static_cast(y) << 32)); + uint32x2_t V1 = vcreate_u32(static_cast(z) | (static_cast(w) << 32)); + return vcombine_u32(V0, V1); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_set_epi32(static_cast(w), static_cast(z), static_cast(y), static_cast(x)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated floating point value +inline XMVECTOR XM_CALLCONV XMVectorReplicate(float Value) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_f32(Value); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_set_ps1(Value); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorReplicatePtr(const float* pValue) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float Value = pValue[0]; + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_dup_f32(pValue); +#elif defined(_XM_AVX_INTRINSICS_) + return _mm_broadcast_ss(pValue); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ps1(pValue); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated integer value +inline XMVECTOR XM_CALLCONV XMVectorReplicateInt(uint32_t Value) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(Value); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_set1_epi32(static_cast(Value)); + return _mm_castsi128_ps(vTemp); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorReplicateIntPtr(const uint32_t* pValue) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t Value = pValue[0]; + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_dup_u32(pValue); +#elif defined(_XM_SSE_INTRINSICS_) + return 
_mm_load_ps1(reinterpret_cast(pValue)); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with all bits set (true mask) +inline XMVECTOR XM_CALLCONV XMVectorTrueInt() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult = { { { 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_s32(-1); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_set1_epi32(-1); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with all bits clear (false mask) +inline XMVECTOR XM_CALLCONV XMVectorFalseInt() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { 0.0f, 0.0f, 0.0f, 0.0f } } }; + return vResult; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_setzero_ps(); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the x component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatX(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[0]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_low_f32(V), 0); +#elif defined(_XM_AVX2_INTRINSICS_) && defined(_XM_FAVOR_INTEL_) + return _mm_broadcastss_ps(V); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the y component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatY(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[1]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_low_f32(V), 1); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the z component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatZ(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[2]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_high_f32(V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the w component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatW(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[3]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_high_f32(V), 1); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of 1.0f,1.0f,1.0f,1.0f +inline XMVECTOR XM_CALLCONV XMVectorSplatOne() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = 
+ vResult.f[1] = + vResult.f[2] = + vResult.f[3] = 1.0f; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_f32(1.0f); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMOne; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of INF,INF,INF,INF +inline XMVECTOR XM_CALLCONV XMVectorSplatInfinity() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x7F800000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x7F800000); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMInfinity; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of Q_NAN,Q_NAN,Q_NAN,Q_NAN +inline XMVECTOR XM_CALLCONV XMVectorSplatQNaN() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x7FC00000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x7FC00000); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMQNaN; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of 1.192092896e-7f,1.192092896e-7f,1.192092896e-7f,1.192092896e-7f +inline XMVECTOR XM_CALLCONV XMVectorSplatEpsilon() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x34000000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x34000000); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMEpsilon; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of -0.0f (0x80000000),-0.0f,-0.0f,-0.0f +inline XMVECTOR XM_CALLCONV XMVectorSplatSignMask() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x80000000U; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x80000000U); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_set1_epi32(static_cast(0x80000000)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +// Return a floating point value via an index. This is not a recommended +// function to use due to performance loss. +inline float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[i]; +#else + XMVECTORF32 U; + U.v = V; + return U.f[i]; +#endif +} + +//------------------------------------------------------------------------------ +// Return the X component in an FPU register. +inline float XM_CALLCONV XMVectorGetX(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cvtss_f32(V); +#endif +} + +// Return the Y component in an FPU register. 
+inline float XM_CALLCONV XMVectorGetY(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + return _mm_cvtss_f32(vTemp); +#endif +} + +// Return the Z component in an FPU register. +inline float XM_CALLCONV XMVectorGetZ(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + return _mm_cvtss_f32(vTemp); +#endif +} + +// Return the W component in an FPU register. +inline float XM_CALLCONV XMVectorGetW(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + return _mm_cvtss_f32(vTemp); +#endif +} + +//------------------------------------------------------------------------------ + +// Store a component indexed by i into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetByIndexPtr(float* f, FXMVECTOR V, size_t i) noexcept +{ + assert(f != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + *f = V.vector4_f32[i]; +#else + XMVECTORF32 U; + U.v = V; + *f = U.f[i]; +#endif +} + +//------------------------------------------------------------------------------ + +// Store the X component into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetXPtr(float* x, FXMVECTOR V) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *x = V.vector4_f32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(x, V); +#endif +} + +// Store the Y component into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetYPtr(float* y, FXMVECTOR V) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *y = V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(y, V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + * (reinterpret_cast(y)) = _mm_extract_ps(V, 1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + _mm_store_ss(y, vResult); +#endif +} + +// Store the Z component into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetZPtr(float* z, FXMVECTOR V) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *z = V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(z, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + * (reinterpret_cast(z)) = _mm_extract_ps(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(z, vResult); +#endif +} + +// Store the W component into a 32 bit float location in memory. 
+_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetWPtr(float* w, FXMVECTOR V) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *w = V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(w, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + * (reinterpret_cast(w)) = _mm_extract_ps(V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + _mm_store_ss(w, vResult); +#endif +} + +//------------------------------------------------------------------------------ + +// Return an integer value via an index. This is not a recommended +// function to use due to performance loss. +inline uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[i]; +#else + XMVECTORU32 U; + U.v = V; + return U.u[i]; +#endif +} + +//------------------------------------------------------------------------------ + +// Return the X component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntX(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + return static_cast(_mm_cvtsi128_si32(_mm_castps_si128(V))); +#endif +} + +// Return the Y component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntY(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + return static_cast(_mm_extract_epi32(V1, 1)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(1, 1, 1, 1)); + return static_cast(_mm_cvtsi128_si32(vResulti)); +#endif +} + +// Return the Z component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntZ(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + return static_cast(_mm_extract_epi32(V1, 2)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(2, 2, 2, 2)); + return static_cast(_mm_cvtsi128_si32(vResulti)); +#endif +} + +// Return the W component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntW(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + return static_cast(_mm_extract_epi32(V1, 3)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(3, 3, 3, 3)); + return static_cast(_mm_cvtsi128_si32(vResulti)); +#endif +} + +//------------------------------------------------------------------------------ + +// Store a component indexed by i into a 32 bit integer location in memory. 
+_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntByIndexPtr(uint32_t* x, FXMVECTOR V, size_t i) noexcept +{ + assert(x != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + *x = V.vector4_u32[i]; +#else + XMVECTORU32 U; + U.v = V; + *x = U.u[i]; +#endif +} + +//------------------------------------------------------------------------------ + +// Store the X component into a 32 bit integer location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntXPtr(uint32_t* x, FXMVECTOR V) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *x = V.vector4_u32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(x, *reinterpret_cast<const uint32x4_t*>(&V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(reinterpret_cast<float*>(x), V); +#endif +} + +// Store the Y component into a 32 bit integer location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntYPtr(uint32_t* y, FXMVECTOR V) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *y = V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(y, *reinterpret_cast<const uint32x4_t*>(&V), 1); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + *y = static_cast<uint32_t>(_mm_extract_epi32(V1, 1)); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + _mm_store_ss(reinterpret_cast<float*>(y), vResult); +#endif +} + +// Store the Z component into a 32 bit integer location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntZPtr(uint32_t* z, FXMVECTOR V) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *z = V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(z, *reinterpret_cast<const uint32x4_t*>(&V), 2); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + *z = static_cast<uint32_t>(_mm_extract_epi32(V1, 2)); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast<float*>(z), vResult); +#endif +} + +// Store the W component into a 32 bit integer location in memory. 
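+// (Note: the SSE paths in these integer accessors move lanes with float
+// shuffles and _mm_store_ss; this is bit-exact, as those operations copy raw
+// 32-bit patterns without interpreting them as floating-point values.)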
+_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntWPtr(uint32_t* w, FXMVECTOR V) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *w = V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(w, *reinterpret_cast(&V), 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + *w = static_cast(_mm_extract_epi32(V1, 3)); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + _mm_store_ss(reinterpret_cast(w), vResult); +#endif +} + +//------------------------------------------------------------------------------ + +// Set a single indexed floating point component +inline XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V, float f, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORF32 U; + U.v = V; + U.f[i] = f; + return U.v; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetX(FXMVECTOR V, float x) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + x, + V.vector4_f32[1], + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(x); + vResult = _mm_move_ss(V, vResult); + return vResult; +#endif +} + +// Sets the Y component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetY(FXMVECTOR V, float y) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + y, + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(y, V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(y); + vResult = _mm_insert_ps(V, vResult, 0x10); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + XMVECTOR vTemp = _mm_set_ss(y); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} +// Sets the Z component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetZ(FXMVECTOR V, float z) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + z, + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(z, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(z); + vResult = _mm_insert_ps(V, vResult, 0x20); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + XMVECTOR vTemp = _mm_set_ss(z); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetW(FXMVECTOR V, float w) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + V.vector4_f32[2], + w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return 
vsetq_lane_f32(w, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(w); + vResult = _mm_insert_ps(V, vResult, 0x30); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + XMVECTOR vTemp = _mm_set_ss(w); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +// Sets a component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(FXMVECTOR V, const float* f, size_t i) noexcept +{ + assert(f != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORF32 U; + U.v = V; + U.f[i] = *f; + return U.v; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetXPtr(FXMVECTOR V, const float* x) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + *x, + V.vector4_f32[1], + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_load_ss(x); + vResult = _mm_move_ss(V, vResult); + return vResult; +#endif +} + +// Sets the Y component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetYPtr(FXMVECTOR V, const float* y) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + *y, + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(y, V, 1); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(y); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} + +// Sets the Z component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetZPtr(FXMVECTOR V, const float* z) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + *z, + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(z, V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(z); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetWPtr(FXMVECTOR V, const float* w) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + V.vector4_f32[2], + *w + } } 
}; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(w, V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(w); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +// Sets a component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORU32 tmp; + tmp.v = V; + tmp.u[i] = x; + return tmp; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntX(FXMVECTOR V, uint32_t x) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + x, + V.vector4_u32[1], + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cvtsi32_si128(static_cast(x)); + XMVECTOR vResult = _mm_move_ss(V, _mm_castsi128_ps(vTemp)); + return vResult; +#endif +} + +// Sets the Y component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntY(FXMVECTOR V, uint32_t y) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + y, + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(y, V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i vResult = _mm_castps_si128(V); + vResult = _mm_insert_epi32(vResult, static_cast(y), 1); + return _mm_castsi128_ps(vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + __m128i vTemp = _mm_cvtsi32_si128(static_cast(y)); + // Replace the x component + vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp)); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} + +// Sets the Z component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntZ(FXMVECTOR V, uint32_t z) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + z, + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(z, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i vResult = _mm_castps_si128(V); + vResult = _mm_insert_epi32(vResult, static_cast(z), 2); + return _mm_castsi128_ps(vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + __m128i vTemp = _mm_cvtsi32_si128(static_cast(z)); + // Replace the x component + vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp)); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntW(FXMVECTOR V, uint32_t w) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + 
V.vector4_u32[0], + V.vector4_u32[1], + V.vector4_u32[2], + w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(w, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i vResult = _mm_castps_si128(V); + vResult = _mm_insert_epi32(vResult, static_cast(w), 3); + return _mm_castsi128_ps(vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + __m128i vTemp = _mm_cvtsi32_si128(static_cast(w)); + // Replace the x component + vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp)); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +// Sets a component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(FXMVECTOR V, const uint32_t* x, size_t i) noexcept +{ + assert(x != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORU32 tmp; + tmp.v = V; + tmp.u[i] = *x; + return tmp; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntXPtr(FXMVECTOR V, const uint32_t* x) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + *x, + V.vector4_u32[1], + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(x, *reinterpret_cast(&V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(x)); + XMVECTOR vResult = _mm_move_ss(V, vTemp); + return vResult; +#endif +} + +// Sets the Y component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntYPtr(FXMVECTOR V, const uint32_t* y) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + *y, + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(y, *reinterpret_cast(&V), 1); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(y)); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} + +// Sets the Z component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntZPtr(FXMVECTOR V, const uint32_t* z) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + *z, + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(z, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(z)); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets 
the W component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntWPtr(FXMVECTOR V, const uint32_t* w) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + V.vector4_u32[2], + *w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(w, *reinterpret_cast(&V), 3); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(w)); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSwizzle +( + FXMVECTOR V, + uint32_t E0, + uint32_t E1, + uint32_t E2, + uint32_t E3 +) noexcept +{ + assert((E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4)); + _Analysis_assume_((E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4)); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + V.vector4_f32[E0], + V.vector4_f32[E1], + V.vector4_f32[E2], + V.vector4_f32[E3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const uint32_t ControlElement[4] = + { + 0x03020100, // XM_SWIZZLE_X + 0x07060504, // XM_SWIZZLE_Y + 0x0B0A0908, // XM_SWIZZLE_Z + 0x0F0E0D0C, // XM_SWIZZLE_W + }; + + uint8x8x2_t tbl; + tbl.val[0] = vget_low_f32(V); + tbl.val[1] = vget_high_f32(V); + + uint32x2_t idx = vcreate_u32(static_cast(ControlElement[E0]) | (static_cast(ControlElement[E1]) << 32)); + const uint8x8_t rL = vtbl2_u8(tbl, vreinterpret_u8_u32(idx)); + + idx = vcreate_u32(static_cast(ControlElement[E2]) | (static_cast(ControlElement[E3]) << 32)); + const uint8x8_t rH = vtbl2_u8(tbl, vreinterpret_u8_u32(idx)); + + return vcombine_f32(rL, rH); +#elif defined(_XM_AVX_INTRINSICS_) + unsigned int elem[4] = { E0, E1, E2, E3 }; + __m128i vControl = _mm_loadu_si128(reinterpret_cast(&elem[0])); + return _mm_permutevar_ps(V, vControl); +#else + auto aPtr = reinterpret_cast(&V); + + XMVECTOR Result; + auto pWork = reinterpret_cast(&Result); + + pWork[0] = aPtr[E0]; + pWork[1] = aPtr[E1]; + pWork[2] = aPtr[E2]; + pWork[3] = aPtr[E3]; + + return Result; +#endif +} + +//------------------------------------------------------------------------------ +inline XMVECTOR XM_CALLCONV XMVectorPermute +( + FXMVECTOR V1, + FXMVECTOR V2, + uint32_t PermuteX, + uint32_t PermuteY, + uint32_t PermuteZ, + uint32_t PermuteW +) noexcept +{ + assert(PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7); + _Analysis_assume_(PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7); + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + static const uint32_t ControlElement[8] = + { + 0x03020100, // XM_PERMUTE_0X + 0x07060504, // XM_PERMUTE_0Y + 0x0B0A0908, // XM_PERMUTE_0Z + 0x0F0E0D0C, // XM_PERMUTE_0W + 0x13121110, // XM_PERMUTE_1X + 0x17161514, // XM_PERMUTE_1Y + 0x1B1A1918, // XM_PERMUTE_1Z + 0x1F1E1D1C, // XM_PERMUTE_1W + }; + + uint8x8x4_t tbl; + tbl.val[0] = vget_low_f32(V1); + tbl.val[1] = vget_high_f32(V1); + tbl.val[2] = vget_low_f32(V2); + tbl.val[3] = vget_high_f32(V2); + + uint32x2_t idx = vcreate_u32(static_cast(ControlElement[PermuteX]) | (static_cast(ControlElement[PermuteY]) << 32)); + const uint8x8_t rL = vtbl4_u8(tbl, 
vreinterpret_u8_u32(idx)); + + idx = vcreate_u32(static_cast(ControlElement[PermuteZ]) | (static_cast(ControlElement[PermuteW]) << 32)); + const uint8x8_t rH = vtbl4_u8(tbl, vreinterpret_u8_u32(idx)); + + return vcombine_f32(rL, rH); +#elif defined(_XM_AVX_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + static const XMVECTORU32 three = { { { 3, 3, 3, 3 } } }; + + XM_ALIGNED_DATA(16) unsigned int elem[4] = { PermuteX, PermuteY, PermuteZ, PermuteW }; + __m128i vControl = _mm_load_si128(reinterpret_cast(&elem[0])); + + __m128i vSelect = _mm_cmpgt_epi32(vControl, three); + vControl = _mm_castps_si128(_mm_and_ps(_mm_castsi128_ps(vControl), three)); + + __m128 shuffled1 = _mm_permutevar_ps(V1, vControl); + __m128 shuffled2 = _mm_permutevar_ps(V2, vControl); + + __m128 masked1 = _mm_andnot_ps(_mm_castsi128_ps(vSelect), shuffled1); + __m128 masked2 = _mm_and_ps(_mm_castsi128_ps(vSelect), shuffled2); + + return _mm_or_ps(masked1, masked2); +#else + + const uint32_t* aPtr[2]; + aPtr[0] = reinterpret_cast(&V1); + aPtr[1] = reinterpret_cast(&V2); + + XMVECTOR Result; + auto pWork = reinterpret_cast(&Result); + + const uint32_t i0 = PermuteX & 3; + const uint32_t vi0 = PermuteX >> 2; + pWork[0] = aPtr[vi0][i0]; + + const uint32_t i1 = PermuteY & 3; + const uint32_t vi1 = PermuteY >> 2; + pWork[1] = aPtr[vi1][i1]; + + const uint32_t i2 = PermuteZ & 3; + const uint32_t vi2 = PermuteZ >> 2; + pWork[2] = aPtr[vi2][i2]; + + const uint32_t i3 = PermuteW & 3; + const uint32_t vi3 = PermuteW >> 2; + pWork[3] = aPtr[vi3][i3]; + + return Result; +#endif +} + +//------------------------------------------------------------------------------ +// Define a control vector to be used in XMVectorSelect +// operations. The four integers specified in XMVectorSelectControl +// serve as indices to select between components in two vectors. +// The first index controls selection for the first component of +// the vectors involved in a select operation, the second index +// controls selection for the second component etc. A value of +// zero for an index causes the corresponding component from the first +// vector to be selected whereas a one causes the component from the +// second vector to be selected instead. 
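+// For example (illustrative), with V1 = (1, 2, 3, 4) and V2 = (5, 6, 7, 8),
+// XMVectorSelectControl(0, 1, 0, 1) produces a control vector for which
+// XMVectorSelect(V1, V2, Control) returns (1, 6, 3, 8).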
+ +inline XMVECTOR XM_CALLCONV XMVectorSelectControl +( + uint32_t VectorIndex0, + uint32_t VectorIndex1, + uint32_t VectorIndex2, + uint32_t VectorIndex3 +) noexcept +{ +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + // x=Index0,y=Index1,z=Index2,w=Index3 + __m128i vTemp = _mm_set_epi32(static_cast(VectorIndex3), static_cast(VectorIndex2), static_cast(VectorIndex1), static_cast(VectorIndex0)); + // Any non-zero entries become 0xFFFFFFFF else 0 + vTemp = _mm_cmpgt_epi32(vTemp, g_XMZero); + return _mm_castsi128_ps(vTemp); +#elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + int32x2_t V0 = vcreate_s32(static_cast(VectorIndex0) | (static_cast(VectorIndex1) << 32)); + int32x2_t V1 = vcreate_s32(static_cast(VectorIndex2) | (static_cast(VectorIndex3) << 32)); + int32x4_t vTemp = vcombine_s32(V0, V1); + // Any non-zero entries become 0xFFFFFFFF else 0 + return vcgtq_s32(vTemp, g_XMZero); +#else + XMVECTOR ControlVector; + const uint32_t ControlElement[] = + { + XM_SELECT_0, + XM_SELECT_1 + }; + + assert(VectorIndex0 < 2); + assert(VectorIndex1 < 2); + assert(VectorIndex2 < 2); + assert(VectorIndex3 < 2); + _Analysis_assume_(VectorIndex0 < 2); + _Analysis_assume_(VectorIndex1 < 2); + _Analysis_assume_(VectorIndex2 < 2); + _Analysis_assume_(VectorIndex3 < 2); + + ControlVector.vector4_u32[0] = ControlElement[VectorIndex0]; + ControlVector.vector4_u32[1] = ControlElement[VectorIndex1]; + ControlVector.vector4_u32[2] = ControlElement[VectorIndex2]; + ControlVector.vector4_u32[3] = ControlElement[VectorIndex3]; + + return ControlVector; + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSelect +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Control +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + (V1.vector4_u32[0] & ~Control.vector4_u32[0]) | (V2.vector4_u32[0] & Control.vector4_u32[0]), + (V1.vector4_u32[1] & ~Control.vector4_u32[1]) | (V2.vector4_u32[1] & Control.vector4_u32[1]), + (V1.vector4_u32[2] & ~Control.vector4_u32[2]) | (V2.vector4_u32[2] & Control.vector4_u32[2]), + (V1.vector4_u32[3] & ~Control.vector4_u32[3]) | (V2.vector4_u32[3] & Control.vector4_u32[3]), + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vbslq_f32(Control, V2, V1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp1 = _mm_andnot_ps(Control, V1); + XMVECTOR vTemp2 = _mm_and_ps(V2, Control); + return _mm_or_ps(vTemp1, vTemp2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMergeXY +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[0], + V2.vector4_u32[0], + V1.vector4_u32[1], + V2.vector4_u32[1], + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vzipq_f32(V1, V2).val[0]; +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_unpacklo_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMergeZW +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[2], + V2.vector4_u32[2], + V1.vector4_u32[3], + V2.vector4_u32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vzipq_f32(V1, V2).val[1]; +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_unpackhi_ps(V1, 
V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, uint32_t Elements) noexcept +{ + assert(Elements < 4); + _Analysis_assume_(Elements < 4); + return XMVectorPermute(V1, V2, Elements, ((Elements)+1), ((Elements)+2), ((Elements)+3)); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V, uint32_t Elements) noexcept +{ + assert(Elements < 4); + _Analysis_assume_(Elements < 4); + return XMVectorSwizzle(V, Elements & 3, (Elements + 1) & 3, (Elements + 2) & 3, (Elements + 3) & 3); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V, uint32_t Elements) noexcept +{ + assert(Elements < 4); + _Analysis_assume_(Elements < 4); + return XMVectorSwizzle(V, (4 - (Elements)) & 3, (5 - (Elements)) & 3, (6 - (Elements)) & 3, (7 - (Elements)) & 3); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorInsert( + FXMVECTOR VD, FXMVECTOR VS, + uint32_t VSLeftRotateElements, + uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3) noexcept +{ + XMVECTOR Control = XMVectorSelectControl(Select0 & 1, Select1 & 1, Select2 & 1, Select3 & 1); + return XMVectorSelect(VD, XMVectorRotateLeft(VS, VSLeftRotateElements), Control); +} + +//------------------------------------------------------------------------------ +// Comparison operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] == V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] == V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] == V2.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[3] == V2.vector4_f32[3]) ? 0xFFFFFFFF : 0, + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vceqq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmpeq_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorEqualR +( + uint32_t* pCR, + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + assert(pCR != nullptr); +#if defined(_XM_NO_INTRINSICS_) + uint32_t ux = (V1.vector4_f32[0] == V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0; + uint32_t uy = (V1.vector4_f32[1] == V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0; + uint32_t uz = (V1.vector4_f32[2] == V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0; + uint32_t uw = (V1.vector4_f32[3] == V2.vector4_f32[3]) ? 
0xFFFFFFFFU : 0;
+ uint32_t CR = 0;
+ if (ux & uy & uz & uw)
+ {
+ // All elements are equal
+ CR = XM_CRMASK_CR6TRUE;
+ }
+ else if (!(ux | uy | uz | uw))
+ {
+ // All elements are not equal
+ CR = XM_CRMASK_CR6FALSE;
+ }
+ *pCR = CR;
+
+ XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
+ return Control.v;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+ uint32x4_t vResult = vceqq_f32(V1, V2);
+ uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
+ uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+ uint32_t r = vget_lane_u32(vTemp2.val[1], 1);
+ uint32_t CR = 0;
+ if (r == 0xFFFFFFFFU)
+ {
+ // All elements are equal
+ CR = XM_CRMASK_CR6TRUE;
+ }
+ else if (!r)
+ {
+ // All elements are not equal
+ CR = XM_CRMASK_CR6FALSE;
+ }
+ *pCR = CR;
+ return vResult;
+#elif defined(_XM_SSE_INTRINSICS_)
+ XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2);
+ uint32_t CR = 0;
+ int iTest = _mm_movemask_ps(vTemp);
+ if (iTest == 0xf)
+ {
+ CR = XM_CRMASK_CR6TRUE;
+ }
+ else if (!iTest)
+ {
+ // All elements are not equal
+ CR = XM_CRMASK_CR6FALSE;
+ }
+ *pCR = CR;
+ return vTemp;
+#endif
+}
+
+//------------------------------------------------------------------------------
+// Treat the components of the vectors as unsigned integers and
+// compare individual bits between the two. This is useful for
+// comparing control vectors and result vectors returned from
+// other comparison operations.
+
+inline XMVECTOR XM_CALLCONV XMVectorEqualInt
+(
+ FXMVECTOR V1,
+ FXMVECTOR V2
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+ XMVECTORU32 Control = { { {
+ (V1.vector4_u32[0] == V2.vector4_u32[0]) ? 0xFFFFFFFF : 0,
+ (V1.vector4_u32[1] == V2.vector4_u32[1]) ? 0xFFFFFFFF : 0,
+ (V1.vector4_u32[2] == V2.vector4_u32[2]) ? 0xFFFFFFFF : 0,
+ (V1.vector4_u32[3] == V2.vector4_u32[3]) ?
0xFFFFFFFF : 0, + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vceqq_u32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorEqualIntR +( + uint32_t* pCR, + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + assert(pCR != nullptr); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Control = XMVectorEqualInt(V1, V2); + + *pCR = 0; + if (XMVector4EqualInt(Control, XMVectorTrueInt())) + { + // All elements are equal + *pCR |= XM_CRMASK_CR6TRUE; + } + else if (XMVector4EqualInt(Control, XMVectorFalseInt())) + { + // All elements are not equal + *pCR |= XM_CRMASK_CR6FALSE; + } + return Control; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + // All elements are equal + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + // All elements are not equal + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + int iTemp = _mm_movemask_ps(_mm_castsi128_ps(V)); + uint32_t CR = 0; + if (iTemp == 0x0F) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTemp) + { + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNearEqual +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Epsilon +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fDeltax = V1.vector4_f32[0] - V2.vector4_f32[0]; + float fDeltay = V1.vector4_f32[1] - V2.vector4_f32[1]; + float fDeltaz = V1.vector4_f32[2] - V2.vector4_f32[2]; + float fDeltaw = V1.vector4_f32[3] - V2.vector4_f32[3]; + + fDeltax = fabsf(fDeltax); + fDeltay = fabsf(fDeltay); + fDeltaz = fabsf(fDeltaz); + fDeltaw = fabsf(fDeltaw); + + XMVECTORU32 Control = { { { + (fDeltax <= Epsilon.vector4_f32[0]) ? 0xFFFFFFFFU : 0, + (fDeltay <= Epsilon.vector4_f32[1]) ? 0xFFFFFFFFU : 0, + (fDeltaz <= Epsilon.vector4_f32[2]) ? 0xFFFFFFFFU : 0, + (fDeltaw <= Epsilon.vector4_f32[3]) ? 0xFFFFFFFFU : 0, + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vDelta = vsubq_f32(V1, V2); +#ifdef _MSC_VER + return vacleq_f32(vDelta, Epsilon); +#else + return vcleq_f32(vabsq_f32(vDelta), Epsilon); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + // Get the difference + XMVECTOR vDelta = _mm_sub_ps(V1, V2); + // Get the absolute value of the difference + XMVECTOR vTemp = _mm_setzero_ps(); + vTemp = _mm_sub_ps(vTemp, vDelta); + vTemp = _mm_max_ps(vTemp, vDelta); + vTemp = _mm_cmple_ps(vTemp, Epsilon); + return vTemp; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNotEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] != V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] != V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] != V2.vector4_f32[2]) ? 
0xFFFFFFFF : 0, + (V1.vector4_f32[3] != V2.vector4_f32[3]) ? 0xFFFFFFFF : 0, + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vmvnq_u32(vceqq_f32(V1, V2)); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmpneq_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNotEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_u32[0] != V2.vector4_u32[0]) ? 0xFFFFFFFFU : 0, + (V1.vector4_u32[1] != V2.vector4_u32[1]) ? 0xFFFFFFFFU : 0, + (V1.vector4_u32[2] != V2.vector4_u32[2]) ? 0xFFFFFFFFU : 0, + (V1.vector4_u32[3] != V2.vector4_u32[3]) ? 0xFFFFFFFFU : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vmvnq_u32(vceqq_u32(V1, V2)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return _mm_xor_ps(_mm_castsi128_ps(V), g_XMNegOneMask); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorGreater +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] > V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] > V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] > V2.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[3] > V2.vector4_f32[3]) ? 0xFFFFFFFF : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vcgtq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmpgt_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorGreaterR +( + uint32_t* pCR, + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + assert(pCR != nullptr); +#if defined(_XM_NO_INTRINSICS_) + + uint32_t ux = (V1.vector4_f32[0] > V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0; + uint32_t uy = (V1.vector4_f32[1] > V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0; + uint32_t uz = (V1.vector4_f32[2] > V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0; + uint32_t uw = (V1.vector4_f32[3] > V2.vector4_f32[3]) ? 
0xFFFFFFFFU : 0; + uint32_t CR = 0; + if (ux & uy & uz & uw) + { + // All elements are greater + CR = XM_CRMASK_CR6TRUE; + } + else if (!(ux | uy | uz | uw)) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + + XMVECTORU32 Control = { { { ux, uy, uz, uw } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + // All elements are greater + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0xf) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vTemp; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorGreaterOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] >= V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] >= V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] >= V2.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[3] >= V2.vector4_f32[3]) ? 0xFFFFFFFF : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vcgeq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmpge_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorGreaterOrEqualR +( + uint32_t* pCR, + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + assert(pCR != nullptr); +#if defined(_XM_NO_INTRINSICS_) + + uint32_t ux = (V1.vector4_f32[0] >= V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0; + uint32_t uy = (V1.vector4_f32[1] >= V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0; + uint32_t uz = (V1.vector4_f32[2] >= V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0; + uint32_t uw = (V1.vector4_f32[3] >= V2.vector4_f32[3]) ? 
0xFFFFFFFFU : 0; + uint32_t CR = 0; + if (ux & uy & uz & uw) + { + // All elements are greater + CR = XM_CRMASK_CR6TRUE; + } + else if (!(ux | uy | uz | uw)) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + + XMVECTORU32 Control = { { { ux, uy, uz, uw } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + // All elements are greater or equal + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + // All elements are not greater or equal + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0xf) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vTemp; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLess +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] < V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] < V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] < V2.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[3] < V2.vector4_f32[3]) ? 0xFFFFFFFF : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vcltq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmplt_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLessOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] <= V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] <= V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] <= V2.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[3] <= V2.vector4_f32[3]) ? 0xFFFFFFFF : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vcleq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmple_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorInBounds +( + FXMVECTOR V, + FXMVECTOR Bounds +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]) ? 
0xFFFFFFFF : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = vcleq_f32(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = vnegq_f32(Bounds); + // Test if greater or equal (Reversed) + vTemp2 = vcleq_f32(vTemp2, V); + // Blend answers + vTemp1 = vandq_u32(vTemp1, vTemp2); + return vTemp1; +#elif defined(_XM_SSE_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne); + // Test if greater or equal (Reversed) + vTemp2 = _mm_cmple_ps(vTemp2, V); + // Blend answers + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + return vTemp1; +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorInBoundsR +( + uint32_t* pCR, + FXMVECTOR V, + FXMVECTOR Bounds +) noexcept +{ + assert(pCR != nullptr); +#if defined(_XM_NO_INTRINSICS_) + + uint32_t ux = (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) ? 0xFFFFFFFFU : 0; + uint32_t uy = (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) ? 0xFFFFFFFFU : 0; + uint32_t uz = (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) ? 0xFFFFFFFFU : 0; + uint32_t uw = (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]) ? 0xFFFFFFFFU : 0; + + uint32_t CR = 0; + if (ux & uy & uz & uw) + { + // All elements are in bounds + CR = XM_CRMASK_CR6BOUNDS; + } + *pCR = CR; + + XMVECTORU32 Control = { { { ux, uy, uz, uw } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = vcleq_f32(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = vnegq_f32(Bounds); + // Test if greater or equal (Reversed) + vTemp2 = vcleq_f32(vTemp2, V); + // Blend answers + vTemp1 = vandq_u32(vTemp1, vTemp2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1)); + uint16x4x2_t vTemp3 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp3.val[1], 1); + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + // All elements are in bounds + CR = XM_CRMASK_CR6BOUNDS; + } + *pCR = CR; + return vTemp1; +#elif defined(_XM_SSE_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne); + // Test if greater or equal (Reversed) + vTemp2 = _mm_cmple_ps(vTemp2, V); + // Blend answers + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + + uint32_t CR = 0; + if (_mm_movemask_ps(vTemp1) == 0xf) + { + // All elements are in bounds + CR = XM_CRMASK_CR6BOUNDS; + } + *pCR = CR; + return vTemp1; +#endif +} + +//------------------------------------------------------------------------------ + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(push) +#pragma float_control(precise, on) +#endif + +inline XMVECTOR XM_CALLCONV XMVectorIsNaN(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + XMISNAN(V.vector4_f32[0]) ? 0xFFFFFFFFU : 0, + XMISNAN(V.vector4_f32[1]) ? 0xFFFFFFFFU : 0, + XMISNAN(V.vector4_f32[2]) ? 0xFFFFFFFFU : 0, + XMISNAN(V.vector4_f32[3]) ? 0xFFFFFFFFU : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test against itself. 
NaN is always not equal + uint32x4_t vTempNan = vceqq_f32(V, V); + // Flip results + return vmvnq_u32(vTempNan); +#elif defined(_XM_SSE_INTRINSICS_) + // Test against itself. NaN is always not equal + return _mm_cmpneq_ps(V, V); +#endif +} + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorIsInfinite(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + XMISINF(V.vector4_f32[0]) ? 0xFFFFFFFFU : 0, + XMISINF(V.vector4_f32[1]) ? 0xFFFFFFFFU : 0, + XMISINF(V.vector4_f32[2]) ? 0xFFFFFFFFU : 0, + XMISINF(V.vector4_f32[3]) ? 0xFFFFFFFFU : 0 + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Mask off the sign bit + uint32x4_t vTemp = vandq_u32(V, g_XMAbsMask); + // Compare to infinity + vTemp = vceqq_f32(vTemp, g_XMInfinity); + // If any are infinity, the signs are true. + return vTemp; +#elif defined(_XM_SSE_INTRINSICS_) + // Mask off the sign bit + __m128 vTemp = _mm_and_ps(V, g_XMAbsMask); + // Compare to infinity + vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity); + // If any are infinity, the signs are true. + return vTemp; +#endif +} + +//------------------------------------------------------------------------------ +// Rounding and clamping operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMin +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + (V1.vector4_f32[0] < V2.vector4_f32[0]) ? V1.vector4_f32[0] : V2.vector4_f32[0], + (V1.vector4_f32[1] < V2.vector4_f32[1]) ? V1.vector4_f32[1] : V2.vector4_f32[1], + (V1.vector4_f32[2] < V2.vector4_f32[2]) ? V1.vector4_f32[2] : V2.vector4_f32[2], + (V1.vector4_f32[3] < V2.vector4_f32[3]) ? V1.vector4_f32[3] : V2.vector4_f32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vminq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_min_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMax +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + (V1.vector4_f32[0] > V2.vector4_f32[0]) ? V1.vector4_f32[0] : V2.vector4_f32[0], + (V1.vector4_f32[1] > V2.vector4_f32[1]) ? V1.vector4_f32[1] : V2.vector4_f32[1], + (V1.vector4_f32[2] > V2.vector4_f32[2]) ? V1.vector4_f32[2] : V2.vector4_f32[2], + (V1.vector4_f32[3] > V2.vector4_f32[3]) ? V1.vector4_f32[3] : V2.vector4_f32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vmaxq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_max_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +namespace Internal +{ + // Round to nearest (even) a.k.a. 
banker's rounding
+ inline float round_to_nearest(float x)
+ {
+ float i = floorf(x);
+ x -= i;
+ if (x < 0.5f)
+ return i;
+ if (x > 0.5f)
+ return i + 1.f;
+
+ float int_part;
+ (void)modff(i / 2.f, &int_part);
+ if ((2.f * int_part) == i)
+ {
+ return i;
+ }
+
+ return i + 1.f;
+ }
+}
+
+#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma float_control(push)
+#pragma float_control(precise, on)
+#endif
+
+inline XMVECTOR XM_CALLCONV XMVectorRound(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+ XMVECTORF32 Result = { { {
+ Internal::round_to_nearest(V.vector4_f32[0]),
+ Internal::round_to_nearest(V.vector4_f32[1]),
+ Internal::round_to_nearest(V.vector4_f32[2]),
+ Internal::round_to_nearest(V.vector4_f32[3])
+ } } };
+ return Result.v;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
+ return vrndnq_f32(V);
+#else
+ uint32x4_t sign = vandq_u32(V, g_XMNegativeZero);
+ uint32x4_t sMagic = vorrq_u32(g_XMNoFraction, sign);
+ float32x4_t R1 = vaddq_f32(V, sMagic);
+ R1 = vsubq_f32(R1, sMagic);
+ float32x4_t R2 = vabsq_f32(V);
+ uint32x4_t mask = vcleq_f32(R2, g_XMNoFraction);
+ XMVECTOR vResult = vbslq_f32(mask, R1, V);
+ return vResult;
+#endif
+#elif defined(_XM_SSE4_INTRINSICS_)
+ return _mm_round_ps(V, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+#elif defined(_XM_SSE_INTRINSICS_)
+ __m128 sign = _mm_and_ps(V, g_XMNegativeZero);
+ __m128 sMagic = _mm_or_ps(g_XMNoFraction, sign);
+ __m128 R1 = _mm_add_ps(V, sMagic);
+ R1 = _mm_sub_ps(R1, sMagic);
+ __m128 R2 = _mm_and_ps(V, g_XMAbsMask);
+ __m128 mask = _mm_cmple_ps(R2, g_XMNoFraction);
+ R2 = _mm_andnot_ps(mask, V);
+ R1 = _mm_and_ps(R1, mask);
+ XMVECTOR vResult = _mm_xor_ps(R1, R2);
+ return vResult;
+#endif
+}
+
+#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma float_control(pop)
+#endif
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVectorTruncate(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+ XMVECTOR Result;
+ uint32_t i;
+
+ // Avoid C4701
+ Result.vector4_f32[0] = 0.0f;
+
+ for (i = 0; i < 4; i++)
+ {
+ if (XMISNAN(V.vector4_f32[i]))
+ {
+ Result.vector4_u32[i] = 0x7FC00000;
+ }
+ else if (fabsf(V.vector4_f32[i]) < 8388608.0f)
+ {
+ Result.vector4_f32[i] = static_cast<float>(static_cast<int32_t>(V.vector4_f32[i]));
+ }
+ else
+ {
+ Result.vector4_f32[i] = V.vector4_f32[i];
+ }
+ }
+ return Result;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
+ return vrndq_f32(V);
+#else
+ float32x4_t vTest = vabsq_f32(V);
+ vTest = vcltq_f32(vTest, g_XMNoFraction);
+
+ int32x4_t vInt = vcvtq_s32_f32(V);
+ XMVECTOR vResult = vcvtq_f32_s32(vInt);
+
+ // All numbers less than 8388608 will use the round to int
+ // All others, use the ORIGINAL value
+ return vbslq_f32(vTest, vResult, V);
+#endif
+#elif defined(_XM_SSE4_INTRINSICS_)
+ return _mm_round_ps(V, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+#elif defined(_XM_SSE_INTRINSICS_)
+ // To handle NAN, INF and numbers greater than 8388608, use masking
+ // Get the abs value
+ __m128i vTest = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
+ // Test for greater than 8388608 (All floats with NO fractionals, NAN and INF)
+ vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction);
+ // Convert to int and back to float for rounding with truncation
+ __m128i vInt =
_mm_cvttps_epi32(V); + // Convert back to floats + XMVECTOR vResult = _mm_cvtepi32_ps(vInt); + // All numbers less than 8388608 will use the round to int + vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest)); + // All others, use the ORIGINAL value + vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V)); + vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorFloor(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + floorf(V.vector4_f32[0]), + floorf(V.vector4_f32[1]), + floorf(V.vector4_f32[2]), + floorf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vrndmq_f32(V); +#else + float32x4_t vTest = vabsq_f32(V); + vTest = vcltq_f32(vTest, g_XMNoFraction); + // Truncate + int32x4_t vInt = vcvtq_s32_f32(V); + XMVECTOR vResult = vcvtq_f32_s32(vInt); + XMVECTOR vLarger = vcgtq_f32(vResult, V); + // 0 -> 0, 0xffffffff -> -1.0f + vLarger = vcvtq_f32_s32(vLarger); + vResult = vaddq_f32(vResult, vLarger); + // All numbers less than 8388608 will use the round to int + // All others, use the ORIGINAL value + return vbslq_f32(vTest, vResult, V); +#endif +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_floor_ps(V); +#elif defined(_XM_SSE_INTRINSICS_) + // To handle NAN, INF and numbers greater than 8388608, use masking + __m128i vTest = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction); + // Truncate + __m128i vInt = _mm_cvttps_epi32(V); + XMVECTOR vResult = _mm_cvtepi32_ps(vInt); + __m128 vLarger = _mm_cmpgt_ps(vResult, V); + // 0 -> 0, 0xffffffff -> -1.0f + vLarger = _mm_cvtepi32_ps(_mm_castps_si128(vLarger)); + vResult = _mm_add_ps(vResult, vLarger); + // All numbers less than 8388608 will use the round to int + vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest)); + // All others, use the ORIGINAL value + vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V)); + vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCeiling(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + ceilf(V.vector4_f32[0]), + ceilf(V.vector4_f32[1]), + ceilf(V.vector4_f32[2]), + ceilf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vrndpq_f32(V); +#else + float32x4_t vTest = vabsq_f32(V); + vTest = vcltq_f32(vTest, g_XMNoFraction); + // Truncate + int32x4_t vInt = vcvtq_s32_f32(V); + XMVECTOR vResult = vcvtq_f32_s32(vInt); + XMVECTOR vSmaller = vcltq_f32(vResult, V); + // 0 -> 0, 0xffffffff -> -1.0f + vSmaller = vcvtq_f32_s32(vSmaller); + vResult = vsubq_f32(vResult, vSmaller); + // All numbers less than 8388608 will use the round to int + // All others, use the ORIGINAL value + return vbslq_f32(vTest, vResult, V); +#endif +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_ceil_ps(V); +#elif defined(_XM_SSE_INTRINSICS_) + // To handle NAN, INF and numbers greater than 8388608, use masking + __m128i vTest = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction); + // Truncate + __m128i vInt = _mm_cvttps_epi32(V); + XMVECTOR vResult 
= _mm_cvtepi32_ps(vInt); + __m128 vSmaller = _mm_cmplt_ps(vResult, V); + // 0 -> 0, 0xffffffff -> -1.0f + vSmaller = _mm_cvtepi32_ps(_mm_castps_si128(vSmaller)); + vResult = _mm_sub_ps(vResult, vSmaller); + // All numbers less than 8388608 will use the round to int + vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest)); + // All others, use the ORIGINAL value + vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V)); + vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorClamp +( + FXMVECTOR V, + FXMVECTOR Min, + FXMVECTOR Max +) noexcept +{ + assert(XMVector4LessOrEqual(Min, Max)); + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVectorMax(Min, V); + Result = XMVectorMin(Max, Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vResult; + vResult = vmaxq_f32(Min, V); + vResult = vminq_f32(Max, vResult); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult; + vResult = _mm_max_ps(Min, V); + vResult = _mm_min_ps(Max, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSaturate(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + const XMVECTOR Zero = XMVectorZero(); + + return XMVectorClamp(V, Zero, g_XMOne.v); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Set <0 to 0 + XMVECTOR vResult = vmaxq_f32(V, vdupq_n_f32(0)); + // Set>1 to 1 + return vminq_f32(vResult, vdupq_n_f32(1.0f)); +#elif defined(_XM_SSE_INTRINSICS_) + // Set <0 to 0 + XMVECTOR vResult = _mm_max_ps(V, g_XMZero); + // Set>1 to 1 + return _mm_min_ps(vResult, g_XMOne); +#endif +} + +//------------------------------------------------------------------------------ +// Bitwise logical operations +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorAndInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[0] & V2.vector4_u32[0], + V1.vector4_u32[1] & V2.vector4_u32[1], + V1.vector4_u32[2] & V2.vector4_u32[2], + V1.vector4_u32[3] & V2.vector4_u32[3] + } } }; + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vandq_u32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_and_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorAndCInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[0] & ~V2.vector4_u32[0], + V1.vector4_u32[1] & ~V2.vector4_u32[1], + V1.vector4_u32[2] & ~V2.vector4_u32[2], + V1.vector4_u32[3] & ~V2.vector4_u32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vbicq_u32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_andnot_si128(_mm_castps_si128(V2), _mm_castps_si128(V1)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorOrInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[0] | V2.vector4_u32[0], + V1.vector4_u32[1] | V2.vector4_u32[1], + V1.vector4_u32[2] | V2.vector4_u32[2], + V1.vector4_u32[3] | 
V2.vector4_u32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vorrq_u32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_or_si128(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNorInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + ~(V1.vector4_u32[0] | V2.vector4_u32[0]), + ~(V1.vector4_u32[1] | V2.vector4_u32[1]), + ~(V1.vector4_u32[2] | V2.vector4_u32[2]), + ~(V1.vector4_u32[3] | V2.vector4_u32[3]) + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t Result = vorrq_u32(V1, V2); + return vbicq_u32(g_XMNegOneMask, Result); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i Result; + Result = _mm_or_si128(_mm_castps_si128(V1), _mm_castps_si128(V2)); + Result = _mm_andnot_si128(Result, g_XMNegOneMask); + return _mm_castsi128_ps(Result); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorXorInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[0] ^ V2.vector4_u32[0], + V1.vector4_u32[1] ^ V2.vector4_u32[1], + V1.vector4_u32[2] ^ V2.vector4_u32[2], + V1.vector4_u32[3] ^ V2.vector4_u32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return veorq_u32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_xor_si128(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNegate(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + -V.vector4_f32[0], + -V.vector4_f32[1], + -V.vector4_f32[2], + -V.vector4_f32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vnegq_f32(V); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR Z; + + Z = _mm_setzero_ps(); + + return _mm_sub_ps(Z, V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorAdd +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + V1.vector4_f32[0] + V2.vector4_f32[0], + V1.vector4_f32[1] + V2.vector4_f32[1], + V1.vector4_f32[2] + V2.vector4_f32[2], + V1.vector4_f32[3] + V2.vector4_f32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vaddq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_add_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSum(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result; + Result.f[0] = + Result.f[1] = + Result.f[2] = + Result.f[3] = V.vector4_f32[0] + V.vector4_f32[1] + V.vector4_f32[2] + V.vector4_f32[3]; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + XMVECTOR vTemp = vpaddq_f32(V, V); + return vpaddq_f32(vTemp, vTemp); 
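+ // vpaddq_f32 adds adjacent lane pairs: the first pass above leaves
+ // { x+y, z+w, x+y, z+w }, and the second pass broadcasts the full
+ // horizontal sum x+y+z+w into every lane. The pre-AArch64 path below
+ // builds the same result from the 64-bit halves with vadd_f32/vpadd_f32.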
+#else + float32x2_t v1 = vget_low_f32(V); + float32x2_t v2 = vget_high_f32(V); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + return vcombine_f32(v1, v1); +#endif +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vTemp = _mm_hadd_ps(V, V); + return _mm_hadd_ps(vTemp, vTemp); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 3, 0, 1)); + XMVECTOR vTemp2 = _mm_add_ps(V, vTemp); + vTemp = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 0, 3, 2)); + return _mm_add_ps(vTemp, vTemp2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorAddAngles +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + const XMVECTOR Zero = XMVectorZero(); + + // Add the given angles together. If the range of V1 is such + // that -Pi <= V1 < Pi and the range of V2 is such that + // -2Pi <= V2 <= 2Pi, then the range of the resulting angle + // will be -Pi <= Result < Pi. + XMVECTOR Result = XMVectorAdd(V1, V2); + + XMVECTOR Mask = XMVectorLess(Result, g_XMNegativePi.v); + XMVECTOR Offset = XMVectorSelect(Zero, g_XMTwoPi.v, Mask); + + Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v); + Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask); + + Result = XMVectorAdd(Result, Offset); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Adjust the angles + XMVECTOR vResult = vaddq_f32(V1, V2); + // Less than Pi? + uint32x4_t vOffset = vcltq_f32(vResult, g_XMNegativePi); + vOffset = vandq_u32(vOffset, g_XMTwoPi); + // Add 2Pi to all entries less than -Pi + vResult = vaddq_f32(vResult, vOffset); + // Greater than or equal to Pi? + vOffset = vcgeq_f32(vResult, g_XMPi); + vOffset = vandq_u32(vOffset, g_XMTwoPi); + // Sub 2Pi to all entries greater than Pi + vResult = vsubq_f32(vResult, vOffset); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Adjust the angles + XMVECTOR vResult = _mm_add_ps(V1, V2); + // Less than Pi? + XMVECTOR vOffset = _mm_cmplt_ps(vResult, g_XMNegativePi); + vOffset = _mm_and_ps(vOffset, g_XMTwoPi); + // Add 2Pi to all entries less than -Pi + vResult = _mm_add_ps(vResult, vOffset); + // Greater than or equal to Pi? + vOffset = _mm_cmpge_ps(vResult, g_XMPi); + vOffset = _mm_and_ps(vOffset, g_XMTwoPi); + // Sub 2Pi to all entries greater than Pi + vResult = _mm_sub_ps(vResult, vOffset); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSubtract +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + V1.vector4_f32[0] - V2.vector4_f32[0], + V1.vector4_f32[1] - V2.vector4_f32[1], + V1.vector4_f32[2] - V2.vector4_f32[2], + V1.vector4_f32[3] - V2.vector4_f32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsubq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_sub_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSubtractAngles +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + const XMVECTOR Zero = XMVectorZero(); + + // Subtract the given angles. If the range of V1 is such + // that -Pi <= V1 < Pi and the range of V2 is such that + // -2Pi <= V2 <= 2Pi, then the range of the resulting angle + // will be -Pi <= Result < Pi. 
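+ // For example (illustrative values): V1 = 3.0 and V2 = -3.0 give a raw
+ // difference of 6.0, which is >= Pi, so 2*Pi is subtracted below and the
+ // wrapped result is 6.0 - 2*Pi, approximately -0.2832.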
+ XMVECTOR Result = XMVectorSubtract(V1, V2); + + XMVECTOR Mask = XMVectorLess(Result, g_XMNegativePi.v); + XMVECTOR Offset = XMVectorSelect(Zero, g_XMTwoPi.v, Mask); + + Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v); + Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask); + + Result = XMVectorAdd(Result, Offset); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Adjust the angles + XMVECTOR vResult = vsubq_f32(V1, V2); + // Less than Pi? + uint32x4_t vOffset = vcltq_f32(vResult, g_XMNegativePi); + vOffset = vandq_u32(vOffset, g_XMTwoPi); + // Add 2Pi to all entries less than -Pi + vResult = vaddq_f32(vResult, vOffset); + // Greater than or equal to Pi? + vOffset = vcgeq_f32(vResult, g_XMPi); + vOffset = vandq_u32(vOffset, g_XMTwoPi); + // Sub 2Pi to all entries greater than Pi + vResult = vsubq_f32(vResult, vOffset); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Adjust the angles + XMVECTOR vResult = _mm_sub_ps(V1, V2); + // Less than Pi? + XMVECTOR vOffset = _mm_cmplt_ps(vResult, g_XMNegativePi); + vOffset = _mm_and_ps(vOffset, g_XMTwoPi); + // Add 2Pi to all entries less than -Pi + vResult = _mm_add_ps(vResult, vOffset); + // Greater than or equal to Pi? + vOffset = _mm_cmpge_ps(vResult, g_XMPi); + vOffset = _mm_and_ps(vOffset, g_XMTwoPi); + // Sub 2Pi to all entries greater than Pi + vResult = _mm_sub_ps(vResult, vOffset); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMultiply +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V1.vector4_f32[0] * V2.vector4_f32[0], + V1.vector4_f32[1] * V2.vector4_f32[1], + V1.vector4_f32[2] * V2.vector4_f32[2], + V1.vector4_f32[3] * V2.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vmulq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_mul_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMultiplyAdd +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR V3 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V1.vector4_f32[0] * V2.vector4_f32[0] + V3.vector4_f32[0], + V1.vector4_f32[1] * V2.vector4_f32[1] + V3.vector4_f32[1], + V1.vector4_f32[2] * V2.vector4_f32[2] + V3.vector4_f32[2], + V1.vector4_f32[3] * V2.vector4_f32[3] + V3.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vfmaq_f32(V3, V1, V2); +#else + return vmlaq_f32(V3, V1, V2); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return XM_FMADD_PS(V1, V2, V3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorDivide +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V1.vector4_f32[0] / V2.vector4_f32[0], + V1.vector4_f32[1] / V2.vector4_f32[1], + V1.vector4_f32[2] / V2.vector4_f32[2], + V1.vector4_f32[3] / V2.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vdivq_f32(V1, V2); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(V2); + float32x4_t S = vrecpsq_f32(Reciprocal, V2); + Reciprocal = 
vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, V2); + Reciprocal = vmulq_f32(S, Reciprocal); + return vmulq_f32(V1, Reciprocal); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_div_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNegativeMultiplySubtract +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR V3 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V3.vector4_f32[0] - (V1.vector4_f32[0] * V2.vector4_f32[0]), + V3.vector4_f32[1] - (V1.vector4_f32[1] * V2.vector4_f32[1]), + V3.vector4_f32[2] - (V1.vector4_f32[2] * V2.vector4_f32[2]), + V3.vector4_f32[3] - (V1.vector4_f32[3] * V2.vector4_f32[3]) + } } }; + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vfmsq_f32(V3, V1, V2); +#else + return vmlsq_f32(V3, V1, V2); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return XM_FNMADD_PS(V1, V2, V3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorScale +( + FXMVECTOR V, + float ScaleFactor +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V.vector4_f32[0] * ScaleFactor, + V.vector4_f32[1] * ScaleFactor, + V.vector4_f32[2] * ScaleFactor, + V.vector4_f32[3] * ScaleFactor + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vmulq_n_f32(V, ScaleFactor); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_set_ps1(ScaleFactor); + return _mm_mul_ps(vResult, V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocalEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / V.vector4_f32[0], + 1.f / V.vector4_f32[1], + 1.f / V.vector4_f32[2], + 1.f / V.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vrecpeq_f32(V); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_rcp_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocal(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / V.vector4_f32[0], + 1.f / V.vector4_f32[1], + 1.f / V.vector4_f32[2], + 1.f / V.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + float32x4_t one = vdupq_n_f32(1.0f); + return vdivq_f32(one, V); +#else + // 2 iterations of Newton-Raphson refinement + float32x4_t Reciprocal = vrecpeq_f32(V); + float32x4_t S = vrecpsq_f32(Reciprocal, V); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, V); + return vmulq_f32(S, Reciprocal); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_div_ps(g_XMOne, V); +#endif +} + +//------------------------------------------------------------------------------ +// Return an estimated square root +inline XMVECTOR XM_CALLCONV XMVectorSqrtEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sqrtf(V.vector4_f32[0]), + sqrtf(V.vector4_f32[1]), + sqrtf(V.vector4_f32[2]), + sqrtf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // 1 iteration of Newton-Raphson refinment of sqrt + float32x4_t S0 = vrsqrteq_f32(V); + float32x4_t 
P0 = vmulq_f32(V, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + float32x4_t S1 = vmulq_f32(S0, R0); + + XMVECTOR VEqualsInfinity = XMVectorEqualInt(V, g_XMInfinity.v); + XMVECTOR VEqualsZero = XMVectorEqual(V, vdupq_n_f32(0)); + XMVECTOR Result = vmulq_f32(V, S1); + XMVECTOR Select = XMVectorEqualInt(VEqualsInfinity, VEqualsZero); + return XMVectorSelect(V, Result, Select); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_sqrt_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSqrt(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sqrtf(V.vector4_f32[0]), + sqrtf(V.vector4_f32[1]), + sqrtf(V.vector4_f32[2]), + sqrtf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // 3 iterations of Newton-Raphson refinment of sqrt + float32x4_t S0 = vrsqrteq_f32(V); + float32x4_t P0 = vmulq_f32(V, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + float32x4_t S1 = vmulq_f32(S0, R0); + float32x4_t P1 = vmulq_f32(V, S1); + float32x4_t R1 = vrsqrtsq_f32(P1, S1); + float32x4_t S2 = vmulq_f32(S1, R1); + float32x4_t P2 = vmulq_f32(V, S2); + float32x4_t R2 = vrsqrtsq_f32(P2, S2); + float32x4_t S3 = vmulq_f32(S2, R2); + + XMVECTOR VEqualsInfinity = XMVectorEqualInt(V, g_XMInfinity.v); + XMVECTOR VEqualsZero = XMVectorEqual(V, vdupq_n_f32(0)); + XMVECTOR Result = vmulq_f32(V, S3); + XMVECTOR Select = XMVectorEqualInt(VEqualsInfinity, VEqualsZero); + return XMVectorSelect(V, Result, Select); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_sqrt_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocalSqrtEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / sqrtf(V.vector4_f32[0]), + 1.f / sqrtf(V.vector4_f32[1]), + 1.f / sqrtf(V.vector4_f32[2]), + 1.f / sqrtf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vrsqrteq_f32(V); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_rsqrt_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocalSqrt(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / sqrtf(V.vector4_f32[0]), + 1.f / sqrtf(V.vector4_f32[1]), + 1.f / sqrtf(V.vector4_f32[2]), + 1.f / sqrtf(V.vector4_f32[3]) + } } }; + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t S0 = vrsqrteq_f32(V); + + float32x4_t P0 = vmulq_f32(V, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + + float32x4_t S1 = vmulq_f32(S0, R0); + float32x4_t P1 = vmulq_f32(V, S1); + float32x4_t R1 = vrsqrtsq_f32(P1, S1); + + return vmulq_f32(S1, R1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_sqrt_ps(V); + vResult = _mm_div_ps(g_XMOne, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorExp2(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + powf(2.0f, V.vector4_f32[0]), + powf(2.0f, V.vector4_f32[1]), + powf(2.0f, V.vector4_f32[2]), + powf(2.0f, V.vector4_f32[3]) + } } }; + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t itrunc = vcvtq_s32_f32(V); + float32x4_t ftrunc = vcvtq_f32_s32(itrunc); + 
float32x4_t y = vsubq_f32(V, ftrunc); + + float32x4_t poly = vmlaq_f32(g_XMExpEst6, g_XMExpEst7, y); + poly = vmlaq_f32(g_XMExpEst5, poly, y); + poly = vmlaq_f32(g_XMExpEst4, poly, y); + poly = vmlaq_f32(g_XMExpEst3, poly, y); + poly = vmlaq_f32(g_XMExpEst2, poly, y); + poly = vmlaq_f32(g_XMExpEst1, poly, y); + poly = vmlaq_f32(g_XMOne, poly, y); + + int32x4_t biased = vaddq_s32(itrunc, g_XMExponentBias); + biased = vshlq_n_s32(biased, 23); + float32x4_t result0 = XMVectorDivide(biased, poly); + + biased = vaddq_s32(itrunc, g_XM253); + biased = vshlq_n_s32(biased, 23); + float32x4_t result1 = XMVectorDivide(biased, poly); + result1 = vmulq_f32(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + int32x4_t comp = vcltq_s32(V, g_XMBin128); + float32x4_t result2 = vbslq_f32(comp, result0, g_XMInfinity); + + comp = vcltq_s32(itrunc, g_XMSubnormalExponent); + float32x4_t result3 = vbslq_f32(comp, result1, result0); + + comp = vcltq_s32(V, g_XMBinNeg150); + float32x4_t result4 = vbslq_f32(comp, result3, g_XMZero); + + int32x4_t sign = vandq_s32(V, g_XMNegativeZero); + comp = vceqq_s32(sign, g_XMNegativeZero); + float32x4_t result5 = vbslq_f32(comp, result4, result2); + + int32x4_t t0 = vandq_s32(V, g_XMQNaNTest); + int32x4_t t1 = vandq_s32(V, g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t vResult = vbslq_f32(isNaN, g_XMQNaN, result5); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i itrunc = _mm_cvttps_epi32(V); + __m128 ftrunc = _mm_cvtepi32_ps(itrunc); + __m128 y = _mm_sub_ps(V, ftrunc); + + __m128 poly = XM_FMADD_PS(g_XMExpEst7, y, g_XMExpEst6); + poly = XM_FMADD_PS(poly, y, g_XMExpEst5); + poly = XM_FMADD_PS(poly, y, g_XMExpEst4); + poly = XM_FMADD_PS(poly, y, g_XMExpEst3); + poly = XM_FMADD_PS(poly, y, g_XMExpEst2); + poly = XM_FMADD_PS(poly, y, g_XMExpEst1); + poly = XM_FMADD_PS(poly, y, g_XMOne); + + __m128i biased = _mm_add_epi32(itrunc, g_XMExponentBias); + biased = _mm_slli_epi32(biased, 23); + __m128 result0 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + + biased = _mm_add_epi32(itrunc, g_XM253); + biased = _mm_slli_epi32(biased, 23); + __m128 result1 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + result1 = _mm_mul_ps(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + __m128i comp = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBin128); + __m128i select0 = _mm_and_si128(comp, _mm_castps_si128(result0)); + __m128i select1 = _mm_andnot_si128(comp, g_XMInfinity); + __m128i result2 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent); + select1 = _mm_and_si128(comp, _mm_castps_si128(result1)); + select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0)); + __m128i result3 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBinNeg150); + select0 = _mm_and_si128(comp, result3); + select1 = _mm_andnot_si128(comp, g_XMZero); + __m128i result4 = _mm_or_si128(select0, select1); + + __m128i sign = _mm_and_si128(_mm_castps_si128(V), g_XMNegativeZero); + comp = 
_mm_cmpeq_epi32(sign, g_XMNegativeZero); + select0 = _mm_and_si128(comp, result4); + select1 = _mm_andnot_si128(comp, result2); + __m128i result5 = _mm_or_si128(select0, select1); + + __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + t0 = _mm_cmpeq_epi32(t0, g_XMZero); + t1 = _mm_cmpeq_epi32(t1, g_XMInfinity); + __m128i isNaN = _mm_andnot_si128(t0, t1); + + select0 = _mm_and_si128(isNaN, g_XMQNaN); + select1 = _mm_andnot_si128(isNaN, result5); + __m128i vResult = _mm_or_si128(select0, select1); + + return _mm_castsi128_ps(vResult); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorExpE(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + expf(V.vector4_f32[0]), + expf(V.vector4_f32[1]), + expf(V.vector4_f32[2]), + expf(V.vector4_f32[3]) + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // expE(V) = exp2(vin*log2(e)) + float32x4_t Ve = vmulq_f32(g_XMLgE, V); + + int32x4_t itrunc = vcvtq_s32_f32(Ve); + float32x4_t ftrunc = vcvtq_f32_s32(itrunc); + float32x4_t y = vsubq_f32(Ve, ftrunc); + + float32x4_t poly = vmlaq_f32(g_XMExpEst6, g_XMExpEst7, y); + poly = vmlaq_f32(g_XMExpEst5, poly, y); + poly = vmlaq_f32(g_XMExpEst4, poly, y); + poly = vmlaq_f32(g_XMExpEst3, poly, y); + poly = vmlaq_f32(g_XMExpEst2, poly, y); + poly = vmlaq_f32(g_XMExpEst1, poly, y); + poly = vmlaq_f32(g_XMOne, poly, y); + + int32x4_t biased = vaddq_s32(itrunc, g_XMExponentBias); + biased = vshlq_n_s32(biased, 23); + float32x4_t result0 = XMVectorDivide(biased, poly); + + biased = vaddq_s32(itrunc, g_XM253); + biased = vshlq_n_s32(biased, 23); + float32x4_t result1 = XMVectorDivide(biased, poly); + result1 = vmulq_f32(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + int32x4_t comp = vcltq_s32(Ve, g_XMBin128); + float32x4_t result2 = vbslq_f32(comp, result0, g_XMInfinity); + + comp = vcltq_s32(itrunc, g_XMSubnormalExponent); + float32x4_t result3 = vbslq_f32(comp, result1, result0); + + comp = vcltq_s32(Ve, g_XMBinNeg150); + float32x4_t result4 = vbslq_f32(comp, result3, g_XMZero); + + int32x4_t sign = vandq_s32(Ve, g_XMNegativeZero); + comp = vceqq_s32(sign, g_XMNegativeZero); + float32x4_t result5 = vbslq_f32(comp, result4, result2); + + int32x4_t t0 = vandq_s32(Ve, g_XMQNaNTest); + int32x4_t t1 = vandq_s32(Ve, g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t vResult = vbslq_f32(isNaN, g_XMQNaN, result5); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // expE(V) = exp2(vin*log2(e)) + __m128 Ve = _mm_mul_ps(g_XMLgE, V); + + __m128i itrunc = _mm_cvttps_epi32(Ve); + __m128 ftrunc = _mm_cvtepi32_ps(itrunc); + __m128 y = _mm_sub_ps(Ve, ftrunc); + + __m128 poly = XM_FMADD_PS(y, g_XMExpEst7, g_XMExpEst6); + poly = XM_FMADD_PS(poly, y, g_XMExpEst5); + poly = XM_FMADD_PS(poly, y, g_XMExpEst4); + poly = XM_FMADD_PS(poly, y, g_XMExpEst3); + poly = XM_FMADD_PS(poly, y, g_XMExpEst2); + poly = XM_FMADD_PS(poly, y, g_XMExpEst1); + poly = XM_FMADD_PS(poly, y, g_XMOne); + + __m128i biased = _mm_add_epi32(itrunc, g_XMExponentBias); + biased = _mm_slli_epi32(biased, 23); + __m128 result0 = 
_mm_div_ps(_mm_castsi128_ps(biased), poly); + + biased = _mm_add_epi32(itrunc, g_XM253); + biased = _mm_slli_epi32(biased, 23); + __m128 result1 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + result1 = _mm_mul_ps(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + __m128i comp = _mm_cmplt_epi32(_mm_castps_si128(Ve), g_XMBin128); + __m128i select0 = _mm_and_si128(comp, _mm_castps_si128(result0)); + __m128i select1 = _mm_andnot_si128(comp, g_XMInfinity); + __m128i result2 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent); + select1 = _mm_and_si128(comp, _mm_castps_si128(result1)); + select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0)); + __m128i result3 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(_mm_castps_si128(Ve), g_XMBinNeg150); + select0 = _mm_and_si128(comp, result3); + select1 = _mm_andnot_si128(comp, g_XMZero); + __m128i result4 = _mm_or_si128(select0, select1); + + __m128i sign = _mm_and_si128(_mm_castps_si128(Ve), g_XMNegativeZero); + comp = _mm_cmpeq_epi32(sign, g_XMNegativeZero); + select0 = _mm_and_si128(comp, result4); + select1 = _mm_andnot_si128(comp, result2); + __m128i result5 = _mm_or_si128(select0, select1); + + __m128i t0 = _mm_and_si128(_mm_castps_si128(Ve), g_XMQNaNTest); + __m128i t1 = _mm_and_si128(_mm_castps_si128(Ve), g_XMInfinity); + t0 = _mm_cmpeq_epi32(t0, g_XMZero); + t1 = _mm_cmpeq_epi32(t1, g_XMInfinity); + __m128i isNaN = _mm_andnot_si128(t0, t1); + + select0 = _mm_and_si128(isNaN, g_XMQNaN); + select1 = _mm_andnot_si128(isNaN, result5); + __m128i vResult = _mm_or_si128(select0, select1); + + return _mm_castsi128_ps(vResult); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorExp(FXMVECTOR V) noexcept +{ + return XMVectorExp2(V); +} + +//------------------------------------------------------------------------------ + +#if defined(_XM_SSE_INTRINSICS_) + +namespace Internal +{ + inline __m128i multi_sll_epi32(__m128i value, __m128i count) + { + __m128i v = _mm_shuffle_epi32(value, _MM_SHUFFLE(0, 0, 0, 0)); + __m128i c = _mm_shuffle_epi32(count, _MM_SHUFFLE(0, 0, 0, 0)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r0 = _mm_sll_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r1 = _mm_sll_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r2 = _mm_sll_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r3 = _mm_sll_epi32(v, c); + + // (r0,r0,r1,r1) + __m128 r01 = _mm_shuffle_ps(_mm_castsi128_ps(r0), _mm_castsi128_ps(r1), _MM_SHUFFLE(0, 0, 0, 0)); + // (r2,r2,r3,r3) + __m128 r23 = _mm_shuffle_ps(_mm_castsi128_ps(r2), _mm_castsi128_ps(r3), _MM_SHUFFLE(0, 0, 0, 0)); + // (r0,r1,r2,r3) + __m128 result = _mm_shuffle_ps(r01, r23, _MM_SHUFFLE(2, 0, 2, 0)); + return _mm_castps_si128(result); + } + + inline __m128i multi_srl_epi32(__m128i value, __m128i count) + { + __m128i v = _mm_shuffle_epi32(value, _MM_SHUFFLE(0, 0, 0, 0)); + 
__m128i c = _mm_shuffle_epi32(count, _MM_SHUFFLE(0, 0, 0, 0)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r0 = _mm_srl_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r1 = _mm_srl_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r2 = _mm_srl_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r3 = _mm_srl_epi32(v, c); + + // (r0,r0,r1,r1) + __m128 r01 = _mm_shuffle_ps(_mm_castsi128_ps(r0), _mm_castsi128_ps(r1), _MM_SHUFFLE(0, 0, 0, 0)); + // (r2,r2,r3,r3) + __m128 r23 = _mm_shuffle_ps(_mm_castsi128_ps(r2), _mm_castsi128_ps(r3), _MM_SHUFFLE(0, 0, 0, 0)); + // (r0,r1,r2,r3) + __m128 result = _mm_shuffle_ps(r01, r23, _MM_SHUFFLE(2, 0, 2, 0)); + return _mm_castps_si128(result); + } + + inline __m128i GetLeadingBit(const __m128i value) + { + static const XMVECTORI32 g_XM0000FFFF = { { { 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF } } }; + static const XMVECTORI32 g_XM000000FF = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } }; + static const XMVECTORI32 g_XM0000000F = { { { 0x0000000F, 0x0000000F, 0x0000000F, 0x0000000F } } }; + static const XMVECTORI32 g_XM00000003 = { { { 0x00000003, 0x00000003, 0x00000003, 0x00000003 } } }; + + __m128i v = value, r, c, b, s; + + c = _mm_cmpgt_epi32(v, g_XM0000FFFF); // c = (v > 0xFFFF) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + r = _mm_slli_epi32(b, 4); // r = (b << 4) + v = multi_srl_epi32(v, r); // v = (v >> r) + + c = _mm_cmpgt_epi32(v, g_XM000000FF); // c = (v > 0xFF) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + s = _mm_slli_epi32(b, 3); // s = (b << 3) + v = multi_srl_epi32(v, s); // v = (v >> s) + r = _mm_or_si128(r, s); // r = (r | s) + + c = _mm_cmpgt_epi32(v, g_XM0000000F); // c = (v > 0xF) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + s = _mm_slli_epi32(b, 2); // s = (b << 2) + v = multi_srl_epi32(v, s); // v = (v >> s) + r = _mm_or_si128(r, s); // r = (r | s) + + c = _mm_cmpgt_epi32(v, g_XM00000003); // c = (v > 0x3) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + s = _mm_slli_epi32(b, 1); // s = (b << 1) + v = multi_srl_epi32(v, s); // v = (v >> s) + r = _mm_or_si128(r, s); // r = (r | s) + + s = _mm_srli_epi32(v, 1); + r = _mm_or_si128(r, s); + return r; + } +} // namespace Internal + +#endif // _XM_SSE_INTRINSICS_ + +#if defined(_XM_ARM_NEON_INTRINSICS_) + +namespace Internal +{ + inline int32x4_t GetLeadingBit(const int32x4_t value) + { + static const XMVECTORI32 g_XM0000FFFF = { { { 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF } } }; + static const XMVECTORI32 g_XM000000FF = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } }; + static const XMVECTORI32 g_XM0000000F = { { { 0x0000000F, 0x0000000F, 0x0000000F, 0x0000000F } } }; + static const XMVECTORI32 g_XM00000003 = { { { 0x00000003, 0x00000003, 0x00000003, 0x00000003 } } }; + + int32x4_t v = value, r, c, b, s; + + c = vcgtq_s32(v, g_XM0000FFFF); // c = (v > 0xFFFF) + b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0) + r = vshlq_n_s32(b, 4); // r = (b << 4) + r = vnegq_s32(r); + v = vshlq_u32(v, r); // v = (v >> r) + + c = vcgtq_s32(v, g_XM000000FF); // c = (v > 0xFF) + b = vshrq_n_u32(c, 31); // b = (c ? 
1 : 0) + s = vshlq_n_s32(b, 3); // s = (b << 3) + s = vnegq_s32(s); + v = vshlq_u32(v, s); // v = (v >> s) + r = vorrq_s32(r, s); // r = (r | s) + + c = vcgtq_s32(v, g_XM0000000F); // c = (v > 0xF) + b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0) + s = vshlq_n_s32(b, 2); // s = (b << 2) + s = vnegq_s32(s); + v = vshlq_u32(v, s); // v = (v >> s) + r = vorrq_s32(r, s); // r = (r | s) + + c = vcgtq_s32(v, g_XM00000003); // c = (v > 0x3) + b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0) + s = vshlq_n_s32(b, 1); // s = (b << 1) + s = vnegq_s32(s); + v = vshlq_u32(v, s); // v = (v >> s) + r = vorrq_s32(r, s); // r = (r | s) + + s = vshrq_n_u32(v, 1); + r = vorrq_s32(r, s); + return r; + } + +} // namespace Internal + +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLog2(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + const float fScale = 1.4426950f; // (1.0f / logf(2.0f)); + + XMVECTORF32 Result = { { { + logf(V.vector4_f32[0]) * fScale, + logf(V.vector4_f32[1]) * fScale, + logf(V.vector4_f32[2]) * fScale, + logf(V.vector4_f32[3]) * fScale + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t rawBiased = vandq_s32(V, g_XMInfinity); + int32x4_t trailing = vandq_s32(V, g_XMQNaNTest); + int32x4_t isExponentZero = vceqq_s32(g_XMZero, rawBiased); + + // Compute exponent and significand for normals. + int32x4_t biased = vshrq_n_u32(rawBiased, 23); + int32x4_t exponentNor = vsubq_s32(biased, g_XMExponentBias); + int32x4_t trailingNor = trailing; + + // Compute exponent and significand for subnormals. + int32x4_t leading = Internal::GetLeadingBit(trailing); + int32x4_t shift = vsubq_s32(g_XMNumTrailing, leading); + int32x4_t exponentSub = vsubq_s32(g_XMSubnormalExponent, shift); + int32x4_t trailingSub = vshlq_u32(trailing, shift); + trailingSub = vandq_s32(trailingSub, g_XMQNaNTest); + int32x4_t e = vbslq_f32(isExponentZero, exponentSub, exponentNor); + int32x4_t t = vbslq_f32(isExponentZero, trailingSub, trailingNor); + + // Compute the approximation. 
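+    // Annotation (hedged summary, not in the upstream source): writing
+    // V = 2^e * (1 + y) with y in [0,1) gives log2(V) = e + log2(1 + y).
+    // The trailing bits t are OR'd with the bit pattern of 1.0f to
+    // synthesize (1 + y), and log2(1 + y) is then approximated as
+    // y * P(y), where P is the degree-7 minimax polynomial evaluated in
+    // Horner form below.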
+ int32x4_t tmp = vorrq_s32(g_XMOne, t); + float32x4_t y = vsubq_f32(tmp, g_XMOne); + + float32x4_t log2 = vmlaq_f32(g_XMLogEst6, g_XMLogEst7, y); + log2 = vmlaq_f32(g_XMLogEst5, log2, y); + log2 = vmlaq_f32(g_XMLogEst4, log2, y); + log2 = vmlaq_f32(g_XMLogEst3, log2, y); + log2 = vmlaq_f32(g_XMLogEst2, log2, y); + log2 = vmlaq_f32(g_XMLogEst1, log2, y); + log2 = vmlaq_f32(g_XMLogEst0, log2, y); + log2 = vmlaq_f32(vcvtq_f32_s32(e), log2, y); + + // if (x is NaN) -> QNaN + // else if (V is positive) + // if (V is infinite) -> +inf + // else -> log2(V) + // else + // if (V is zero) -> -inf + // else -> -QNaN + + int32x4_t isInfinite = vandq_s32((V), g_XMAbsMask); + isInfinite = vceqq_s32(isInfinite, g_XMInfinity); + + int32x4_t isGreaterZero = vcgtq_s32((V), g_XMZero); + int32x4_t isNotFinite = vcgtq_s32((V), g_XMInfinity); + int32x4_t isPositive = vbicq_s32(isGreaterZero, isNotFinite); + + int32x4_t isZero = vandq_s32((V), g_XMAbsMask); + isZero = vceqq_s32(isZero, g_XMZero); + + int32x4_t t0 = vandq_s32((V), g_XMQNaNTest); + int32x4_t t1 = vandq_s32((V), g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t result = vbslq_f32(isInfinite, g_XMInfinity, log2); + tmp = vbslq_f32(isZero, g_XMNegInfinity, g_XMNegQNaN); + result = vbslq_f32(isPositive, result, tmp); + result = vbslq_f32(isNaN, g_XMQNaN, result); + return result; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i rawBiased = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + __m128i trailing = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i isExponentZero = _mm_cmpeq_epi32(g_XMZero, rawBiased); + + // Compute exponent and significand for normals. + __m128i biased = _mm_srli_epi32(rawBiased, 23); + __m128i exponentNor = _mm_sub_epi32(biased, g_XMExponentBias); + __m128i trailingNor = trailing; + + // Compute exponent and significand for subnormals. + __m128i leading = Internal::GetLeadingBit(trailing); + __m128i shift = _mm_sub_epi32(g_XMNumTrailing, leading); + __m128i exponentSub = _mm_sub_epi32(g_XMSubnormalExponent, shift); + __m128i trailingSub = Internal::multi_sll_epi32(trailing, shift); + trailingSub = _mm_and_si128(trailingSub, g_XMQNaNTest); + + __m128i select0 = _mm_and_si128(isExponentZero, exponentSub); + __m128i select1 = _mm_andnot_si128(isExponentZero, exponentNor); + __m128i e = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isExponentZero, trailingSub); + select1 = _mm_andnot_si128(isExponentZero, trailingNor); + __m128i t = _mm_or_si128(select0, select1); + + // Compute the approximation. 
+ __m128i tmp = _mm_or_si128(g_XMOne, t); + __m128 y = _mm_sub_ps(_mm_castsi128_ps(tmp), g_XMOne); + + __m128 log2 = XM_FMADD_PS(g_XMLogEst7, y, g_XMLogEst6); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst5); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst4); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst3); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst2); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst1); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst0); + log2 = XM_FMADD_PS(log2, y, _mm_cvtepi32_ps(e)); + + // if (x is NaN) -> QNaN + // else if (V is positive) + // if (V is infinite) -> +inf + // else -> log2(V) + // else + // if (V is zero) -> -inf + // else -> -QNaN + + __m128i isInfinite = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + isInfinite = _mm_cmpeq_epi32(isInfinite, g_XMInfinity); + + __m128i isGreaterZero = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMZero); + __m128i isNotFinite = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMInfinity); + __m128i isPositive = _mm_andnot_si128(isNotFinite, isGreaterZero); + + __m128i isZero = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + isZero = _mm_cmpeq_epi32(isZero, g_XMZero); + + __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + t0 = _mm_cmpeq_epi32(t0, g_XMZero); + t1 = _mm_cmpeq_epi32(t1, g_XMInfinity); + __m128i isNaN = _mm_andnot_si128(t0, t1); + + select0 = _mm_and_si128(isInfinite, g_XMInfinity); + select1 = _mm_andnot_si128(isInfinite, _mm_castps_si128(log2)); + __m128i result = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isZero, g_XMNegInfinity); + select1 = _mm_andnot_si128(isZero, g_XMNegQNaN); + tmp = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isPositive, result); + select1 = _mm_andnot_si128(isPositive, tmp); + result = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isNaN, g_XMQNaN); + select1 = _mm_andnot_si128(isNaN, result); + result = _mm_or_si128(select0, select1); + + return _mm_castsi128_ps(result); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLogE(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + logf(V.vector4_f32[0]), + logf(V.vector4_f32[1]), + logf(V.vector4_f32[2]), + logf(V.vector4_f32[3]) + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t rawBiased = vandq_s32(V, g_XMInfinity); + int32x4_t trailing = vandq_s32(V, g_XMQNaNTest); + int32x4_t isExponentZero = vceqq_s32(g_XMZero, rawBiased); + + // Compute exponent and significand for normals. + int32x4_t biased = vshrq_n_u32(rawBiased, 23); + int32x4_t exponentNor = vsubq_s32(biased, g_XMExponentBias); + int32x4_t trailingNor = trailing; + + // Compute exponent and significand for subnormals. + int32x4_t leading = Internal::GetLeadingBit(trailing); + int32x4_t shift = vsubq_s32(g_XMNumTrailing, leading); + int32x4_t exponentSub = vsubq_s32(g_XMSubnormalExponent, shift); + int32x4_t trailingSub = vshlq_u32(trailing, shift); + trailingSub = vandq_s32(trailingSub, g_XMQNaNTest); + int32x4_t e = vbslq_f32(isExponentZero, exponentSub, exponentNor); + int32x4_t t = vbslq_f32(isExponentZero, trailingSub, trailingNor); + + // Compute the approximation. 
+ int32x4_t tmp = vorrq_s32(g_XMOne, t); + float32x4_t y = vsubq_f32(tmp, g_XMOne); + + float32x4_t log2 = vmlaq_f32(g_XMLogEst6, g_XMLogEst7, y); + log2 = vmlaq_f32(g_XMLogEst5, log2, y); + log2 = vmlaq_f32(g_XMLogEst4, log2, y); + log2 = vmlaq_f32(g_XMLogEst3, log2, y); + log2 = vmlaq_f32(g_XMLogEst2, log2, y); + log2 = vmlaq_f32(g_XMLogEst1, log2, y); + log2 = vmlaq_f32(g_XMLogEst0, log2, y); + log2 = vmlaq_f32(vcvtq_f32_s32(e), log2, y); + + log2 = vmulq_f32(g_XMInvLgE, log2); + + // if (x is NaN) -> QNaN + // else if (V is positive) + // if (V is infinite) -> +inf + // else -> log2(V) + // else + // if (V is zero) -> -inf + // else -> -QNaN + + int32x4_t isInfinite = vandq_s32((V), g_XMAbsMask); + isInfinite = vceqq_s32(isInfinite, g_XMInfinity); + + int32x4_t isGreaterZero = vcgtq_s32((V), g_XMZero); + int32x4_t isNotFinite = vcgtq_s32((V), g_XMInfinity); + int32x4_t isPositive = vbicq_s32(isGreaterZero, isNotFinite); + + int32x4_t isZero = vandq_s32((V), g_XMAbsMask); + isZero = vceqq_s32(isZero, g_XMZero); + + int32x4_t t0 = vandq_s32((V), g_XMQNaNTest); + int32x4_t t1 = vandq_s32((V), g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t result = vbslq_f32(isInfinite, g_XMInfinity, log2); + tmp = vbslq_f32(isZero, g_XMNegInfinity, g_XMNegQNaN); + result = vbslq_f32(isPositive, result, tmp); + result = vbslq_f32(isNaN, g_XMQNaN, result); + return result; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i rawBiased = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + __m128i trailing = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i isExponentZero = _mm_cmpeq_epi32(g_XMZero, rawBiased); + + // Compute exponent and significand for normals. + __m128i biased = _mm_srli_epi32(rawBiased, 23); + __m128i exponentNor = _mm_sub_epi32(biased, g_XMExponentBias); + __m128i trailingNor = trailing; + + // Compute exponent and significand for subnormals. + __m128i leading = Internal::GetLeadingBit(trailing); + __m128i shift = _mm_sub_epi32(g_XMNumTrailing, leading); + __m128i exponentSub = _mm_sub_epi32(g_XMSubnormalExponent, shift); + __m128i trailingSub = Internal::multi_sll_epi32(trailing, shift); + trailingSub = _mm_and_si128(trailingSub, g_XMQNaNTest); + + __m128i select0 = _mm_and_si128(isExponentZero, exponentSub); + __m128i select1 = _mm_andnot_si128(isExponentZero, exponentNor); + __m128i e = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isExponentZero, trailingSub); + select1 = _mm_andnot_si128(isExponentZero, trailingNor); + __m128i t = _mm_or_si128(select0, select1); + + // Compute the approximation. 
+ __m128i tmp = _mm_or_si128(g_XMOne, t); + __m128 y = _mm_sub_ps(_mm_castsi128_ps(tmp), g_XMOne); + + __m128 log2 = XM_FMADD_PS(g_XMLogEst7, y, g_XMLogEst6); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst5); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst4); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst3); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst2); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst1); + log2 = XM_FMADD_PS(log2, y, g_XMLogEst0); + log2 = XM_FMADD_PS(log2, y, _mm_cvtepi32_ps(e)); + + log2 = _mm_mul_ps(g_XMInvLgE, log2); + + // if (x is NaN) -> QNaN + // else if (V is positive) + // if (V is infinite) -> +inf + // else -> log2(V) + // else + // if (V is zero) -> -inf + // else -> -QNaN + + __m128i isInfinite = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + isInfinite = _mm_cmpeq_epi32(isInfinite, g_XMInfinity); + + __m128i isGreaterZero = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMZero); + __m128i isNotFinite = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMInfinity); + __m128i isPositive = _mm_andnot_si128(isNotFinite, isGreaterZero); + + __m128i isZero = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + isZero = _mm_cmpeq_epi32(isZero, g_XMZero); + + __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + t0 = _mm_cmpeq_epi32(t0, g_XMZero); + t1 = _mm_cmpeq_epi32(t1, g_XMInfinity); + __m128i isNaN = _mm_andnot_si128(t0, t1); + + select0 = _mm_and_si128(isInfinite, g_XMInfinity); + select1 = _mm_andnot_si128(isInfinite, _mm_castps_si128(log2)); + __m128i result = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isZero, g_XMNegInfinity); + select1 = _mm_andnot_si128(isZero, g_XMNegQNaN); + tmp = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isPositive, result); + select1 = _mm_andnot_si128(isPositive, tmp); + result = _mm_or_si128(select0, select1); + + select0 = _mm_and_si128(isNaN, g_XMQNaN); + select1 = _mm_andnot_si128(isNaN, result); + result = _mm_or_si128(select0, select1); + + return _mm_castsi128_ps(result); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLog(FXMVECTOR V) noexcept +{ + return XMVectorLog2(V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorPow +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + powf(V1.vector4_f32[0], V2.vector4_f32[0]), + powf(V1.vector4_f32[1], V2.vector4_f32[1]), + powf(V1.vector4_f32[2], V2.vector4_f32[2]), + powf(V1.vector4_f32[3], V2.vector4_f32[3]) + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTORF32 vResult = { { { + powf(vgetq_lane_f32(V1, 0), vgetq_lane_f32(V2, 0)), + powf(vgetq_lane_f32(V1, 1), vgetq_lane_f32(V2, 1)), + powf(vgetq_lane_f32(V1, 2), vgetq_lane_f32(V2, 2)), + powf(vgetq_lane_f32(V1, 3), vgetq_lane_f32(V2, 3)) + } } }; + return vResult.v; +#elif defined(_XM_SSE_INTRINSICS_) + XM_ALIGNED_DATA(16) float a[4]; + XM_ALIGNED_DATA(16) float b[4]; + _mm_store_ps(a, V1); + _mm_store_ps(b, V2); + XMVECTOR vResult = _mm_setr_ps( + powf(a[0], b[0]), + powf(a[1], b[1]), + powf(a[2], b[2]), + powf(a[3], b[3])); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorAbs(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + fabsf(V.vector4_f32[0]), + 
fabsf(V.vector4_f32[1]), + fabsf(V.vector4_f32[2]), + fabsf(V.vector4_f32[3]) + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vabsq_f32(V); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_setzero_ps(); + vResult = _mm_sub_ps(vResult, V); + vResult = _mm_max_ps(vResult, V); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMod +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + // V1 % V2 = V1 - V2 * truncate(V1 / V2) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Quotient = XMVectorDivide(V1, V2); + Quotient = XMVectorTruncate(Quotient); + XMVECTOR Result = XMVectorNegativeMultiplySubtract(V2, Quotient, V1); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vResult = XMVectorDivide(V1, V2); + vResult = XMVectorTruncate(vResult); + return vmlsq_f32(V1, vResult, V2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_div_ps(V1, V2); + vResult = XMVectorTruncate(vResult); + return XM_FNMADD_PS(vResult, V2, V1); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorModAngles(FXMVECTOR Angles) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR V; + XMVECTOR Result; + + // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI + V = XMVectorMultiply(Angles, g_XMReciprocalTwoPi.v); + V = XMVectorRound(V); + Result = XMVectorNegativeMultiplySubtract(g_XMTwoPi.v, V, Angles); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI + XMVECTOR vResult = vmulq_f32(Angles, g_XMReciprocalTwoPi); + // Use the inline function due to complexity for rounding + vResult = XMVectorRound(vResult); + return vmlsq_f32(Angles, vResult, g_XMTwoPi); +#elif defined(_XM_SSE_INTRINSICS_) + // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI + XMVECTOR vResult = _mm_mul_ps(Angles, g_XMReciprocalTwoPi); + // Use the inline function due to complexity for rounding + vResult = XMVectorRound(vResult); + return XM_FNMADD_PS(vResult, g_XMTwoPi, Angles); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSin(FXMVECTOR V) noexcept +{ + // 11-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sinf(V.vector4_f32[0]), + sinf(V.vector4_f32[1]), + sinf(V.vector4_f32[2]), + sinf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x). 
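+    // Annotation (not in the upstream source): for |x| > pi/2 the
+    // reflection sin(x) = sin(sign(x)*pi - x) is used, so rflx = c - x
+    // folds x into [-pi/2, pi/2], where the odd degree-11 polynomial
+    // x * P(x^2) is accurate. Illustrative scalar sketch:
+    //     if (fabsf(x) > XM_PIDIV2) x = copysignf(XM_PI, x) - x;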
+ uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR SC1 = g_XMSinCoefficients1; + const XMVECTOR SC0 = g_XMSinCoefficients0; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SC0), 1); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(SC1), 0); + + vConstants = vdupq_lane_f32(vget_high_f32(SC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(SC0), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(SC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, x); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x). + __m128 sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR SC1 = g_XMSinCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(SC1, _MM_SHUFFLE(0, 0, 0, 0)); + const XMVECTOR SC0 = g_XMSinCoefficients0; + __m128 vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, x); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCos(FXMVECTOR V) noexcept +{ + // 10-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + cosf(V.vector4_f32[0]), + cosf(V.vector4_f32[1]), + cosf(V.vector4_f32[2]), + cosf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Map V to x in [-pi,pi]. + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). 
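+    // Annotation (not in the upstream source): the same reflection as in
+    // XMVectorSin is applied, but cosine picks up a sign, since
+    // cos(sign(x)*pi - x) = -cos(x); the polynomial result is therefore
+    // multiplied by +1 for in-range inputs and -1 for reflected ones.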
+ uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR CC1 = g_XMCosCoefficients1; + const XMVECTOR CC0 = g_XMCosCoefficients0; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(CC0), 1); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(CC1), 0); + + vConstants = vdupq_lane_f32(vget_high_f32(CC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(CC0), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(CC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, sign); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + // Map V to x in [-pi,pi]. + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). + XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, g_XMOne); + select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + sign = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR CC1 = g_XMCosCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(CC1, _MM_SHUFFLE(0, 0, 0, 0)); + const XMVECTOR CC0 = g_XMCosCoefficients0; + __m128 vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorSinCos +( + XMVECTOR* pSin, + XMVECTOR* pCos, + FXMVECTOR V +) noexcept +{ + assert(pSin != nullptr); + assert(pCos != nullptr); + + // 11/10-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Sin = { { { + sinf(V.vector4_f32[0]), + sinf(V.vector4_f32[1]), + sinf(V.vector4_f32[2]), + sinf(V.vector4_f32[3]) + } } }; + + XMVECTORF32 Cos = { { { + cosf(V.vector4_f32[0]), + cosf(V.vector4_f32[1]), + cosf(V.vector4_f32[2]), + cosf(V.vector4_f32[3]) + } } }; + + *pSin = Sin.v; + *pCos = Cos.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). 
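+    // Annotation (not in the upstream source): one shared range reduction
+    // feeds both polynomials here -- sin(x) = x * P(x^2) and
+    // cos(x) = sign * Q(x^2) -- which is why this fused routine is cheaper
+    // than separate calls to XMVectorSin and XMVectorCos.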
+ uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation for sine + const XMVECTOR SC1 = g_XMSinCoefficients1; + const XMVECTOR SC0 = g_XMSinCoefficients0; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SC0), 1); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(SC1), 0); + + vConstants = vdupq_lane_f32(vget_high_f32(SC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(SC0), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(SC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + *pSin = vmulq_f32(Result, x); + + // Compute polynomial approximation for cosine + const XMVECTOR CC1 = g_XMCosCoefficients1; + const XMVECTOR CC0 = g_XMCosCoefficients0; + vConstants = vdupq_lane_f32(vget_high_f32(CC0), 1); + Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(CC1), 0); + + vConstants = vdupq_lane_f32(vget_high_f32(CC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(CC0), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(CC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + *pCos = vmulq_f32(Result, sign); +#elif defined(_XM_SSE_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x). 
+ XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, g_XMOne); + select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + sign = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation of sine + const XMVECTOR SC1 = g_XMSinCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(SC1, _MM_SHUFFLE(0, 0, 0, 0)); + const XMVECTOR SC0 = g_XMSinCoefficients0; + __m128 vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, x); + *pSin = Result; + + // Compute polynomial approximation of cosine + const XMVECTOR CC1 = g_XMCosCoefficients1; + vConstantsB = XM_PERMUTE_PS(CC1, _MM_SHUFFLE(0, 0, 0, 0)); + const XMVECTOR CC0 = g_XMCosCoefficients0; + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(3, 3, 3, 3)); + Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + *pCos = Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorTan(FXMVECTOR V) noexcept +{ + // Cody and Waite algorithm to compute tangent. 
+ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + tanf(V.vector4_f32[0]), + tanf(V.vector4_f32[1]), + tanf(V.vector4_f32[2]), + tanf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + static const XMVECTORF32 TanCoefficients0 = { { { 1.0f, -4.667168334e-1f, 2.566383229e-2f, -3.118153191e-4f } } }; + static const XMVECTORF32 TanCoefficients1 = { { { 4.981943399e-7f, -1.333835001e-1f, 3.424887824e-3f, -1.786170734e-5f } } }; + static const XMVECTORF32 TanConstants = { { { 1.570796371f, 6.077100628e-11f, 0.000244140625f, 0.63661977228f /*2 / Pi*/ } } }; + static const XMVECTORU32 Mask = { { { 0x1, 0x1, 0x1, 0x1 } } }; + + XMVECTOR TwoDivPi = XMVectorSplatW(TanConstants.v); + + XMVECTOR Zero = XMVectorZero(); + + XMVECTOR C0 = XMVectorSplatX(TanConstants.v); + XMVECTOR C1 = XMVectorSplatY(TanConstants.v); + XMVECTOR Epsilon = XMVectorSplatZ(TanConstants.v); + + XMVECTOR VA = XMVectorMultiply(V, TwoDivPi); + + VA = XMVectorRound(VA); + + XMVECTOR VC = XMVectorNegativeMultiplySubtract(VA, C0, V); + + XMVECTOR VB = XMVectorAbs(VA); + + VC = XMVectorNegativeMultiplySubtract(VA, C1, VC); + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + VB = vcvtq_u32_f32(VB); +#elif defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + reinterpret_cast<__m128i*>(&VB)[0] = _mm_cvttps_epi32(VB); +#else + for (size_t i = 0; i < 4; i++) + { + VB.vector4_u32[i] = static_cast(VB.vector4_f32[i]); + } +#endif + + XMVECTOR VC2 = XMVectorMultiply(VC, VC); + + XMVECTOR T7 = XMVectorSplatW(TanCoefficients1.v); + XMVECTOR T6 = XMVectorSplatZ(TanCoefficients1.v); + XMVECTOR T4 = XMVectorSplatX(TanCoefficients1.v); + XMVECTOR T3 = XMVectorSplatW(TanCoefficients0.v); + XMVECTOR T5 = XMVectorSplatY(TanCoefficients1.v); + XMVECTOR T2 = XMVectorSplatZ(TanCoefficients0.v); + XMVECTOR T1 = XMVectorSplatY(TanCoefficients0.v); + XMVECTOR T0 = XMVectorSplatX(TanCoefficients0.v); + + XMVECTOR VBIsEven = XMVectorAndInt(VB, Mask.v); + VBIsEven = XMVectorEqualInt(VBIsEven, Zero); + + XMVECTOR N = XMVectorMultiplyAdd(VC2, T7, T6); + XMVECTOR D = XMVectorMultiplyAdd(VC2, T4, T3); + N = XMVectorMultiplyAdd(VC2, N, T5); + D = XMVectorMultiplyAdd(VC2, D, T2); + N = XMVectorMultiply(VC2, N); + D = XMVectorMultiplyAdd(VC2, D, T1); + N = XMVectorMultiplyAdd(VC, N, VC); + XMVECTOR VCNearZero = XMVectorInBounds(VC, Epsilon); + D = XMVectorMultiplyAdd(VC2, D, T0); + + N = XMVectorSelect(N, VC, VCNearZero); + D = XMVectorSelect(D, g_XMOne.v, VCNearZero); + + XMVECTOR R0 = XMVectorNegate(N); + XMVECTOR R1 = XMVectorDivide(N, D); + R0 = XMVectorDivide(D, R0); + + XMVECTOR VIsZero = XMVectorEqual(V, Zero); + + XMVECTOR Result = XMVectorSelect(R0, R1, VBIsEven); + + Result = XMVectorSelect(Result, Zero, VIsZero); + + return Result; + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSinH(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sinhf(V.vector4_f32[0]), + sinhf(V.vector4_f32[1]), + sinhf(V.vector4_f32[2]), + sinhf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f) + + XMVECTOR V1 = vmlaq_f32(g_XMNegativeOne.v, V, Scale.v); + XMVECTOR V2 = vmlsq_f32(g_XMNegativeOne.v, V, Scale.v); + XMVECTOR E1 = XMVectorExp(V1); + XMVECTOR E2 = 
XMVectorExp(V2); + + return vsubq_f32(E1, E2); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f) + + XMVECTOR V1 = XM_FMADD_PS(V, Scale, g_XMNegativeOne); + XMVECTOR V2 = XM_FNMADD_PS(V, Scale, g_XMNegativeOne); + XMVECTOR E1 = XMVectorExp(V1); + XMVECTOR E2 = XMVectorExp(V2); + + return _mm_sub_ps(E1, E2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCosH(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + coshf(V.vector4_f32[0]), + coshf(V.vector4_f32[1]), + coshf(V.vector4_f32[2]), + coshf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f) + + XMVECTOR V1 = vmlaq_f32(g_XMNegativeOne.v, V, Scale.v); + XMVECTOR V2 = vmlsq_f32(g_XMNegativeOne.v, V, Scale.v); + XMVECTOR E1 = XMVectorExp(V1); + XMVECTOR E2 = XMVectorExp(V2); + return vaddq_f32(E1, E2); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f) + + XMVECTOR V1 = XM_FMADD_PS(V, Scale.v, g_XMNegativeOne.v); + XMVECTOR V2 = XM_FNMADD_PS(V, Scale.v, g_XMNegativeOne.v); + XMVECTOR E1 = XMVectorExp(V1); + XMVECTOR E2 = XMVectorExp(V2); + return _mm_add_ps(E1, E2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorTanH(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + tanhf(V.vector4_f32[0]), + tanhf(V.vector4_f32[1]), + tanhf(V.vector4_f32[2]), + tanhf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f } } }; // 2.0f / ln(2.0f) + + XMVECTOR E = vmulq_f32(V, Scale.v); + E = XMVectorExp(E); + E = vmlaq_f32(g_XMOneHalf.v, E, g_XMOneHalf.v); + E = XMVectorReciprocal(E); + return vsubq_f32(g_XMOne.v, E); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f } } }; // 2.0f / ln(2.0f) + + XMVECTOR E = _mm_mul_ps(V, Scale.v); + E = XMVectorExp(E); + E = XM_FMADD_PS(E, g_XMOneHalf.v, g_XMOneHalf.v); + E = _mm_div_ps(g_XMOne.v, E); + return _mm_sub_ps(g_XMOne.v, E); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorASin(FXMVECTOR V) noexcept +{ + // 7-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + asinf(V.vector4_f32[0]), + asinf(V.vector4_f32[1]), + asinf(V.vector4_f32[2]), + asinf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero); + float32x4_t x = vabsq_f32(V); + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. 
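+    // Annotation (not in the upstream source): the polynomial below
+    // approximates acos(|V|) / sqrt(1 - |V|), so t0 = poly * root is
+    // roughly acos(|V|); negative inputs use acos(V) = pi - acos(|V|), and
+    // the final subtraction applies asin(V) = pi/2 - acos(V).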
+ float32x4_t oneMValue = vsubq_f32(g_XMOne, x); + float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue); + float32x4_t root = XMVectorSqrt(clampOneMValue); + + // Compute polynomial approximation + const XMVECTOR AC1 = g_XMArcCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AC1), 0); + XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AC1), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AC1), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AC1), 0); + t0 = vmlaq_f32(vConstants, t0, x); + + const XMVECTOR AC0 = g_XMArcCoefficients0; + vConstants = vdupq_lane_f32(vget_high_f32(AC0), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_high_f32(AC0), 0); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AC0), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AC0), 0); + t0 = vmlaq_f32(vConstants, t0, x); + t0 = vmulq_f32(t0, root); + + float32x4_t t1 = vsubq_f32(g_XMPi, t0); + t0 = vbslq_f32(nonnegative, t0, t1); + t0 = vsubq_f32(g_XMHalfPi, t0); + return t0; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero); + __m128 mvalue = _mm_sub_ps(g_XMZero, V); + __m128 x = _mm_max_ps(V, mvalue); // |V| + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. + __m128 oneMValue = _mm_sub_ps(g_XMOne, x); + __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue); + __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|) + + // Compute polynomial approximation + const XMVECTOR AC1 = g_XMArcCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + const XMVECTOR AC0 = g_XMArcCoefficients0; + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(3, 3, 3, 3)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(2, 2, 2, 2)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + t0 = _mm_mul_ps(t0, root); + + __m128 t1 = _mm_sub_ps(g_XMPi, t0); + t0 = _mm_and_ps(nonnegative, t0); + t1 = _mm_andnot_ps(nonnegative, t1); + t0 = _mm_or_ps(t0, t1); + t0 = _mm_sub_ps(g_XMHalfPi, t0); + return t0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorACos(FXMVECTOR V) noexcept +{ + // 7-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + acosf(V.vector4_f32[0]), + acosf(V.vector4_f32[1]), + acosf(V.vector4_f32[2]), + acosf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero); + float32x4_t x = vabsq_f32(V); + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. 
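+    // Annotation (not in the upstream source): this is the same evaluation
+    // as XMVectorASin above; the only difference is that acos is returned
+    // directly, without the closing pi/2 - t0 step.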
+ float32x4_t oneMValue = vsubq_f32(g_XMOne, x); + float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue); + float32x4_t root = XMVectorSqrt(clampOneMValue); + + // Compute polynomial approximation + const XMVECTOR AC1 = g_XMArcCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AC1), 0); + XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AC1), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AC1), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AC1), 0); + t0 = vmlaq_f32(vConstants, t0, x); + + const XMVECTOR AC0 = g_XMArcCoefficients0; + vConstants = vdupq_lane_f32(vget_high_f32(AC0), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_high_f32(AC0), 0); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AC0), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AC0), 0); + t0 = vmlaq_f32(vConstants, t0, x); + t0 = vmulq_f32(t0, root); + + float32x4_t t1 = vsubq_f32(g_XMPi, t0); + t0 = vbslq_f32(nonnegative, t0, t1); + return t0; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero); + __m128 mvalue = _mm_sub_ps(g_XMZero, V); + __m128 x = _mm_max_ps(V, mvalue); // |V| + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. + __m128 oneMValue = _mm_sub_ps(g_XMOne, x); + __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue); + __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|) + + // Compute polynomial approximation + const XMVECTOR AC1 = g_XMArcCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + const XMVECTOR AC0 = g_XMArcCoefficients0; + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(3, 3, 3, 3)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(2, 2, 2, 2)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + t0 = _mm_mul_ps(t0, root); + + __m128 t1 = _mm_sub_ps(g_XMPi, t0); + t0 = _mm_and_ps(nonnegative, t0); + t1 = _mm_andnot_ps(nonnegative, t1); + t0 = _mm_or_ps(t0, t1); + return t0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorATan(FXMVECTOR V) noexcept +{ + // 17-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + atanf(V.vector4_f32[0]), + atanf(V.vector4_f32[1]), + atanf(V.vector4_f32[2]), + atanf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t absV = vabsq_f32(V); + float32x4_t invV = XMVectorReciprocal(V); + uint32x4_t comp = vcgtq_f32(V, g_XMOne); + uint32x4_t sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + comp = vcleq_f32(absV, g_XMOne); + sign = vbslq_f32(comp, g_XMZero, sign); + uint32x4_t x = vbslq_f32(comp, V, invV); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR TC1 = g_XMATanCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(TC1), 0); 
+ XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(TC1), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(TC1), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(TC1), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + const XMVECTOR TC0 = g_XMATanCoefficients0; + vConstants = vdupq_lane_f32(vget_high_f32(TC0), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_high_f32(TC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(TC0), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(TC0), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, x); + + float32x4_t result1 = vmulq_f32(sign, g_XMHalfPi); + result1 = vsubq_f32(result1, Result); + + comp = vceqq_f32(sign, g_XMZero); + Result = vbslq_f32(comp, Result, result1); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 absV = XMVectorAbs(V); + __m128 invV = _mm_div_ps(g_XMOne, V); + __m128 comp = _mm_cmpgt_ps(V, g_XMOne); + __m128 select0 = _mm_and_ps(comp, g_XMOne); + __m128 select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + __m128 sign = _mm_or_ps(select0, select1); + comp = _mm_cmple_ps(absV, g_XMOne); + select0 = _mm_and_ps(comp, g_XMZero); + select1 = _mm_andnot_ps(comp, sign); + sign = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, V); + select1 = _mm_andnot_ps(comp, invV); + __m128 x = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR TC1 = g_XMATanCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + const XMVECTOR TC0 = g_XMATanCoefficients0; + vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(3, 3, 3, 3)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + Result = XM_FMADD_PS(Result, x2, g_XMOne); + + Result = _mm_mul_ps(Result, x); + __m128 result1 = _mm_mul_ps(sign, g_XMHalfPi); + result1 = _mm_sub_ps(result1, Result); + + comp = _mm_cmpeq_ps(sign, g_XMZero); + select0 = _mm_and_ps(comp, Result); + select1 = _mm_andnot_ps(comp, result1); + Result = _mm_or_ps(select0, select1); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorATan2 +( + FXMVECTOR Y, + FXMVECTOR X +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + atan2f(Y.vector4_f32[0], X.vector4_f32[0]), + atan2f(Y.vector4_f32[1], X.vector4_f32[1]), + atan2f(Y.vector4_f32[2], X.vector4_f32[2]), + atan2f(Y.vector4_f32[3], X.vector4_f32[3]) + } } }; + return Result.v; +#else + + // Return the inverse tangent of Y / X in the range of -Pi to Pi with the following exceptions: + + // Y == 0 and X is Negative -> Pi with the 
sign of Y + // y == 0 and x is positive -> 0 with the sign of y + // Y != 0 and X == 0 -> Pi / 2 with the sign of Y + // Y != 0 and X is Negative -> atan(y/x) + (PI with the sign of Y) + // X == -Infinity and Finite Y -> Pi with the sign of Y + // X == +Infinity and Finite Y -> 0 with the sign of Y + // Y == Infinity and X is Finite -> Pi / 2 with the sign of Y + // Y == Infinity and X == -Infinity -> 3Pi / 4 with the sign of Y + // Y == Infinity and X == +Infinity -> Pi / 4 with the sign of Y + + static const XMVECTORF32 ATan2Constants = { { { XM_PI, XM_PIDIV2, XM_PIDIV4, XM_PI * 3.0f / 4.0f } } }; + + XMVECTOR Zero = XMVectorZero(); + XMVECTOR ATanResultValid = XMVectorTrueInt(); + + XMVECTOR Pi = XMVectorSplatX(ATan2Constants); + XMVECTOR PiOverTwo = XMVectorSplatY(ATan2Constants); + XMVECTOR PiOverFour = XMVectorSplatZ(ATan2Constants); + XMVECTOR ThreePiOverFour = XMVectorSplatW(ATan2Constants); + + XMVECTOR YEqualsZero = XMVectorEqual(Y, Zero); + XMVECTOR XEqualsZero = XMVectorEqual(X, Zero); + XMVECTOR XIsPositive = XMVectorAndInt(X, g_XMNegativeZero.v); + XIsPositive = XMVectorEqualInt(XIsPositive, Zero); + XMVECTOR YEqualsInfinity = XMVectorIsInfinite(Y); + XMVECTOR XEqualsInfinity = XMVectorIsInfinite(X); + + XMVECTOR YSign = XMVectorAndInt(Y, g_XMNegativeZero.v); + Pi = XMVectorOrInt(Pi, YSign); + PiOverTwo = XMVectorOrInt(PiOverTwo, YSign); + PiOverFour = XMVectorOrInt(PiOverFour, YSign); + ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign); + + XMVECTOR R1 = XMVectorSelect(Pi, YSign, XIsPositive); + XMVECTOR R2 = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero); + XMVECTOR R3 = XMVectorSelect(R2, R1, YEqualsZero); + XMVECTOR R4 = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive); + XMVECTOR R5 = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity); + XMVECTOR Result = XMVectorSelect(R3, R5, YEqualsInfinity); + ATanResultValid = XMVectorEqualInt(Result, ATanResultValid); + + XMVECTOR V = XMVectorDivide(Y, X); + + XMVECTOR R0 = XMVectorATan(V); + + R1 = XMVectorSelect(Pi, g_XMNegativeZero, XIsPositive); + R2 = XMVectorAdd(R0, R1); + + return XMVectorSelect(Result, R2, ATanResultValid); + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSinEst(FXMVECTOR V) noexcept +{ + // 7-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sinf(V.vector4_f32[0]), + sinf(V.vector4_f32[1]), + sinf(V.vector4_f32[2]), + sinf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x). 
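+    // Annotation (not in the upstream source): the Est variants reuse the
+    // full-precision range reduction but truncate the series -- a degree-7
+    // odd polynomial here versus degree 11 in XMVectorSin -- trading
+    // accuracy for speed.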
+ uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR SEC = g_XMSinCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(SEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(SEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, x); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x). + __m128 sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR SEC = g_XMSinCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, x); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCosEst(FXMVECTOR V) noexcept +{ + // 6-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + cosf(V.vector4_f32[0]), + cosf(V.vector4_f32[1]), + cosf(V.vector4_f32[2]), + cosf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Map V to x in [-pi,pi]. + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). + uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR CEC = g_XMCosCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(CEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(CEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(CEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, sign); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + // Map V to x in [-pi,pi]. + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). 
+ XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, g_XMOne); + select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + sign = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR CEC = g_XMCosCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorSinCosEst +( + XMVECTOR* pSin, + XMVECTOR* pCos, + FXMVECTOR V +) noexcept +{ + assert(pSin != nullptr); + assert(pCos != nullptr); + + // 7/6-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Sin = { { { + sinf(V.vector4_f32[0]), + sinf(V.vector4_f32[1]), + sinf(V.vector4_f32[2]), + sinf(V.vector4_f32[3]) + } } }; + + XMVECTORF32 Cos = { { { + cosf(V.vector4_f32[0]), + cosf(V.vector4_f32[1]), + cosf(V.vector4_f32[2]), + cosf(V.vector4_f32[3]) + } } }; + + *pSin = Sin.v; + *pCos = Cos.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). + uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation for sine + const XMVECTOR SEC = g_XMSinCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(SEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(SEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + *pSin = vmulq_f32(Result, x); + + // Compute polynomial approximation + const XMVECTOR CEC = g_XMCosCoefficients1; + vConstants = vdupq_lane_f32(vget_high_f32(CEC), 0); + Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(CEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(CEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + *pCos = vmulq_f32(Result, sign); +#elif defined(_XM_SSE_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x). 
+ XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, g_XMOne); + select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + sign = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation for sine + const XMVECTOR SEC = g_XMSinCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, x); + *pSin = Result; + + // Compute polynomial approximation for cosine + const XMVECTOR CEC = g_XMCosCoefficients1; + vConstantsB = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(3, 3, 3, 3)); + vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + *pCos = Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorTanEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + tanf(V.vector4_f32[0]), + tanf(V.vector4_f32[1]), + tanf(V.vector4_f32[2]), + tanf(V.vector4_f32[3]) + } } }; + return Result.v; +#else + + XMVECTOR OneOverPi = XMVectorSplatW(g_XMTanEstCoefficients.v); + + XMVECTOR V1 = XMVectorMultiply(V, OneOverPi); + V1 = XMVectorRound(V1); + + V1 = XMVectorNegativeMultiplySubtract(g_XMPi.v, V1, V); + + XMVECTOR T0 = XMVectorSplatX(g_XMTanEstCoefficients.v); + XMVECTOR T1 = XMVectorSplatY(g_XMTanEstCoefficients.v); + XMVECTOR T2 = XMVectorSplatZ(g_XMTanEstCoefficients.v); + + XMVECTOR V2T2 = XMVectorNegativeMultiplySubtract(V1, V1, T2); + XMVECTOR V2 = XMVectorMultiply(V1, V1); + XMVECTOR V1T0 = XMVectorMultiply(V1, T0); + XMVECTOR V1T1 = XMVectorMultiply(V1, T1); + + XMVECTOR D = XMVectorReciprocalEst(V2T2); + XMVECTOR N = XMVectorMultiplyAdd(V2, V1T1, V1T0); + + return XMVectorMultiply(N, D); + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorASinEst(FXMVECTOR V) noexcept +{ + // 3-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result; + Result.f[0] = asinf(V.vector4_f32[0]); + Result.f[1] = asinf(V.vector4_f32[1]); + Result.f[2] = asinf(V.vector4_f32[2]); + Result.f[3] = asinf(V.vector4_f32[3]); + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero); + float32x4_t x = vabsq_f32(V); + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. 
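+ // (The estimate uses asin(x) = pi/2 - acos(x), with acos(|x|) approximated
+ // below as poly(|x|) * sqrt(1 - |x|) and reflected to pi - t0 for negative
+ // inputs.)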
+ float32x4_t oneMValue = vsubq_f32(g_XMOne, x); + float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue); + float32x4_t root = XMVectorSqrt(clampOneMValue); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0); + XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0); + t0 = vmlaq_f32(vConstants, t0, x); + t0 = vmulq_f32(t0, root); + + float32x4_t t1 = vsubq_f32(g_XMPi, t0); + t0 = vbslq_f32(nonnegative, t0, t1); + t0 = vsubq_f32(g_XMHalfPi, t0); + return t0; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero); + __m128 mvalue = _mm_sub_ps(g_XMZero, V); + __m128 x = _mm_max_ps(V, mvalue); // |V| + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. + __m128 oneMValue = _mm_sub_ps(g_XMOne, x); + __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue); + __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|) + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + __m128 vConstantsB = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + t0 = _mm_mul_ps(t0, root); + + __m128 t1 = _mm_sub_ps(g_XMPi, t0); + t0 = _mm_and_ps(nonnegative, t0); + t1 = _mm_andnot_ps(nonnegative, t1); + t0 = _mm_or_ps(t0, t1); + t0 = _mm_sub_ps(g_XMHalfPi, t0); + return t0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorACosEst(FXMVECTOR V) noexcept +{ + // 3-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + acosf(V.vector4_f32[0]), + acosf(V.vector4_f32[1]), + acosf(V.vector4_f32[2]), + acosf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero); + float32x4_t x = vabsq_f32(V); + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. + float32x4_t oneMValue = vsubq_f32(g_XMOne, x); + float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue); + float32x4_t root = XMVectorSqrt(clampOneMValue); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0); + XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0); + t0 = vmlaq_f32(vConstants, t0, x); + t0 = vmulq_f32(t0, root); + + float32x4_t t1 = vsubq_f32(g_XMPi, t0); + t0 = vbslq_f32(nonnegative, t0, t1); + return t0; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero); + __m128 mvalue = _mm_sub_ps(g_XMZero, V); + __m128 x = _mm_max_ps(V, mvalue); // |V| + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. 
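+ // (Same scheme as XMVectorASinEst: acos(|V|) ~= poly(|V|) * sqrt(1 - |V|),
+ // with the result reflected to pi - t0 for negative inputs via the
+ // nonnegative mask.)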
+ __m128 oneMValue = _mm_sub_ps(g_XMOne, x); + __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue); + __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|) + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + __m128 vConstantsB = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + t0 = _mm_mul_ps(t0, root); + + __m128 t1 = _mm_sub_ps(g_XMPi, t0); + t0 = _mm_and_ps(nonnegative, t0); + t1 = _mm_andnot_ps(nonnegative, t1); + t0 = _mm_or_ps(t0, t1); + return t0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorATanEst(FXMVECTOR V) noexcept +{ + // 9-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + atanf(V.vector4_f32[0]), + atanf(V.vector4_f32[1]), + atanf(V.vector4_f32[2]), + atanf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t absV = vabsq_f32(V); + float32x4_t invV = XMVectorReciprocalEst(V); + uint32x4_t comp = vcgtq_f32(V, g_XMOne); + uint32x4_t sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + comp = vcleq_f32(absV, g_XMOne); + sign = vbslq_f32(comp, g_XMZero, sign); + uint32x4_t x = vbslq_f32(comp, V, invV); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMATanEstCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(AEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0); + Result = vmlaq_f32(vConstants, Result, x2); + + // ATanEstCoefficients0 is already splatted + Result = vmlaq_f32(g_XMATanEstCoefficients0, Result, x2); + Result = vmulq_f32(Result, x); + + float32x4_t result1 = vmulq_f32(sign, g_XMHalfPi); + result1 = vsubq_f32(result1, Result); + + comp = vceqq_f32(sign, g_XMZero); + Result = vbslq_f32(comp, Result, result1); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 absV = XMVectorAbs(V); + __m128 invV = _mm_div_ps(g_XMOne, V); + __m128 comp = _mm_cmpgt_ps(V, g_XMOne); + __m128 select0 = _mm_and_ps(comp, g_XMOne); + __m128 select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + __m128 sign = _mm_or_ps(select0, select1); + comp = _mm_cmple_ps(absV, g_XMOne); + select0 = _mm_and_ps(comp, g_XMZero); + select1 = _mm_andnot_ps(comp, sign); + sign = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, V); + select1 = _mm_andnot_ps(comp, invV); + __m128 x = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMATanEstCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + // ATanEstCoefficients0 is already splatted + Result = 
XM_FMADD_PS(Result, x2, g_XMATanEstCoefficients0); + Result = _mm_mul_ps(Result, x); + __m128 result1 = _mm_mul_ps(sign, g_XMHalfPi); + result1 = _mm_sub_ps(result1, Result); + + comp = _mm_cmpeq_ps(sign, g_XMZero); + select0 = _mm_and_ps(comp, Result); + select1 = _mm_andnot_ps(comp, result1); + Result = _mm_or_ps(select0, select1); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorATan2Est +( + FXMVECTOR Y, + FXMVECTOR X +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + atan2f(Y.vector4_f32[0], X.vector4_f32[0]), + atan2f(Y.vector4_f32[1], X.vector4_f32[1]), + atan2f(Y.vector4_f32[2], X.vector4_f32[2]), + atan2f(Y.vector4_f32[3], X.vector4_f32[3]), + } } }; + return Result.v; +#else + + static const XMVECTORF32 ATan2Constants = { { { XM_PI, XM_PIDIV2, XM_PIDIV4, 2.3561944905f /* Pi*3/4 */ } } }; + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR ATanResultValid = XMVectorTrueInt(); + + XMVECTOR Pi = XMVectorSplatX(ATan2Constants); + XMVECTOR PiOverTwo = XMVectorSplatY(ATan2Constants); + XMVECTOR PiOverFour = XMVectorSplatZ(ATan2Constants); + XMVECTOR ThreePiOverFour = XMVectorSplatW(ATan2Constants); + + XMVECTOR YEqualsZero = XMVectorEqual(Y, Zero); + XMVECTOR XEqualsZero = XMVectorEqual(X, Zero); + XMVECTOR XIsPositive = XMVectorAndInt(X, g_XMNegativeZero.v); + XIsPositive = XMVectorEqualInt(XIsPositive, Zero); + XMVECTOR YEqualsInfinity = XMVectorIsInfinite(Y); + XMVECTOR XEqualsInfinity = XMVectorIsInfinite(X); + + XMVECTOR YSign = XMVectorAndInt(Y, g_XMNegativeZero.v); + Pi = XMVectorOrInt(Pi, YSign); + PiOverTwo = XMVectorOrInt(PiOverTwo, YSign); + PiOverFour = XMVectorOrInt(PiOverFour, YSign); + ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign); + + XMVECTOR R1 = XMVectorSelect(Pi, YSign, XIsPositive); + XMVECTOR R2 = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero); + XMVECTOR R3 = XMVectorSelect(R2, R1, YEqualsZero); + XMVECTOR R4 = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive); + XMVECTOR R5 = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity); + XMVECTOR Result = XMVectorSelect(R3, R5, YEqualsInfinity); + ATanResultValid = XMVectorEqualInt(Result, ATanResultValid); + + XMVECTOR Reciprocal = XMVectorReciprocalEst(X); + XMVECTOR V = XMVectorMultiply(Y, Reciprocal); + XMVECTOR R0 = XMVectorATanEst(V); + + R1 = XMVectorSelect(Pi, g_XMNegativeZero, XIsPositive); + R2 = XMVectorAdd(R0, R1); + + Result = XMVectorSelect(Result, R2, ATanResultValid); + + return Result; + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLerp +( + FXMVECTOR V0, + FXMVECTOR V1, + float t +) noexcept +{ + // V0 + t * (V1 - V0) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Scale = XMVectorReplicate(t); + XMVECTOR Length = XMVectorSubtract(V1, V0); + return XMVectorMultiplyAdd(Length, Scale, V0); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR L = vsubq_f32(V1, V0); + return vmlaq_n_f32(V0, L, t); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR L = _mm_sub_ps(V1, V0); + XMVECTOR S = _mm_set_ps1(t); + return XM_FMADD_PS(L, S, V0); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLerpV +( + FXMVECTOR V0, + FXMVECTOR V1, + FXMVECTOR T +) noexcept +{ + // V0 + T * (V1 - V0) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Length = XMVectorSubtract(V1, V0); + return 
XMVectorMultiplyAdd(Length, T, V0); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR L = vsubq_f32(V1, V0); + return vmlaq_f32(V0, L, T); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR Length = _mm_sub_ps(V1, V0); + return XM_FMADD_PS(Length, T, V0); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorHermite +( + FXMVECTOR Position0, + FXMVECTOR Tangent0, + FXMVECTOR Position1, + GXMVECTOR Tangent1, + float t +) noexcept +{ + // Result = (2 * t^3 - 3 * t^2 + 1) * Position0 + + // (t^3 - 2 * t^2 + t) * Tangent0 + + // (-2 * t^3 + 3 * t^2) * Position1 + + // (t^3 - t^2) * Tangent1 + +#if defined(_XM_NO_INTRINSICS_) + + float t2 = t * t; + float t3 = t * t2; + + XMVECTOR P0 = XMVectorReplicate(2.0f * t3 - 3.0f * t2 + 1.0f); + XMVECTOR T0 = XMVectorReplicate(t3 - 2.0f * t2 + t); + XMVECTOR P1 = XMVectorReplicate(-2.0f * t3 + 3.0f * t2); + XMVECTOR T1 = XMVectorReplicate(t3 - t2); + + XMVECTOR Result = XMVectorMultiply(P0, Position0); + Result = XMVectorMultiplyAdd(T0, Tangent0, Result); + Result = XMVectorMultiplyAdd(P1, Position1, Result); + Result = XMVectorMultiplyAdd(T1, Tangent1, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float t2 = t * t; + float t3 = t * t2; + + float p0 = 2.0f * t3 - 3.0f * t2 + 1.0f; + float t0 = t3 - 2.0f * t2 + t; + float p1 = -2.0f * t3 + 3.0f * t2; + float t1 = t3 - t2; + + XMVECTOR vResult = vmulq_n_f32(Position0, p0); + vResult = vmlaq_n_f32(vResult, Tangent0, t0); + vResult = vmlaq_n_f32(vResult, Position1, p1); + vResult = vmlaq_n_f32(vResult, Tangent1, t1); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + float t2 = t * t; + float t3 = t * t2; + + XMVECTOR P0 = _mm_set_ps1(2.0f * t3 - 3.0f * t2 + 1.0f); + XMVECTOR T0 = _mm_set_ps1(t3 - 2.0f * t2 + t); + XMVECTOR P1 = _mm_set_ps1(-2.0f * t3 + 3.0f * t2); + XMVECTOR T1 = _mm_set_ps1(t3 - t2); + + XMVECTOR vResult = _mm_mul_ps(P0, Position0); + vResult = XM_FMADD_PS(Tangent0, T0, vResult); + vResult = XM_FMADD_PS(Position1, P1, vResult); + vResult = XM_FMADD_PS(Tangent1, T1, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorHermiteV +( + FXMVECTOR Position0, + FXMVECTOR Tangent0, + FXMVECTOR Position1, + GXMVECTOR Tangent1, + HXMVECTOR T +) noexcept +{ + // Result = (2 * t^3 - 3 * t^2 + 1) * Position0 + + // (t^3 - 2 * t^2 + t) * Tangent0 + + // (-2 * t^3 + 3 * t^2) * Position1 + + // (t^3 - t^2) * Tangent1 + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR T2 = XMVectorMultiply(T, T); + XMVECTOR T3 = XMVectorMultiply(T, T2); + + XMVECTOR P0 = XMVectorReplicate(2.0f * T3.vector4_f32[0] - 3.0f * T2.vector4_f32[0] + 1.0f); + XMVECTOR T0 = XMVectorReplicate(T3.vector4_f32[1] - 2.0f * T2.vector4_f32[1] + T.vector4_f32[1]); + XMVECTOR P1 = XMVectorReplicate(-2.0f * T3.vector4_f32[2] + 3.0f * T2.vector4_f32[2]); + XMVECTOR T1 = XMVectorReplicate(T3.vector4_f32[3] - T2.vector4_f32[3]); + + XMVECTOR Result = XMVectorMultiply(P0, Position0); + Result = XMVectorMultiplyAdd(T0, Tangent0, Result); + Result = XMVectorMultiplyAdd(P1, Position1, Result); + Result = XMVectorMultiplyAdd(T1, Tangent1, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 CatMulT2 = { { { -3.0f, -2.0f, 3.0f, -1.0f } } }; + static const XMVECTORF32 CatMulT3 = { { { 2.0f, 1.0f, -2.0f, 1.0f } } }; + + XMVECTOR T2 = vmulq_f32(T, T); + XMVECTOR T3 = vmulq_f32(T, T2); + // Mul by 
the constants against t^2 + T2 = vmulq_f32(T2, CatMulT2); + // Mul by the constants against t^3 + T3 = vmlaq_f32(T2, T3, CatMulT3); + // T3 now has the pre-result. + // I need to add t.y only + T2 = vandq_u32(T, g_XMMaskY); + T3 = vaddq_f32(T3, T2); + // Add 1.0f to x + T3 = vaddq_f32(T3, g_XMIdentityR0); + // Now, I have the constants created + // Mul the x constant to Position0 + XMVECTOR vResult = vmulq_lane_f32(Position0, vget_low_f32(T3), 0); // T3[0] + // Mul the y constant to Tangent0 + vResult = vmlaq_lane_f32(vResult, Tangent0, vget_low_f32(T3), 1); // T3[1] + // Mul the z constant to Position1 + vResult = vmlaq_lane_f32(vResult, Position1, vget_high_f32(T3), 0); // T3[2] + // Mul the w constant to Tangent1 + vResult = vmlaq_lane_f32(vResult, Tangent1, vget_high_f32(T3), 1); // T3[3] + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 CatMulT2 = { { { -3.0f, -2.0f, 3.0f, -1.0f } } }; + static const XMVECTORF32 CatMulT3 = { { { 2.0f, 1.0f, -2.0f, 1.0f } } }; + + XMVECTOR T2 = _mm_mul_ps(T, T); + XMVECTOR T3 = _mm_mul_ps(T, T2); + // Mul by the constants against t^2 + T2 = _mm_mul_ps(T2, CatMulT2); + // Mul by the constants against t^3 + T3 = XM_FMADD_PS(T3, CatMulT3, T2); + // T3 now has the pre-result. + // I need to add t.y only + T2 = _mm_and_ps(T, g_XMMaskY); + T3 = _mm_add_ps(T3, T2); + // Add 1.0f to x + T3 = _mm_add_ps(T3, g_XMIdentityR0); + // Now, I have the constants created + // Mul the x constant to Position0 + XMVECTOR vResult = XM_PERMUTE_PS(T3, _MM_SHUFFLE(0, 0, 0, 0)); + vResult = _mm_mul_ps(vResult, Position0); + // Mul the y constant to Tangent0 + T2 = XM_PERMUTE_PS(T3, _MM_SHUFFLE(1, 1, 1, 1)); + vResult = XM_FMADD_PS(T2, Tangent0, vResult); + // Mul the z constant to Position1 + T2 = XM_PERMUTE_PS(T3, _MM_SHUFFLE(2, 2, 2, 2)); + vResult = XM_FMADD_PS(T2, Position1, vResult); + // Mul the w constant to Tangent1 + T3 = XM_PERMUTE_PS(T3, _MM_SHUFFLE(3, 3, 3, 3)); + vResult = XM_FMADD_PS(T3, Tangent1, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCatmullRom +( + FXMVECTOR Position0, + FXMVECTOR Position1, + FXMVECTOR Position2, + GXMVECTOR Position3, + float t +) noexcept +{ + // Result = ((-t^3 + 2 * t^2 - t) * Position0 + + // (3 * t^3 - 5 * t^2 + 2) * Position1 + + // (-3 * t^3 + 4 * t^2 + t) * Position2 + + // (t^3 - t^2) * Position3) * 0.5 + +#if defined(_XM_NO_INTRINSICS_) + + float t2 = t * t; + float t3 = t * t2; + + XMVECTOR P0 = XMVectorReplicate((-t3 + 2.0f * t2 - t) * 0.5f); + XMVECTOR P1 = XMVectorReplicate((3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f); + XMVECTOR P2 = XMVectorReplicate((-3.0f * t3 + 4.0f * t2 + t) * 0.5f); + XMVECTOR P3 = XMVectorReplicate((t3 - t2) * 0.5f); + + XMVECTOR Result = XMVectorMultiply(P0, Position0); + Result = XMVectorMultiplyAdd(P1, Position1, Result); + Result = XMVectorMultiplyAdd(P2, Position2, Result); + Result = XMVectorMultiplyAdd(P3, Position3, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float t2 = t * t; + float t3 = t * t2; + + float p0 = (-t3 + 2.0f * t2 - t) * 0.5f; + float p1 = (3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f; + float p2 = (-3.0f * t3 + 4.0f * t2 + t) * 0.5f; + float p3 = (t3 - t2) * 0.5f; + + XMVECTOR P1 = vmulq_n_f32(Position1, p1); + XMVECTOR P0 = vmlaq_n_f32(P1, Position0, p0); + XMVECTOR P3 = vmulq_n_f32(Position3, p3); + XMVECTOR P2 = vmlaq_n_f32(P3, Position2, p2); + P0 = vaddq_f32(P0, P2); + return P0; +#elif 
defined(_XM_SSE_INTRINSICS_) + float t2 = t * t; + float t3 = t * t2; + + XMVECTOR P0 = _mm_set_ps1((-t3 + 2.0f * t2 - t) * 0.5f); + XMVECTOR P1 = _mm_set_ps1((3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f); + XMVECTOR P2 = _mm_set_ps1((-3.0f * t3 + 4.0f * t2 + t) * 0.5f); + XMVECTOR P3 = _mm_set_ps1((t3 - t2) * 0.5f); + + P1 = _mm_mul_ps(Position1, P1); + P0 = XM_FMADD_PS(Position0, P0, P1); + P3 = _mm_mul_ps(Position3, P3); + P2 = XM_FMADD_PS(Position2, P2, P3); + P0 = _mm_add_ps(P0, P2); + return P0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCatmullRomV +( + FXMVECTOR Position0, + FXMVECTOR Position1, + FXMVECTOR Position2, + GXMVECTOR Position3, + HXMVECTOR T +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fx = T.vector4_f32[0]; + float fy = T.vector4_f32[1]; + float fz = T.vector4_f32[2]; + float fw = T.vector4_f32[3]; + XMVECTORF32 vResult = { { { + 0.5f * ((-fx * fx * fx + 2 * fx * fx - fx) * Position0.vector4_f32[0] + + (3 * fx * fx * fx - 5 * fx * fx + 2) * Position1.vector4_f32[0] + + (-3 * fx * fx * fx + 4 * fx * fx + fx) * Position2.vector4_f32[0] + + (fx * fx * fx - fx * fx) * Position3.vector4_f32[0]), + + 0.5f * ((-fy * fy * fy + 2 * fy * fy - fy) * Position0.vector4_f32[1] + + (3 * fy * fy * fy - 5 * fy * fy + 2) * Position1.vector4_f32[1] + + (-3 * fy * fy * fy + 4 * fy * fy + fy) * Position2.vector4_f32[1] + + (fy * fy * fy - fy * fy) * Position3.vector4_f32[1]), + + 0.5f * ((-fz * fz * fz + 2 * fz * fz - fz) * Position0.vector4_f32[2] + + (3 * fz * fz * fz - 5 * fz * fz + 2) * Position1.vector4_f32[2] + + (-3 * fz * fz * fz + 4 * fz * fz + fz) * Position2.vector4_f32[2] + + (fz * fz * fz - fz * fz) * Position3.vector4_f32[2]), + + 0.5f * ((-fw * fw * fw + 2 * fw * fw - fw) * Position0.vector4_f32[3] + + (3 * fw * fw * fw - 5 * fw * fw + 2) * Position1.vector4_f32[3] + + (-3 * fw * fw * fw + 4 * fw * fw + fw) * Position2.vector4_f32[3] + + (fw * fw * fw - fw * fw) * Position3.vector4_f32[3]) + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Catmul2 = { { { 2.0f, 2.0f, 2.0f, 2.0f } } }; + static const XMVECTORF32 Catmul3 = { { { 3.0f, 3.0f, 3.0f, 3.0f } } }; + static const XMVECTORF32 Catmul4 = { { { 4.0f, 4.0f, 4.0f, 4.0f } } }; + static const XMVECTORF32 Catmul5 = { { { 5.0f, 5.0f, 5.0f, 5.0f } } }; + // Cache T^2 and T^3 + XMVECTOR T2 = vmulq_f32(T, T); + XMVECTOR T3 = vmulq_f32(T, T2); + // Perform the Position0 term + XMVECTOR vResult = vaddq_f32(T2, T2); + vResult = vsubq_f32(vResult, T); + vResult = vsubq_f32(vResult, T3); + vResult = vmulq_f32(vResult, Position0); + // Perform the Position1 term and add + XMVECTOR vTemp = vmulq_f32(T3, Catmul3); + vTemp = vmlsq_f32(vTemp, T2, Catmul5); + vTemp = vaddq_f32(vTemp, Catmul2); + vResult = vmlaq_f32(vResult, vTemp, Position1); + // Perform the Position2 term and add + vTemp = vmulq_f32(T2, Catmul4); + vTemp = vmlsq_f32(vTemp, T3, Catmul3); + vTemp = vaddq_f32(vTemp, T); + vResult = vmlaq_f32(vResult, vTemp, Position2); + // Position3 is the last term + T3 = vsubq_f32(T3, T2); + vResult = vmlaq_f32(vResult, T3, Position3); + // Multiply by 0.5f and exit + vResult = vmulq_f32(vResult, g_XMOneHalf); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 Catmul2 = { { { 2.0f, 2.0f, 2.0f, 2.0f } } }; + static const XMVECTORF32 Catmul3 = { { { 3.0f, 3.0f, 3.0f, 3.0f } } }; + static const XMVECTORF32 Catmul4 = { { { 4.0f, 4.0f, 4.0f, 4.0f } } }; + static const 
XMVECTORF32 Catmul5 = { { { 5.0f, 5.0f, 5.0f, 5.0f } } }; + // Cache T^2 and T^3 + XMVECTOR T2 = _mm_mul_ps(T, T); + XMVECTOR T3 = _mm_mul_ps(T, T2); + // Perform the Position0 term + XMVECTOR vResult = _mm_add_ps(T2, T2); + vResult = _mm_sub_ps(vResult, T); + vResult = _mm_sub_ps(vResult, T3); + vResult = _mm_mul_ps(vResult, Position0); + // Perform the Position1 term and add + XMVECTOR vTemp = _mm_mul_ps(T3, Catmul3); + vTemp = XM_FNMADD_PS(T2, Catmul5, vTemp); + vTemp = _mm_add_ps(vTemp, Catmul2); + vResult = XM_FMADD_PS(vTemp, Position1, vResult); + // Perform the Position2 term and add + vTemp = _mm_mul_ps(T2, Catmul4); + vTemp = XM_FNMADD_PS(T3, Catmul3, vTemp); + vTemp = _mm_add_ps(vTemp, T); + vResult = XM_FMADD_PS(vTemp, Position2, vResult); + // Position3 is the last term + T3 = _mm_sub_ps(T3, T2); + vResult = XM_FMADD_PS(T3, Position3, vResult); + // Multiply by 0.5f and exit + vResult = _mm_mul_ps(vResult, g_XMOneHalf); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorBaryCentric +( + FXMVECTOR Position0, + FXMVECTOR Position1, + FXMVECTOR Position2, + float f, + float g +) noexcept +{ + // Result = Position0 + f * (Position1 - Position0) + g * (Position2 - Position0) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR P10 = XMVectorSubtract(Position1, Position0); + XMVECTOR ScaleF = XMVectorReplicate(f); + + XMVECTOR P20 = XMVectorSubtract(Position2, Position0); + XMVECTOR ScaleG = XMVectorReplicate(g); + + XMVECTOR Result = XMVectorMultiplyAdd(P10, ScaleF, Position0); + Result = XMVectorMultiplyAdd(P20, ScaleG, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR R1 = vsubq_f32(Position1, Position0); + XMVECTOR R2 = vsubq_f32(Position2, Position0); + R1 = vmlaq_n_f32(Position0, R1, f); + return vmlaq_n_f32(R1, R2, g); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR R1 = _mm_sub_ps(Position1, Position0); + XMVECTOR R2 = _mm_sub_ps(Position2, Position0); + XMVECTOR SF = _mm_set_ps1(f); + R1 = XM_FMADD_PS(R1, SF, Position0); + XMVECTOR SG = _mm_set_ps1(g); + return XM_FMADD_PS(R2, SG, R1); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorBaryCentricV +( + FXMVECTOR Position0, + FXMVECTOR Position1, + FXMVECTOR Position2, + GXMVECTOR F, + HXMVECTOR G +) noexcept +{ + // Result = Position0 + f * (Position1 - Position0) + g * (Position2 - Position0) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR P10 = XMVectorSubtract(Position1, Position0); + XMVECTOR P20 = XMVectorSubtract(Position2, Position0); + + XMVECTOR Result = XMVectorMultiplyAdd(P10, F, Position0); + Result = XMVectorMultiplyAdd(P20, G, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR R1 = vsubq_f32(Position1, Position0); + XMVECTOR R2 = vsubq_f32(Position2, Position0); + R1 = vmlaq_f32(Position0, R1, F); + return vmlaq_f32(R1, R2, G); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR R1 = _mm_sub_ps(Position1, Position0); + XMVECTOR R2 = _mm_sub_ps(Position2, Position0); + R1 = XM_FMADD_PS(R1, F, Position0); + return XM_FMADD_PS(R2, G, R1); +#endif +} + +/**************************************************************************** + * + * 2D Vector + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + 
//------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2Equal +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vceq_f32(vget_low_f32(V1), vget_low_f32(V2)); + return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + // z and w are don't care + return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0); +#endif +} + + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector2EqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + if ((V1.vector4_f32[0] == V2.vector4_f32[0]) && + (V1.vector4_f32[1] == V2.vector4_f32[1])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) && + (V1.vector4_f32[1] != V2.vector4_f32[1])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vceq_f32(vget_low_f32(V1), vget_low_f32(V2)); + uint64_t r = vget_lane_u64(vTemp, 0); + uint32_t CR = 0; + if (r == 0xFFFFFFFFFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + // z and w are don't care + int iTest = _mm_movemask_ps(vTemp) & 3; + uint32_t CR = 0; + if (iTest == 3) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2EqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vceq_u32(vget_low_u32(V1), vget_low_u32(V2)); + return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3) == 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector2EqualIntR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + if ((V1.vector4_u32[0] == V2.vector4_u32[0]) && + (V1.vector4_u32[1] == V2.vector4_u32[1])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) && + (V1.vector4_u32[1] != V2.vector4_u32[1])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vceq_u32(vget_low_u32(V1), vget_low_u32(V2)); + uint64_t r = vget_lane_u64(vTemp, 0); + uint32_t CR = 0; + if (r == 0xFFFFFFFFFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + int iTest = _mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3; + uint32_t CR = 0; + if (iTest == 3) + { + CR = 
XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2NearEqual +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Epsilon +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]); + float dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]); + return ((dx <= Epsilon.vector4_f32[0]) && + (dy <= Epsilon.vector4_f32[1])); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t vDelta = vsub_f32(vget_low_u32(V1), vget_low_u32(V2)); +#ifdef _MSC_VER + uint32x2_t vTemp = vacle_f32(vDelta, vget_low_u32(Epsilon)); +#else + uint32x2_t vTemp = vcle_f32(vabs_f32(vDelta), vget_low_u32(Epsilon)); +#endif + uint64_t r = vget_lane_u64(vTemp, 0); + return (r == 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Get the difference + XMVECTOR vDelta = _mm_sub_ps(V1, V2); + // Get the absolute value of the difference + XMVECTOR vTemp = _mm_setzero_ps(); + vTemp = _mm_sub_ps(vTemp, vDelta); + vTemp = _mm_max_ps(vTemp, vDelta); + vTemp = _mm_cmple_ps(vTemp, Epsilon); + // z and w are don't care + return (((_mm_movemask_ps(vTemp) & 3) == 0x3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2NotEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vceq_f32(vget_low_f32(V1), vget_low_f32(V2)); + return (vget_lane_u64(vTemp, 0) != 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + // z and w are don't care + return (((_mm_movemask_ps(vTemp) & 3) != 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2NotEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vceq_u32(vget_low_u32(V1), vget_low_u32(V2)); + return (vget_lane_u64(vTemp, 0) != 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3) != 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2Greater +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vcgt_f32(vget_low_f32(V1), vget_low_f32(V2)); + return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + // z and w are don't care + return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector2GreaterR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + if ((V1.vector4_f32[0] > V2.vector4_f32[0]) && + 
(V1.vector4_f32[1] > V2.vector4_f32[1])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) && + (V1.vector4_f32[1] <= V2.vector4_f32[1])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vcgt_f32(vget_low_f32(V1), vget_low_f32(V2)); + uint64_t r = vget_lane_u64(vTemp, 0); + uint32_t CR = 0; + if (r == 0xFFFFFFFFFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp) & 3; + uint32_t CR = 0; + if (iTest == 3) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2GreaterOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vcge_f32(vget_low_f32(V1), vget_low_f32(V2)); + return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector2GreaterOrEqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) && + (V1.vector4_f32[1] >= V2.vector4_f32[1])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) && + (V1.vector4_f32[1] < V2.vector4_f32[1])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vcge_f32(vget_low_f32(V1), vget_low_f32(V2)); + uint64_t r = vget_lane_u64(vTemp, 0); + uint32_t CR = 0; + if (r == 0xFFFFFFFFFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp) & 3; + uint32_t CR = 0; + if (iTest == 3) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2Less +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vclt_f32(vget_low_f32(V1), vget_low_f32(V2)); + return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmplt_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector2LessOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t vTemp = vcle_f32(vget_low_f32(V1), vget_low_f32(V2)); + return 
(vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
+#elif defined(_XM_SSE_INTRINSICS_)
+ XMVECTOR vTemp = _mm_cmple_ps(V1, V2);
+ return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline bool XM_CALLCONV XMVector2InBounds
+(
+ FXMVECTOR V,
+ FXMVECTOR Bounds
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+ return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
+ (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1])) != 0);
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+ float32x2_t VL = vget_low_f32(V);
+ float32x2_t B = vget_low_f32(Bounds);
+ // Test if less than or equal
+ uint32x2_t ivTemp1 = vcle_f32(VL, B);
+ // Negate the bounds
+ float32x2_t vTemp2 = vneg_f32(B);
+ // Test if greater or equal (Reversed)
+ uint32x2_t ivTemp2 = vcle_f32(vTemp2, VL);
+ // Blend answers
+ ivTemp1 = vand_u32(ivTemp1, ivTemp2);
+ // x and y in bounds?
+ return (vget_lane_u64(ivTemp1, 0) == 0xFFFFFFFFFFFFFFFFU);
+#elif defined(_XM_SSE_INTRINSICS_)
+ // Test if less than or equal
+ XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds);
+ // Negate the bounds
+ XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne);
+ // Test if greater or equal (Reversed)
+ vTemp2 = _mm_cmple_ps(vTemp2, V);
+ // Blend answers
+ vTemp1 = _mm_and_ps(vTemp1, vTemp2);
+ // x and y in bounds? (z and w are don't care)
+ return (((_mm_movemask_ps(vTemp1) & 0x3) == 0x3) != 0);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma float_control(push)
+#pragma float_control(precise, on)
+#endif
+
+inline bool XM_CALLCONV XMVector2IsNaN(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+ return (XMISNAN(V.vector4_f32[0]) ||
+ XMISNAN(V.vector4_f32[1]));
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+ float32x2_t VL = vget_low_f32(V);
+ // Test against itself. NaN is always not equal
+ uint32x2_t vTempNan = vceq_f32(VL, VL);
+ // If x or y are NaN, the mask is zero
+ return (vget_lane_u64(vTempNan, 0) != 0xFFFFFFFFFFFFFFFFU);
+#elif defined(_XM_SSE_INTRINSICS_)
+ // Test against itself. NaN is always not equal
+ XMVECTOR vTempNan = _mm_cmpneq_ps(V, V);
+ // If x or y are NaN, the mask is non-zero
+ return ((_mm_movemask_ps(vTempNan) & 3) != 0);
+#endif
+}
+
+#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma float_control(pop)
+#endif
+
+//------------------------------------------------------------------------------
+
+inline bool XM_CALLCONV XMVector2IsInfinite(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+ return (XMISINF(V.vector4_f32[0]) ||
+ XMISINF(V.vector4_f32[1]));
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+ // Mask off the sign bit
+ uint32x2_t vTemp = vand_u32(vget_low_f32(V), vget_low_f32(g_XMAbsMask));
+ // Compare to infinity
+ vTemp = vceq_f32(vTemp, vget_low_f32(g_XMInfinity));
+ // If x or y are infinity, the mask is non-zero
+ return vget_lane_u64(vTemp, 0) != 0;
+#elif defined(_XM_SSE_INTRINSICS_)
+ // Mask off the sign bit
+ __m128 vTemp = _mm_and_ps(V, g_XMAbsMask);
+ // Compare to infinity
+ vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity);
+ // If x or y are infinity, the mask is non-zero
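+ // (_mm_movemask_ps gathers the four lane sign bits; masking with 3 keeps
+ // only the x and y lanes, so z and w are don't care for the 2D test.)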
+ return ((_mm_movemask_ps(vTemp) & 3) != 0); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Dot +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result; + Result.f[0] = + Result.f[1] = + Result.f[2] = + Result.f[3] = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1]; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Perform the dot product on x and y + float32x2_t vTemp = vmul_f32(vget_low_f32(V1), vget_low_f32(V2)); + vTemp = vpadd_f32(vTemp, vTemp); + return vcombine_f32(vTemp, vTemp); +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_dp_ps(V1, V2, 0x3f); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vDot = _mm_mul_ps(V1, V2); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_moveldup_ps(vDot); + return vDot; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V1, V2); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Cross +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + // [ V1.x*V2.y - V1.y*V2.x, V1.x*V2.y - V1.y*V2.x ] + +#if defined(_XM_NO_INTRINSICS_) + float fCross = (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]); + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = fCross; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Negate = { { { 1.f, -1.f, 0, 0 } } }; + + float32x2_t vTemp = vmul_f32(vget_low_f32(V1), vrev64_f32(vget_low_f32(V2))); + vTemp = vmul_f32(vTemp, vget_low_f32(Negate)); + vTemp = vpadd_f32(vTemp, vTemp); + return vcombine_f32(vTemp, vTemp); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap x and y + XMVECTOR vResult = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 1, 0, 1)); + // Perform the muls + vResult = _mm_mul_ps(vResult, V1); + // Splat y + XMVECTOR vTemp = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(1, 1, 1, 1)); + // Sub the values + vResult = _mm_sub_ss(vResult, vTemp); + // Splat the cross product + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 0, 0, 0)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2LengthSq(FXMVECTOR V) noexcept +{ + return XMVector2Dot(V, V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2ReciprocalLengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2LengthSq(V); + Result = XMVectorReciprocalSqrtEst(Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + // Reciprocal sqrt (estimate) + vTemp = vrsqrte_f32(vTemp); + return vcombine_f32(vTemp, vTemp); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + return 
_mm_rsqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_rsqrt_ss(vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = _mm_rsqrt_ss(vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2ReciprocalLength(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2LengthSq(V); + Result = XMVectorReciprocalSqrt(Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + // Reciprocal sqrt + float32x2_t S0 = vrsqrte_f32(vTemp); + float32x2_t P0 = vmul_f32(vTemp, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(vTemp, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + XMVECTOR vLengthSq = _mm_sqrt_ps(vTemp); + return _mm_div_ps(g_XMOne, vLengthSq); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ss(vTemp); + vLengthSq = _mm_div_ss(g_XMOne, vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = _mm_sqrt_ss(vLengthSq); + vLengthSq = _mm_div_ss(g_XMOne, vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2LengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2LengthSq(V); + Result = XMVectorSqrtEst(Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(vTemp, zero); + // Sqrt (estimate) + float32x2_t Result = vrsqrte_f32(vTemp); + Result = vmul_f32(vTemp, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ss(vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot 
product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = _mm_sqrt_ss(vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Length(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2LengthSq(V); + Result = XMVectorSqrt(Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(vTemp, zero); + // Sqrt + float32x2_t S0 = vrsqrte_f32(vTemp); + float32x2_t P0 = vmul_f32(vTemp, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(vTemp, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + Result = vmul_f32(vTemp, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ss(vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ +// XMVector2NormalizeEst uses a reciprocal estimate and +// returns QNaN on zero and infinite vectors. 
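+// (The estimate maps through vrsqrte_f32 / _mm_rsqrt_ps, whose relative
+// error is roughly 2^-12 on SSE and coarser on NEON, so prefer
+// XMVector2Normalize when an accurately unit-length result is required.)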
+ +inline XMVECTOR XM_CALLCONV XMVector2NormalizeEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2ReciprocalLength(V); + Result = XMVectorMultiply(V, Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + // Reciprocal sqrt (estimate) + vTemp = vrsqrte_f32(vTemp); + // Normalize + float32x2_t Result = vmul_f32(VL, vTemp); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + XMVECTOR vResult = _mm_rsqrt_ps(vTemp); + return _mm_mul_ps(vResult, V); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_rsqrt_ss(vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + vLengthSq = _mm_mul_ps(vLengthSq, V); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = _mm_rsqrt_ss(vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + vLengthSq = _mm_mul_ps(vLengthSq, V); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Normalize(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR vResult = XMVector2Length(V); + float fLength = vResult.vector4_f32[0]; + + // Prevent divide by zero + if (fLength > 0) + { + fLength = 1.0f / fLength; + } + + vResult.vector4_f32[0] = V.vector4_f32[0] * fLength; + vResult.vector4_f32[1] = V.vector4_f32[1] * fLength; + vResult.vector4_f32[2] = V.vector4_f32[2] * fLength; + vResult.vector4_f32[3] = V.vector4_f32[3] * fLength; + return vResult; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + uint32x2_t VEqualsZero = vceq_f32(vTemp, vdup_n_f32(0)); + uint32x2_t VEqualsInf = vceq_f32(vTemp, vget_low_f32(g_XMInfinity)); + // Reciprocal sqrt (2 iterations of Newton-Raphson) + float32x2_t S0 = vrsqrte_f32(vTemp); + float32x2_t P0 = vmul_f32(vTemp, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(vTemp, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + vTemp = vmul_f32(S1, R1); + // Normalize + float32x2_t Result = vmul_f32(VL, vTemp); + Result = vbsl_f32(VEqualsZero, vdup_n_f32(0), Result); + Result = vbsl_f32(VEqualsInf, vget_low_f32(g_XMQNaN), Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vLengthSq = _mm_dp_ps(V, V, 0x3f); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + 
// Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE3_INTRINSICS_) + // Perform the dot product on x and y only + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_moveldup_ps(vLengthSq); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y only + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2ClampLength +( + FXMVECTOR V, + float LengthMin, + float LengthMax +) noexcept +{ + XMVECTOR ClampMax = XMVectorReplicate(LengthMax); + XMVECTOR ClampMin = XMVectorReplicate(LengthMin); + return XMVector2ClampLengthV(V, ClampMin, ClampMax); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2ClampLengthV +( + FXMVECTOR V, + FXMVECTOR LengthMin, + FXMVECTOR LengthMax +) noexcept +{ + assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin))); + assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax))); + assert(XMVector2GreaterOrEqual(LengthMin, g_XMZero)); + assert(XMVector2GreaterOrEqual(LengthMax, g_XMZero)); + assert(XMVector2GreaterOrEqual(LengthMax, LengthMin)); + + XMVECTOR LengthSq = XMVector2LengthSq(V); + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq); + + XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v); + XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero); + + XMVECTOR Length = 
XMVectorMultiply(LengthSq, RcpLength); + + XMVECTOR Normal = XMVectorMultiply(V, RcpLength); + + XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength); + Length = XMVectorSelect(LengthSq, Length, Select); + Normal = XMVectorSelect(LengthSq, Normal, Select); + + XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax); + XMVECTOR ControlMin = XMVectorLess(Length, LengthMin); + + XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax); + ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin); + + XMVECTOR Result = XMVectorMultiply(Normal, ClampLength); + + // Preserve the original vector (with no precision loss) if the length falls within the given range + XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin); + Result = XMVectorSelect(Result, V, Control); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Reflect +( + FXMVECTOR Incident, + FXMVECTOR Normal +) noexcept +{ + // Result = Incident - (2 * dot(Incident, Normal)) * Normal + + XMVECTOR Result; + Result = XMVector2Dot(Incident, Normal); + Result = XMVectorAdd(Result, Result); + Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Refract +( + FXMVECTOR Incident, + FXMVECTOR Normal, + float RefractionIndex +) noexcept +{ + XMVECTOR Index = XMVectorReplicate(RefractionIndex); + return XMVector2RefractV(Incident, Normal, Index); +} + +//------------------------------------------------------------------------------ + +// Return the refraction of a 2D vector +inline XMVECTOR XM_CALLCONV XMVector2RefractV +( + FXMVECTOR Incident, + FXMVECTOR Normal, + FXMVECTOR RefractionIndex +) noexcept +{ + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + +#if defined(_XM_NO_INTRINSICS_) + + float IDotN = (Incident.vector4_f32[0] * Normal.vector4_f32[0]) + (Incident.vector4_f32[1] * Normal.vector4_f32[1]); + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + float RY = 1.0f - (IDotN * IDotN); + float RX = 1.0f - (RY * RefractionIndex.vector4_f32[0] * RefractionIndex.vector4_f32[0]); + RY = 1.0f - (RY * RefractionIndex.vector4_f32[1] * RefractionIndex.vector4_f32[1]); + if (RX >= 0.0f) + { + RX = (RefractionIndex.vector4_f32[0] * Incident.vector4_f32[0]) - (Normal.vector4_f32[0] * ((RefractionIndex.vector4_f32[0] * IDotN) + sqrtf(RX))); + } + else + { + RX = 0.0f; + } + if (RY >= 0.0f) + { + RY = (RefractionIndex.vector4_f32[1] * Incident.vector4_f32[1]) - (Normal.vector4_f32[1] * ((RefractionIndex.vector4_f32[1] * IDotN) + sqrtf(RY))); + } + else + { + RY = 0.0f; + } + + XMVECTOR vResult; + vResult.vector4_f32[0] = RX; + vResult.vector4_f32[1] = RY; + vResult.vector4_f32[2] = 0.0f; + vResult.vector4_f32[3] = 0.0f; + return vResult; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t IL = vget_low_f32(Incident); + float32x2_t NL = vget_low_f32(Normal); + float32x2_t RIL = vget_low_f32(RefractionIndex); + // Get the 2D Dot product of Incident-Normal + float32x2_t vTemp = vmul_f32(IL, NL); + float32x2_t IDotN = vpadd_f32(vTemp, vTemp); + // vTemp = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + vTemp = vmls_f32(vget_low_f32(g_XMOne), IDotN, IDotN); + vTemp = 
vmul_f32(vTemp, RIL); + vTemp = vmls_f32(vget_low_f32(g_XMOne), vTemp, RIL); + // If any terms are <=0, sqrt() will fail, punt to zero + uint32x2_t vMask = vcgt_f32(vTemp, vget_low_f32(g_XMZero)); + // Sqrt(vTemp) + float32x2_t S0 = vrsqrte_f32(vTemp); + float32x2_t P0 = vmul_f32(vTemp, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(vTemp, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t S2 = vmul_f32(S1, R1); + vTemp = vmul_f32(vTemp, S2); + // R = RefractionIndex * IDotN + sqrt(R) + vTemp = vmla_f32(vTemp, RIL, IDotN); + // Result = RefractionIndex * Incident - Normal * R + float32x2_t vResult = vmul_f32(RIL, IL); + vResult = vmls_f32(vResult, vTemp, NL); + vResult = vand_u32(vResult, vMask); + return vcombine_f32(vResult, vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + // Get the 2D Dot product of Incident-Normal + XMVECTOR IDotN = XMVector2Dot(Incident, Normal); + // vTemp = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + XMVECTOR vTemp = XM_FNMADD_PS(IDotN, IDotN, g_XMOne); + vTemp = _mm_mul_ps(vTemp, RefractionIndex); + vTemp = XM_FNMADD_PS(vTemp, RefractionIndex, g_XMOne); + // If any terms are <=0, sqrt() will fail, punt to zero + XMVECTOR vMask = _mm_cmpgt_ps(vTemp, g_XMZero); + // R = RefractionIndex * IDotN + sqrt(R) + vTemp = _mm_sqrt_ps(vTemp); + vTemp = XM_FMADD_PS(RefractionIndex, IDotN, vTemp); + // Result = RefractionIndex * Incident - Normal * R + XMVECTOR vResult = _mm_mul_ps(RefractionIndex, Incident); + vResult = XM_FNMADD_PS(vTemp, Normal, vResult); + vResult = _mm_and_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Orthogonal(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + -V.vector4_f32[1], + V.vector4_f32[0], + 0.f, + 0.f + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Negate = { { { -1.f, 1.f, 0, 0 } } }; + const float32x2_t zero = vdup_n_f32(0); + + float32x2_t VL = vget_low_f32(V); + float32x2_t Result = vmul_f32(vrev64_f32(VL), vget_low_f32(Negate)); + return vcombine_f32(Result, zero); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + vResult = _mm_mul_ps(vResult, g_XMNegateX); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormalsEst +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector2Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v); + Result = XMVectorACosEst(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormals +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector2Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne, g_XMOne); + Result = XMVectorACos(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenVectors +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + XMVECTOR L1 = 
XMVector2ReciprocalLength(V1); + XMVECTOR L2 = XMVector2ReciprocalLength(V2); + + XMVECTOR Dot = XMVector2Dot(V1, V2); + + L1 = XMVectorMultiply(L1, L2); + + XMVECTOR CosAngle = XMVectorMultiply(Dot, L1); + CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v); + + return XMVectorACos(CosAngle); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2LinePointDistance +( + FXMVECTOR LinePoint1, + FXMVECTOR LinePoint2, + FXMVECTOR Point +) noexcept +{ + // Given a vector PointVector from LinePoint1 to Point and a vector + // LineVector from LinePoint1 to LinePoint2, the scaled distance + // PointProjectionScale from LinePoint1 to the perpendicular projection + // of PointVector onto the line is defined as: + // + // PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector) + + XMVECTOR PointVector = XMVectorSubtract(Point, LinePoint1); + XMVECTOR LineVector = XMVectorSubtract(LinePoint2, LinePoint1); + + XMVECTOR LengthSq = XMVector2LengthSq(LineVector); + + XMVECTOR PointProjectionScale = XMVector2Dot(PointVector, LineVector); + PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq); + + XMVECTOR DistanceVector = XMVectorMultiply(LineVector, PointProjectionScale); + DistanceVector = XMVectorSubtract(PointVector, DistanceVector); + + return XMVector2Length(DistanceVector); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2IntersectLine +( + FXMVECTOR Line1Point1, + FXMVECTOR Line1Point2, + FXMVECTOR Line2Point1, + GXMVECTOR Line2Point2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR V1 = XMVectorSubtract(Line1Point2, Line1Point1); + XMVECTOR V2 = XMVectorSubtract(Line2Point2, Line2Point1); + XMVECTOR V3 = XMVectorSubtract(Line1Point1, Line2Point1); + + XMVECTOR C1 = XMVector2Cross(V1, V2); + XMVECTOR C2 = XMVector2Cross(V2, V3); + + XMVECTOR Result; + const XMVECTOR Zero = XMVectorZero(); + if (XMVector2NearEqual(C1, Zero, g_XMEpsilon.v)) + { + if (XMVector2NearEqual(C2, Zero, g_XMEpsilon.v)) + { + // Coincident + Result = g_XMInfinity.v; + } + else + { + // Parallel + Result = g_XMQNaN.v; + } + } + else + { + // Intersection point = Line1Point1 + V1 * (C2 / C1) + XMVECTOR Scale = XMVectorReciprocal(C1); + Scale = XMVectorMultiply(C2, Scale); + Result = XMVectorMultiplyAdd(V1, Scale, Line1Point1); + } + + return Result; + +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR V1 = _mm_sub_ps(Line1Point2, Line1Point1); + XMVECTOR V2 = _mm_sub_ps(Line2Point2, Line2Point1); + XMVECTOR V3 = _mm_sub_ps(Line1Point1, Line2Point1); + // Generate the cross products + XMVECTOR C1 = XMVector2Cross(V1, V2); + XMVECTOR C2 = XMVector2Cross(V2, V3); + // If C1 is not close to epsilon, use the calculated value + XMVECTOR vResultMask = _mm_setzero_ps(); + vResultMask = _mm_sub_ps(vResultMask, C1); + vResultMask = _mm_max_ps(vResultMask, C1); + // 0xFFFFFFFF if the calculated value is to be used + vResultMask = _mm_cmpgt_ps(vResultMask, g_XMEpsilon); + // If C1 is close to epsilon, which fail type is it? INFINITY or NAN? 
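+    // When C2 is also within epsilon of zero the lines are coincident and the
+    // failure value is +infinity; otherwise they are parallel and distinct and
+    // the failure value is QNaN, mirroring the scalar path above.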
+    XMVECTOR vFailMask = _mm_setzero_ps();
+    vFailMask = _mm_sub_ps(vFailMask, C2);
+    vFailMask = _mm_max_ps(vFailMask, C2);
+    vFailMask = _mm_cmple_ps(vFailMask, g_XMEpsilon);
+    XMVECTOR vFail = _mm_and_ps(vFailMask, g_XMInfinity);
+    vFailMask = _mm_andnot_ps(vFailMask, g_XMQNaN);
+    // vFail is NAN or INF
+    vFail = _mm_or_ps(vFail, vFailMask);
+    // Intersection point = Line1Point1 + V1 * (C2 / C1)
+    XMVECTOR vResult = _mm_div_ps(C2, C1);
+    vResult = XM_FMADD_PS(vResult, V1, Line1Point1);
+    // Use result, or failure value
+    vResult = _mm_and_ps(vResult, vResultMask);
+    vResultMask = _mm_andnot_ps(vResultMask, vFail);
+    vResult = _mm_or_ps(vResult, vResultMask);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector2Transform
+(
+    FXMVECTOR V,
+    FXMMATRIX M
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTOR Y = XMVectorSplatY(V);
+    XMVECTOR X = XMVectorSplatX(V);
+
+    XMVECTOR Result = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]);
+    Result = XMVectorMultiplyAdd(X, M.r[0], Result);
+
+    return Result;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x2_t VL = vget_low_f32(V);
+    float32x4_t Result = vmlaq_lane_f32(M.r[3], M.r[1], VL, 1); // Y
+    return vmlaq_lane_f32(Result, M.r[0], VL, 0); // X
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
+    vResult = XM_FMADD_PS(vResult, M.r[1], M.r[3]);
+    XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
+    vResult = XM_FMADD_PS(vTemp, M.r[0], vResult);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+_Use_decl_annotations_
+inline XMFLOAT4* XM_CALLCONV XMVector2TransformStream
+(
+    XMFLOAT4* pOutputStream,
+    size_t OutputStride,
+    const XMFLOAT2* pInputStream,
+    size_t InputStride,
+    size_t VectorCount,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pOutputStream != nullptr);
+    assert(pInputStream != nullptr);
+
+    assert(InputStride >= sizeof(XMFLOAT2));
+    _Analysis_assume_(InputStride >= sizeof(XMFLOAT2));
+
+    assert(OutputStride >= sizeof(XMFLOAT4));
+    _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4));
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row3 = M.r[3];
+
+    for (size_t i = 0; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat2(reinterpret_cast<const XMFLOAT2*>(pInputVector));
+        XMVECTOR Y = XMVectorSplatY(V);
+        XMVECTOR X = XMVectorSplatX(V);
+
+        XMVECTOR Result = XMVectorMultiplyAdd(Y, row1, row3);
+        Result = XMVectorMultiplyAdd(X, row0, Result);
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
+#endif
+
+        XMStoreFloat4(reinterpret_cast<XMFLOAT4*>(pOutputVector), Result);
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+        pInputVector += InputStride;
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row3 = M.r[3];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT4)))
+        {
+            for (size_t j = 0; j < four; ++j)
+            {
+                float32x4x2_t V =
vld2q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + float32x2_t r3 = vget_low_f32(row3); + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(row3); + r = vget_high_f32(row0); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O + XMVECTOR vResult3 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(row1); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O + vResult3 = vmlaq_lane_f32(vResult3, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + float32x4x4_t R; + R.val[0] = vResult0; + R.val[1] = vResult1; + R.val[2] = vResult2; + R.val[3] = vResult3; + + vst4q_f32(reinterpret_cast(pOutputVector), R); + pOutputVector += sizeof(XMFLOAT4) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t V = vld1_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(row3, row0, V, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, V, 1); // Y + + vst1q_f32(reinterpret_cast(pOutputVector), vResult); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + __m256 row3 = _mm256_broadcast_ps(&M.r[3]); + + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT4)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + X1 = _mm256_insertf128_ps(vTempA, _mm256_castps256_ps128(vTempA2), 1); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT4) * 2; + + X2 = _mm256_insertf128_ps(vTempA2, _mm256_extractf128_ps(vTempA, 1), 0); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X2); + pOutputVector += sizeof(XMFLOAT4) * 2; + + i += 4; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = 
_mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + X1 = _mm256_insertf128_ps(vTempA, _mm256_castps256_ps128(vTempA2), 1); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT4) * 2; + + X2 = _mm256_insertf128_ps(vTempA2, _mm256_extractf128_ps(vTempA, 1), 0); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X2); + pOutputVector += sizeof(XMFLOAT4) * 2; + + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_castps256_ps128(vTempA)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_castps256_ps128(vTempA2)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_extractf128_ps(vTempA, 1)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_extractf128_ps(vTempA2, 1)); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { + if (InputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + // Packed input, aligned output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = 
_mm_add_ps(vTemp, vTemp2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 2; + } + } + else + { + // Packed input, unaligned output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 2; + } + } + } + } + + if (!(reinterpret_cast(pInputVector) & 0xF) && !(InputStride & 0xF)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + // Aligned input, aligned output + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + else + { + // Aligned input, unaligned output + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + } + else + { + // Unaligned input + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2TransformCoord +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + XMVECTOR W = XMVectorSplatW(Result); + return XMVectorDivide(Result, W); +} + 
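+// Usage sketch (illustrative only, not part of the upstream header): transform
+// a 2D position by a rotate-then-translate matrix and project the result back
+// onto w == 1. The matrix and point values here are hypothetical.
+//
+//     XMMATRIX M = XMMatrixMultiply(XMMatrixRotationZ(XM_PIDIV4),
+//                                   XMMatrixTranslation(10.0f, 5.0f, 0.0f));
+//     XMVECTOR p = XMVectorSet(1.0f, 2.0f, 0.0f, 1.0f);
+//     XMVECTOR q = XMVector2TransformCoord(p, M); // x and y in lanes 0 and 1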
+
+//------------------------------------------------------------------------------
+
+_Use_decl_annotations_
+inline XMFLOAT2* XM_CALLCONV XMVector2TransformCoordStream
+(
+    XMFLOAT2* pOutputStream,
+    size_t OutputStride,
+    const XMFLOAT2* pInputStream,
+    size_t InputStride,
+    size_t VectorCount,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pOutputStream != nullptr);
+    assert(pInputStream != nullptr);
+
+    assert(InputStride >= sizeof(XMFLOAT2));
+    _Analysis_assume_(InputStride >= sizeof(XMFLOAT2));
+
+    assert(OutputStride >= sizeof(XMFLOAT2));
+    _Analysis_assume_(OutputStride >= sizeof(XMFLOAT2));
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row3 = M.r[3];
+
+    for (size_t i = 0; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat2(reinterpret_cast<const XMFLOAT2*>(pInputVector));
+        XMVECTOR Y = XMVectorSplatY(V);
+        XMVECTOR X = XMVectorSplatX(V);
+
+        XMVECTOR Result = XMVectorMultiplyAdd(Y, row1, row3);
+        Result = XMVectorMultiplyAdd(X, row0, Result);
+
+        XMVECTOR W = XMVectorSplatW(Result);
+
+        Result = XMVectorDivide(Result, W);
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
+#endif
+
+        XMStoreFloat2(reinterpret_cast<XMFLOAT2*>(pOutputVector), Result);
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+        pInputVector += InputStride;
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row3 = M.r[3];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT2)))
+        {
+            for (size_t j = 0; j < four; ++j)
+            {
+                float32x4x2_t V = vld2q_f32(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += sizeof(XMFLOAT2) * 4;
+
+                float32x2_t r3 = vget_low_f32(row3);
+                float32x2_t r = vget_low_f32(row0);
+                XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M
+                XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N
+
+                XM_PREFETCH(pInputVector);
+
+                r3 = vget_high_f32(row3);
+                r = vget_high_f32(row0);
+                XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P
+
+                XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE);
+
+                r = vget_low_f32(row1);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2));
+
+                r = vget_high_f32(row1);
+                W = vmlaq_lane_f32(W, V.val[1], r, 1); // Dx+Hy+P
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3));
+
+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
+                V.val[0] = vdivq_f32(vResult0, W);
+                V.val[1] = vdivq_f32(vResult1, W);
+#else
+                // 2 iterations of Newton-Raphson refinement of reciprocal
+                float32x4_t Reciprocal = vrecpeq_f32(W);
+                float32x4_t S = vrecpsq_f32(Reciprocal, W);
+                Reciprocal = vmulq_f32(S, Reciprocal);
+                S = vrecpsq_f32(Reciprocal, W);
+                Reciprocal = vmulq_f32(S, Reciprocal);
+
+                V.val[0] = vmulq_f32(vResult0, Reciprocal);
+                V.val[1] = vmulq_f32(vResult1, Reciprocal);
+#endif
+
+                vst2q_f32(reinterpret_cast<float*>(pOutputVector), V);
+                pOutputVector += sizeof(XMFLOAT2) * 4;
+
+                i += 4;
+            }
+        }
+    }
+
+    for (; i <
VectorCount; i++) + { + float32x2_t V = vld1_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(row3, row0, V, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, V, 1); // Y + + V = vget_high_f32(vResult); + float32x2_t W = vdup_lane_f32(V, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + V = vget_low_f32(vResult); + V = vdiv_f32(V, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x2_t Reciprocal = vrecpe_f32(W); + float32x2_t S = vrecps_f32(Reciprocal, W); + Reciprocal = vmul_f32(S, Reciprocal); + S = vrecps_f32(Reciprocal, W); + Reciprocal = vmul_f32(S, Reciprocal); + + V = vget_low_f32(vResult); + V = vmul_f32(V, Reciprocal); +#endif + + vst1_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + __m256 row3 = _mm256_broadcast_ps(&M.r[3]); + + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + __m256 W = _mm256_shuffle_ps(vTempA, vTempA, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA = _mm256_div_ps(vTempA, W); + + W = _mm256_shuffle_ps(vTempA2, vTempA2, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA2 = _mm256_div_ps(vTempA2, W); + + X1 = _mm256_shuffle_ps(vTempA, vTempA2, 0x44); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + __m256 W = _mm256_shuffle_ps(vTempA, vTempA, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA = _mm256_div_ps(vTempA, W); + + W = _mm256_shuffle_ps(vTempA2, vTempA2, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA2 = _mm256_div_ps(vTempA2, W); + + X1 = _mm256_shuffle_ps(vTempA, vTempA2, 0x44); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), 
X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + __m256 W = _mm256_shuffle_ps(vTempA, vTempA, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA = _mm256_div_ps(vTempA, W); + + W = _mm256_shuffle_ps(vTempA2, vTempA2, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA2 = _mm256_div_ps(vTempA2, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempA))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempA2))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempA, 1))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempA2, 1))); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V1 = _mm_div_ps(vTemp, W); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = 
_mm_add_ps(vTemp, vTemp2); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V2 = _mm_div_ps(vTemp, W); + + vTemp = _mm_movelh_ps(V1, V2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V1 = _mm_div_ps(vTemp, W); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V2 = _mm_div_ps(vTemp, W); + + vTemp = _mm_movelh_ps(V1, V2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + i += 2; + } + } + } + } + + if (!(reinterpret_cast(pInputVector) & 0xF) && !(InputStride & 0xF)) + { + // Aligned input + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + else + { + // Unaligned input + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = 
XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2TransformNormal +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(Y, M.r[1]); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + float32x4_t Result = vmulq_lane_f32(M.r[1], VL, 1); // Y + return vmlaq_lane_f32(Result, M.r[0], VL, 0); // X +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y + vResult = _mm_mul_ps(vResult, M.r[1]); + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X + vResult = XM_FMADD_PS(vTemp, M.r[0], vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMFLOAT2* XM_CALLCONV XMVector2TransformNormalStream +( + XMFLOAT2* pOutputStream, + size_t OutputStride, + const XMFLOAT2* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT2)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT2)); + + assert(OutputStride >= sizeof(XMFLOAT2)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT2)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat2(reinterpret_cast(pInputVector)); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(Y, row1); + Result = XMVectorMultiplyAdd(X, row0, Result); + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" ) +#endif + + XMStoreFloat2(reinterpret_cast(pOutputVector), Result); + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT2))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x2_t V = vld2q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmulq_lane_f32(V.val[0], r, 0); // Ax + XMVECTOR vResult1 = vmulq_lane_f32(V.val[0], r, 1); // Bx + + XM_PREFETCH(pInputVector); + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + XM_PREFETCH(pInputVector + 
(XM_CACHE_LINE_SIZE * 3)); + + V.val[0] = vResult0; + V.val[1] = vResult1; + + vst2q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t V = vld1_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vResult = vmulq_lane_f32(row0, V, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, V, 1); // Y + + V = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempA = _mm256_mul_ps(Y1, row1); + __m256 vTempB = _mm256_mul_ps(Y2, row1); + vTempA = _mm256_fmadd_ps(X1, row0, vTempA); + vTempB = _mm256_fmadd_ps(X2, row0, vTempB); + + X1 = _mm256_shuffle_ps(vTempA, vTempB, 0x44); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempA = _mm256_mul_ps(Y1, row1); + __m256 vTempB = _mm256_mul_ps(Y2, row1); + vTempA = _mm256_fmadd_ps(X1, row0, vTempA); + vTempB = _mm256_fmadd_ps(X2, row0, vTempB); + + X1 = _mm256_shuffle_ps(vTempA, vTempB, 0x44); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempA = _mm256_mul_ps(Y1, row1); + __m256 vTempB = _mm256_mul_ps(Y2, row1); + vTempA = _mm256_fmadd_ps(X1, row0, vTempA); + vTempB = _mm256_fmadd_ps(X2, row0, vTempB); + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempA))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempB))); + pOutputVector += OutputStride; + + 
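+                    // The third and fourth results come from the upper 128-bit
+                    // lanes of vTempA and vTempB.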
_mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempA, 1))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempB, 1))); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V1 = XM_FMADD_PS(X, row0, vTemp); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V2 = XM_FMADD_PS(X, row0, vTemp); + + vTemp = _mm_movelh_ps(V1, V2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V1 = XM_FMADD_PS(X, row0, vTemp); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V2 = XM_FMADD_PS(X, row0, vTemp); + + vTemp = _mm_movelh_ps(V1, V2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + 
_mm_store_sd(reinterpret_cast<double*>(pOutputVector), _mm_castps_pd(vTemp));
+                    pOutputVector += OutputStride;
+
+                    i += 2;
+                }
+            }
+        }
+    }
+
+    if (!(reinterpret_cast<uintptr_t>(pInputVector) & 0xF) && !(InputStride & 0xF))
+    {
+        // Aligned input
+        for (; i < VectorCount; i++)
+        {
+            XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(pInputVector)));
+            pInputVector += InputStride;
+
+            XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+            XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+
+            XMVECTOR vTemp = _mm_mul_ps(Y, row1);
+            vTemp = XM_FMADD_PS(X, row0, vTemp);
+
+            _mm_store_sd(reinterpret_cast<double*>(pOutputVector), _mm_castps_pd(vTemp));
+            pOutputVector += OutputStride;
+        }
+    }
+    else
+    {
+        // Unaligned input
+        for (; i < VectorCount; i++)
+        {
+            __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(pInputVector)));
+            pInputVector += InputStride;
+
+            XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1));
+            XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0));
+
+            XMVECTOR vTemp = _mm_mul_ps(Y, row1);
+            vTemp = XM_FMADD_PS(X, row0, vTemp);
+
+            _mm_store_sd(reinterpret_cast<double*>(pOutputVector), _mm_castps_pd(vTemp));
+            pOutputVector += OutputStride;
+        }
+    }
+
+    XM_SFENCE();
+
+    return pOutputStream;
+#endif
+}
+
+/****************************************************************************
+ *
+ * 3D Vector
+ *
+ ****************************************************************************/
+
+ //------------------------------------------------------------------------------
+ // Comparison operations
+ //------------------------------------------------------------------------------
+
+ //------------------------------------------------------------------------------
+
+inline bool XM_CALLCONV XMVector3Equal
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2])) != 0);
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    uint32x4_t vResult = vceqq_f32(V1, V2);
+    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
+    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+    return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU);
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2);
+    return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline uint32_t XM_CALLCONV XMVector3EqualR
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    uint32_t CR = 0;
+    if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
+        (V1.vector4_f32[1] == V2.vector4_f32[1]) &&
+        (V1.vector4_f32[2] == V2.vector4_f32[2]))
+    {
+        CR = XM_CRMASK_CR6TRUE;
+    }
+    else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
+        (V1.vector4_f32[1] != V2.vector4_f32[1]) &&
+        (V1.vector4_f32[2] != V2.vector4_f32[2]))
+    {
+        CR = XM_CRMASK_CR6FALSE;
+    }
+    return CR;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    uint32x4_t vResult = vceqq_f32(V1, V2);
+    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
+    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+    uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU;
+
+    uint32_t CR = 0;
+    if (r == 0xFFFFFFU)
+    {
+        CR = XM_CRMASK_CR6TRUE;
+    }
+    else if (!r)
+    {
+        CR = XM_CRMASK_CR6FALSE;
+    }
+    return CR;
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2);
+    int iTest = _mm_movemask_ps(vTemp) & 7;
+    uint32_t CR = 0;
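+    // All three lanes equal yields CR6TRUE, no lanes equal yields CR6FALSE,
+    // and a mixed comparison yields 0.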
+ if (iTest == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3EqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3EqualIntR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_u32[0] == V2.vector4_u32[0]) && + (V1.vector4_u32[1] == V2.vector4_u32[1]) && + (V1.vector4_u32[2] == V2.vector4_u32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) && + (V1.vector4_u32[1] != V2.vector4_u32[1]) && + (V1.vector4_u32[2] != V2.vector4_u32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + int iTemp = _mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7; + uint32_t CR = 0; + if (iTemp == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTemp) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3NearEqual +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Epsilon +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float dx, dy, dz; + + dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]); + dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]); + dz = fabsf(V1.vector4_f32[2] - V2.vector4_f32[2]); + return (((dx <= Epsilon.vector4_f32[0]) && + (dy <= Epsilon.vector4_f32[1]) && + (dz <= Epsilon.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vDelta = vsubq_f32(V1, V2); +#ifdef _MSC_VER + uint32x4_t vResult = vacleq_f32(vDelta, Epsilon); +#else + uint32x4_t vResult = vcleq_f32(vabsq_f32(vDelta), Epsilon); +#endif + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Get the difference + XMVECTOR vDelta = _mm_sub_ps(V1, V2); + // Get the absolute value of the difference + XMVECTOR vTemp = _mm_setzero_ps(); + vTemp = _mm_sub_ps(vTemp, vDelta); + vTemp = _mm_max_ps(vTemp, 
vDelta); + vTemp = _mm_cmple_ps(vTemp, Epsilon); + // w is don't care + return (((_mm_movemask_ps(vTemp) & 7) == 0x7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3NotEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) != 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3NotEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7) != 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3Greater +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3GreaterR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_f32[0] > V2.vector4_f32[0]) && + (V1.vector4_f32[1] > V2.vector4_f32[1]) && + (V1.vector4_f32[2] > V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) && + (V1.vector4_f32[1] <= V2.vector4_f32[1]) && + (V1.vector4_f32[2] <= V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = 
XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp) & 7; + if (iTest == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3GreaterOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3GreaterOrEqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) && + (V1.vector4_f32[1] >= V2.vector4_f32[1]) && + (V1.vector4_f32[2] >= V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) && + (V1.vector4_f32[1] < V2.vector4_f32[1]) && + (V1.vector4_f32[2] < V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp) & 7; + if (iTest == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3Less +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcltq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmplt_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3LessOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2])) != 0); +#elif 
defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcleq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmple_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3InBounds +( + FXMVECTOR V, + FXMVECTOR Bounds +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) && + (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) && + (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test if less than or equal + uint32x4_t ivTemp1 = vcleq_f32(V, Bounds); + // Negate the bounds + float32x4_t vTemp2 = vnegq_f32(Bounds); + // Test if greater or equal (Reversed) + uint32x4_t ivTemp2 = vcleq_f32(vTemp2, V); + // Blend answers + ivTemp1 = vandq_u32(ivTemp1, ivTemp2); + // in bounds? + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(ivTemp1), vget_high_u8(ivTemp1)); + uint16x4x2_t vTemp3 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp3.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne); + // Test if greater or equal (Reversed) + vTemp2 = _mm_cmple_ps(vTemp2, V); + // Blend answers + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + // x,y and z in bounds? (w is don't care) + return (((_mm_movemask_ps(vTemp1) & 0x7) == 0x7) != 0); +#else + return XMComparisonAllInBounds(XMVector3InBoundsR(V, Bounds)); +#endif +} + +//------------------------------------------------------------------------------ + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(push) +#pragma float_control(precise, on) +#endif + +inline bool XM_CALLCONV XMVector3IsNaN(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + return (XMISNAN(V.vector4_f32[0]) || + XMISNAN(V.vector4_f32[1]) || + XMISNAN(V.vector4_f32[2])); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test against itself. NaN is always not equal + uint32x4_t vTempNan = vceqq_f32(V, V); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempNan), vget_high_u8(vTempNan)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + // If x or y or z are NaN, the mask is zero + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test against itself. 
NaN is always not equal + XMVECTOR vTempNan = _mm_cmpneq_ps(V, V); + // If x or y or z are NaN, the mask is non-zero + return ((_mm_movemask_ps(vTempNan) & 7) != 0); +#endif +} + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(pop) +#endif + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3IsInfinite(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (XMISINF(V.vector4_f32[0]) || + XMISINF(V.vector4_f32[1]) || + XMISINF(V.vector4_f32[2])); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Mask off the sign bit + uint32x4_t vTempInf = vandq_u32(V, g_XMAbsMask); + // Compare to infinity + vTempInf = vceqq_f32(vTempInf, g_XMInfinity); + // If any are infinity, the signs are true. + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempInf), vget_high_u8(vTempInf)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0); +#elif defined(_XM_SSE_INTRINSICS_) + // Mask off the sign bit + __m128 vTemp = _mm_and_ps(V, g_XMAbsMask); + // Compare to infinity + vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity); + // If x,y or z are infinity, the signs are true. + return ((_mm_movemask_ps(vTemp) & 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Dot +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fValue = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2]; + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = fValue; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vTemp = vmulq_f32(V1, V2); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + return vcombine_f32(v1, v1); +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_dp_ps(V1, V2, 0x7f); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vTemp = _mm_mul_ps(V1, V2); + vTemp = _mm_and_ps(vTemp, g_XMMask3); + vTemp = _mm_hadd_ps(vTemp, vTemp); + return _mm_hadd_ps(vTemp, vTemp); +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(V1, V2); + // x=Dot.vector4_f32[1], y=Dot.vector4_f32[2] + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.vector4_f32[0] = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.vector4_f32[2] + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.vector4_f32[0] = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + return XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Cross +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + // [ V1.y*V2.z - V1.z*V2.y, V1.z*V2.x - V1.x*V2.z, V1.x*V2.y - V1.y*V2.x ] + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + (V1.vector4_f32[1] * V2.vector4_f32[2]) - (V1.vector4_f32[2] * V2.vector4_f32[1]), + (V1.vector4_f32[2] * V2.vector4_f32[0]) - (V1.vector4_f32[0] * V2.vector4_f32[2]), + 
(V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]), + 0.0f + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t v1xy = vget_low_f32(V1); + float32x2_t v2xy = vget_low_f32(V2); + + float32x2_t v1yx = vrev64_f32(v1xy); + float32x2_t v2yx = vrev64_f32(v2xy); + + float32x2_t v1zz = vdup_lane_f32(vget_high_f32(V1), 0); + float32x2_t v2zz = vdup_lane_f32(vget_high_f32(V2), 0); + + XMVECTOR vResult = vmulq_f32(vcombine_f32(v1yx, v1xy), vcombine_f32(v2zz, v2yx)); + vResult = vmlsq_f32(vResult, vcombine_f32(v1zz, v1yx), vcombine_f32(v2yx, v2xy)); + vResult = veorq_u32(vResult, g_XMFlipY); + return vandq_u32(vResult, g_XMMask3); +#elif defined(_XM_SSE_INTRINSICS_) + // y1,z1,x1,w1 + XMVECTOR vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(3, 0, 2, 1)); + // z2,x2,y2,w2 + XMVECTOR vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(3, 1, 0, 2)); + // Perform the left operation + XMVECTOR vResult = _mm_mul_ps(vTemp1, vTemp2); + // z1,x1,y1,w1 + vTemp1 = XM_PERMUTE_PS(vTemp1, _MM_SHUFFLE(3, 0, 2, 1)); + // y2,z2,x2,w2 + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(3, 1, 0, 2)); + // Perform the right operation + vResult = XM_FNMADD_PS(vTemp1, vTemp2, vResult); + // Set w to zero + return _mm_and_ps(vResult, g_XMMask3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3LengthSq(FXMVECTOR V) noexcept +{ + return XMVector3Dot(V, V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ReciprocalLengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorReciprocalSqrtEst(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + // Reciprocal sqrt (estimate) + v2 = vrsqrte_f32(v1); + return vcombine_f32(v2, v2); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + return _mm_rsqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_rsqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and y + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2)); + // x+z, y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // y,y,y,y + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // x+z+y,??,??,?? 
+ vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // Splat the length squared + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vLengthSq = _mm_rsqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ReciprocalLength(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorReciprocalSqrt(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + // Reciprocal sqrt + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + XMVECTOR vLengthSq = _mm_sqrt_ps(vTemp); + return _mm_div_ps(g_XMOne, vLengthSq); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vDot = _mm_mul_ps(V, V); + vDot = _mm_and_ps(vDot, g_XMMask3); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_sqrt_ps(vDot); + vDot = _mm_div_ps(g_XMOne, vDot); + return vDot; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(V, V); + // x=Dot.y, y=Dot.z + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.x = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.z + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.x = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + vDot = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vDot = _mm_sqrt_ps(vDot); + // Get the reciprocal + vDot = _mm_div_ps(g_XMOne, vDot); + return vDot; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3LengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorSqrtEst(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(v1, zero); + // Sqrt (estimate) + float32x2_t Result = vrsqrte_f32(v1); + Result = vmul_f32(v1, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and y + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 2, 1, 
2)); + // x+z, y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // y,y,y,y + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // x+z+y,??,??,?? + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // Splat the length squared + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the length + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Length(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorSqrt(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(v1, zero); + // Sqrt + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + Result = vmul_f32(v1, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and y + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2)); + // x+z, y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // y,y,y,y + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // x+z+y,??,??,?? + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // Splat the length squared + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the length + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ +// XMVector3NormalizeEst uses a reciprocal estimate and +// returns QNaN on zero and infinite vectors. 
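+// XMVector3Normalize below trades speed for accuracy instead: it uses a true
+// square root and divide, and explicitly handles zero and infinite lengths.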
+ +inline XMVECTOR XM_CALLCONV XMVector3NormalizeEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector3ReciprocalLength(V); + Result = XMVectorMultiply(V, Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + // Reciprocal sqrt (estimate) + v2 = vrsqrte_f32(v1); + // Normalize + return vmulq_f32(V, vcombine_f32(v2, v2)); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + XMVECTOR vResult = _mm_rsqrt_ps(vTemp); + return _mm_mul_ps(vResult, V); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vDot = _mm_mul_ps(V, V); + vDot = _mm_and_ps(vDot, g_XMMask3); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_rsqrt_ps(vDot); + vDot = _mm_mul_ps(vDot, V); + return vDot; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(V, V); + // x=Dot.y, y=Dot.z + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.x = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.z + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.x = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + vDot = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vDot = _mm_rsqrt_ps(vDot); + // Perform the normalization + vDot = _mm_mul_ps(vDot, V); + return vDot; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Normalize(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fLength; + XMVECTOR vResult; + + vResult = XMVector3Length(V); + fLength = vResult.vector4_f32[0]; + + // Prevent divide by zero + if (fLength > 0) + { + fLength = 1.0f / fLength; + } + + vResult.vector4_f32[0] = V.vector4_f32[0] * fLength; + vResult.vector4_f32[1] = V.vector4_f32[1] * fLength; + vResult.vector4_f32[2] = V.vector4_f32[2] * fLength; + vResult.vector4_f32[3] = V.vector4_f32[3] * fLength; + return vResult; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + uint32x2_t VEqualsZero = vceq_f32(v1, vdup_n_f32(0)); + uint32x2_t VEqualsInf = vceq_f32(v1, vget_low_f32(g_XMInfinity)); + // Reciprocal sqrt (2 iterations of Newton-Raphson) + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + v2 = vmul_f32(S1, R1); + // Normalize + XMVECTOR vResult = vmulq_f32(V, vcombine_f32(v2, v2)); + vResult = vbslq_f32(vcombine_f32(VEqualsZero, VEqualsZero), vdupq_n_f32(0), vResult); + return vbslq_f32(vcombine_f32(VEqualsInf, VEqualsInf), g_XMQNaN, vResult); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vLengthSq = _mm_dp_ps(V, V, 0x7f); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is 
infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE3_INTRINSICS_) + // Perform the dot product on x,y and z only + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z only + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 1, 2, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ClampLength +( + FXMVECTOR V, + float LengthMin, + float LengthMax +) noexcept +{ + XMVECTOR ClampMax = XMVectorReplicate(LengthMax); + XMVECTOR ClampMin = XMVectorReplicate(LengthMin); + + return XMVector3ClampLengthV(V, ClampMin, ClampMax); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ClampLengthV +( + FXMVECTOR V, + FXMVECTOR LengthMin, + FXMVECTOR LengthMax +) noexcept +{ + assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin))); + assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && 
(XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax))); + assert(XMVector3GreaterOrEqual(LengthMin, XMVectorZero())); + assert(XMVector3GreaterOrEqual(LengthMax, XMVectorZero())); + assert(XMVector3GreaterOrEqual(LengthMax, LengthMin)); + + XMVECTOR LengthSq = XMVector3LengthSq(V); + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq); + + XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v); + XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero); + + XMVECTOR Normal = XMVectorMultiply(V, RcpLength); + + XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength); + + XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength); + Length = XMVectorSelect(LengthSq, Length, Select); + Normal = XMVectorSelect(LengthSq, Normal, Select); + + XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax); + XMVECTOR ControlMin = XMVectorLess(Length, LengthMin); + + XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax); + ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin); + + XMVECTOR Result = XMVectorMultiply(Normal, ClampLength); + + // Preserve the original vector (with no precision loss) if the length falls within the given range + XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin); + Result = XMVectorSelect(Result, V, Control); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Reflect +( + FXMVECTOR Incident, + FXMVECTOR Normal +) noexcept +{ + // Result = Incident - (2 * dot(Incident, Normal)) * Normal + + XMVECTOR Result = XMVector3Dot(Incident, Normal); + Result = XMVectorAdd(Result, Result); + Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Refract +( + FXMVECTOR Incident, + FXMVECTOR Normal, + float RefractionIndex +) noexcept +{ + XMVECTOR Index = XMVectorReplicate(RefractionIndex); + return XMVector3RefractV(Incident, Normal, Index); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3RefractV +( + FXMVECTOR Incident, + FXMVECTOR Normal, + FXMVECTOR RefractionIndex +) noexcept +{ + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + +#if defined(_XM_NO_INTRINSICS_) + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR IDotN = XMVector3Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + XMVECTOR R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v); + R = XMVectorMultiply(R, RefractionIndex); + R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v); + + if (XMVector4LessOrEqual(R, Zero)) + { + // Total internal reflection + return Zero; + } + else + { + // R = RefractionIndex * IDotN + sqrt(R) + R = XMVectorSqrt(R); + R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R); + + // Result = RefractionIndex * Incident - Normal * R + XMVECTOR Result = XMVectorMultiply(RefractionIndex, Incident); + Result = XMVectorNegativeMultiplySubtract(Normal, R, Result); + + return Result; + } + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR IDotN = XMVector3Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + 
float32x4_t R = vmlsq_f32(g_XMOne, IDotN, IDotN);
+    R = vmulq_f32(R, RefractionIndex);
+    R = vmlsq_f32(g_XMOne, R, RefractionIndex);
+
+    uint32x4_t vResult = vcleq_f32(R, g_XMZero);
+    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
+    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
+    if (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU)
+    {
+        // Total internal reflection
+        vResult = g_XMZero;
+    }
+    else
+    {
+        // Sqrt(R)
+        float32x4_t S0 = vrsqrteq_f32(R);
+        float32x4_t P0 = vmulq_f32(R, S0);
+        float32x4_t R0 = vrsqrtsq_f32(P0, S0);
+        float32x4_t S1 = vmulq_f32(S0, R0);
+        float32x4_t P1 = vmulq_f32(R, S1);
+        float32x4_t R1 = vrsqrtsq_f32(P1, S1);
+        float32x4_t S2 = vmulq_f32(S1, R1);
+        R = vmulq_f32(R, S2);
+        // R = RefractionIndex * IDotN + sqrt(R)
+        R = vmlaq_f32(R, RefractionIndex, IDotN);
+        // Result = RefractionIndex * Incident - Normal * R
+        vResult = vmulq_f32(RefractionIndex, Incident);
+        vResult = vmlsq_f32(vResult, R, Normal);
+    }
+    return vResult;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
+    // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))
+    XMVECTOR IDotN = XMVector3Dot(Incident, Normal);
+    // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
+    XMVECTOR R = XM_FNMADD_PS(IDotN, IDotN, g_XMOne);
+    XMVECTOR R2 = _mm_mul_ps(RefractionIndex, RefractionIndex);
+    R = XM_FNMADD_PS(R, R2, g_XMOne);
+
+    XMVECTOR vResult = _mm_cmple_ps(R, g_XMZero);
+    if (_mm_movemask_ps(vResult) == 0x0f)
+    {
+        // Total internal reflection
+        vResult = g_XMZero;
+    }
+    else
+    {
+        // R = RefractionIndex * IDotN + sqrt(R)
+        R = _mm_sqrt_ps(R);
+        R = XM_FMADD_PS(RefractionIndex, IDotN, R);
+        // Result = RefractionIndex * Incident - Normal * R
+        vResult = _mm_mul_ps(RefractionIndex, Incident);
+        vResult = XM_FNMADD_PS(R, Normal, vResult);
+    }
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector3Orthogonal(FXMVECTOR V) noexcept
+{
+    XMVECTOR Zero = XMVectorZero();
+    XMVECTOR Z = XMVectorSplatZ(V);
+    XMVECTOR YZYY = XMVectorSwizzle<XM_SWIZZLE_Y, XM_SWIZZLE_Z, XM_SWIZZLE_Y, XM_SWIZZLE_Y>(V);
+
+    XMVECTOR NegativeV = XMVectorSubtract(Zero, V);
+
+    XMVECTOR ZIsNegative = XMVectorLess(Z, Zero);
+    XMVECTOR YZYYIsNegative = XMVectorLess(YZYY, Zero);
+
+    XMVECTOR S = XMVectorAdd(YZYY, Z);
+    XMVECTOR D = XMVectorSubtract(YZYY, Z);
+
+    XMVECTOR Select = XMVectorEqualInt(ZIsNegative, YZYYIsNegative);
+
+    XMVECTOR R0 = XMVectorPermute<XM_PERMUTE_1X, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_0X>(NegativeV, S);
+    XMVECTOR R1 = XMVectorPermute<XM_PERMUTE_1X, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_0X>(V, D);
+
+    return XMVectorSelect(R1, R0, Select);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormalsEst
+(
+    FXMVECTOR N1,
+    FXMVECTOR N2
+) noexcept
+{
+    XMVECTOR Result = XMVector3Dot(N1, N2);
+    Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
+    Result = XMVectorACosEst(Result);
+    return Result;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormals
+(
+    FXMVECTOR N1,
+    FXMVECTOR N2
+) noexcept
+{
+    XMVECTOR Result = XMVector3Dot(N1, N2);
+    Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
+    Result = XMVectorACos(Result);
+    return Result;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenVectors
+(
+    FXMVECTOR V1,
+ FXMVECTOR V2 +) noexcept +{ + XMVECTOR L1 = XMVector3ReciprocalLength(V1); + XMVECTOR L2 = XMVector3ReciprocalLength(V2); + + XMVECTOR Dot = XMVector3Dot(V1, V2); + + L1 = XMVectorMultiply(L1, L2); + + XMVECTOR CosAngle = XMVectorMultiply(Dot, L1); + CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v); + + return XMVectorACos(CosAngle); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3LinePointDistance +( + FXMVECTOR LinePoint1, + FXMVECTOR LinePoint2, + FXMVECTOR Point +) noexcept +{ + // Given a vector PointVector from LinePoint1 to Point and a vector + // LineVector from LinePoint1 to LinePoint2, the scaled distance + // PointProjectionScale from LinePoint1 to the perpendicular projection + // of PointVector onto the line is defined as: + // + // PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector) + + XMVECTOR PointVector = XMVectorSubtract(Point, LinePoint1); + XMVECTOR LineVector = XMVectorSubtract(LinePoint2, LinePoint1); + + XMVECTOR LengthSq = XMVector3LengthSq(LineVector); + + XMVECTOR PointProjectionScale = XMVector3Dot(PointVector, LineVector); + PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq); + + XMVECTOR DistanceVector = XMVectorMultiply(LineVector, PointProjectionScale); + DistanceVector = XMVectorSubtract(PointVector, DistanceVector); + + return XMVector3Length(DistanceVector); +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XM_CALLCONV XMVector3ComponentsFromNormal +( + XMVECTOR* pParallel, + XMVECTOR* pPerpendicular, + FXMVECTOR V, + FXMVECTOR Normal +) noexcept +{ + assert(pParallel != nullptr); + assert(pPerpendicular != nullptr); + + XMVECTOR Scale = XMVector3Dot(V, Normal); + + XMVECTOR Parallel = XMVectorMultiply(Normal, Scale); + + *pParallel = Parallel; + *pPerpendicular = XMVectorSubtract(V, Parallel); +} + +//------------------------------------------------------------------------------ +// Transform a vector using a rotation expressed as a unit quaternion + +inline XMVECTOR XM_CALLCONV XMVector3Rotate +( + FXMVECTOR V, + FXMVECTOR RotationQuaternion +) noexcept +{ + XMVECTOR A = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v); + XMVECTOR Q = XMQuaternionConjugate(RotationQuaternion); + XMVECTOR Result = XMQuaternionMultiply(Q, A); + return XMQuaternionMultiply(Result, RotationQuaternion); +} + +//------------------------------------------------------------------------------ +// Transform a vector using the inverse of a rotation expressed as a unit quaternion + +inline XMVECTOR XM_CALLCONV XMVector3InverseRotate +( + FXMVECTOR V, + FXMVECTOR RotationQuaternion +) noexcept +{ + XMVECTOR A = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v); + XMVECTOR Result = XMQuaternionMultiply(RotationQuaternion, A); + XMVECTOR Q = XMQuaternionConjugate(RotationQuaternion); + return XMQuaternionMultiply(Result, Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Transform +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]); + Result = XMVectorMultiplyAdd(Y, M.r[1], Result); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + 
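// Lane-wise multiply-accumulate: x, y and z of V are broadcast against the
+    // matrix rows, and starting the sum at M.r[3] treats w as if it were 1.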
+    float32x2_t VL = vget_low_f32(V);
+    XMVECTOR vResult = vmlaq_lane_f32(M.r[3], M.r[0], VL, 0); // X
+    vResult = vmlaq_lane_f32(vResult, M.r[1], VL, 1); // Y
+    return vmlaq_lane_f32(vResult, M.r[2], vget_high_f32(V), 0); // Z
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z
+    vResult = XM_FMADD_PS(vResult, M.r[2], M.r[3]);
+    XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
+    vResult = XM_FMADD_PS(vTemp, M.r[1], vResult);
+    vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
+    vResult = XM_FMADD_PS(vTemp, M.r[0], vResult);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
+#endif
+
+_Use_decl_annotations_
+inline XMFLOAT4* XM_CALLCONV XMVector3TransformStream
+(
+    XMFLOAT4* pOutputStream,
+    size_t OutputStride,
+    const XMFLOAT3* pInputStream,
+    size_t InputStride,
+    size_t VectorCount,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pOutputStream != nullptr);
+    assert(pInputStream != nullptr);
+
+    assert(InputStride >= sizeof(XMFLOAT3));
+    _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
+
+    assert(OutputStride >= sizeof(XMFLOAT4));
+    _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4));
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    for (size_t i = 0; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
+        XMVECTOR Z = XMVectorSplatZ(V);
+        XMVECTOR Y = XMVectorSplatY(V);
+        XMVECTOR X = XMVectorSplatX(V);
+
+        XMVECTOR Result = XMVectorMultiplyAdd(Z, row2, row3);
+        Result = XMVectorMultiplyAdd(Y, row1, Result);
+        Result = XMVectorMultiplyAdd(X, row0, Result);
+
+        XMStoreFloat4(reinterpret_cast<XMFLOAT4*>(pOutputVector), Result);
+
+        pInputVector += InputStride;
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
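+    // When both streams are tightly packed, four vectors are transformed per
+    // iteration: vld3q_f32 deinterleaves x/y/z into separate registers and
+    // vst4q_f32 writes the four x/y/z/w results back interleaved.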
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT4)))
+        {
+            for (size_t j = 0; j < four; ++j)
+            {
+                float32x4x3_t V = vld3q_f32(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += sizeof(XMFLOAT3) * 4;
+
+                float32x2_t r3 = vget_low_f32(row3);
+                float32x2_t r = vget_low_f32(row0);
+                XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M
+                XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N
+
+                XM_PREFETCH(pInputVector);
+
+                r3 = vget_high_f32(row3);
+                r = vget_high_f32(row0);
+                XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O
+                XMVECTOR vResult3 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P
+
+                XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE);
+
+                r = vget_low_f32(row1);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2));
+
+                r = vget_high_f32(row1);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O
+                vResult3 = vmlaq_lane_f32(vResult3, V.val[1], r, 1); // Dx+Hy+P
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3));
+
+                r = vget_low_f32(row2);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz+M
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz+N
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4));
+
+                r = vget_high_f32(row2);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz+O
+                vResult3 = vmlaq_lane_f32(vResult3, V.val[2], r, 1); // Dx+Hy+Lz+P
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5));
+
+                float32x4x4_t R;
+                R.val[0] = vResult0;
+                R.val[1] = vResult1;
+                R.val[2] = vResult2;
+                R.val[3] = vResult3;
+
+                vst4q_f32(reinterpret_cast<float*>(pOutputVector), R);
+                pOutputVector += sizeof(XMFLOAT4) * 4;
+
+                i += 4;
+            }
+        }
+    }
+
+    for (; i < VectorCount; i++)
+    {
+        float32x2_t VL = vld1_f32(reinterpret_cast<const float*>(pInputVector));
+        float32x2_t zero = vdup_n_f32(0);
+        float32x2_t VH = vld1_lane_f32(reinterpret_cast<const float*>(pInputVector) + 2, zero, 0);
+        pInputVector += InputStride;
+
+        XMVECTOR vResult = vmlaq_lane_f32(row3, row0, VL, 0); // X
+        vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y
+        vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z
+
+        vst1q_f32(reinterpret_cast<float*>(pOutputVector), vResult);
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+#elif defined(_XM_SSE_INTRINSICS_)
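+    // Packed input is handled four vectors at a time: three unaligned loads
+    // pull in 12 floats, XM3UNPACK3INTO4 rebuilds four XMVECTORs (V1-V4), and
+    // the store path is chosen by the 16-byte alignment of the output stream.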
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if (InputStride == sizeof(XMFLOAT3))
+        {
+            if (!(reinterpret_cast<uintptr_t>(pOutputStream) & 0xF) && !(OutputStride & 0xF))
+            {
+                // Packed input, aligned output
+                for (size_t j = 0; j < four; ++j)
+                {
+                    __m128 V1 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                    __m128 L2 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector + 16));
+                    __m128 L3 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector + 32));
+                    pInputVector += sizeof(XMFLOAT3) * 4;
+
+                    // Unpack the 4 vectors (.w components are junk)
+                    XM3UNPACK3INTO4(V1, L2, L3);
+
+                    // Result 1
+                    XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2));
+                    XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1));
+                    XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3);
+                    XMVECTOR vTemp2 = _mm_mul_ps(Y, row1);
+                    XMVECTOR vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    // Result 2
+                    Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2));
+                    Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    // Result 3
+                    Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2));
+                    Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    // Result 4
+                    Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2));
+                    Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    i += 4;
+                }
+            }
+            else
+            {
+                // Packed input, unaligned output
+                for (size_t j = 0; j < four; ++j)
+                {
+                    __m128 V1 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                    __m128 L2 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector + 16));
+                    __m128 L3 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector + 32));
+                    pInputVector += sizeof(XMFLOAT3) * 4;
+
+                    // Unpack the 4 vectors (.w components are junk)
+                    XM3UNPACK3INTO4(V1, L2, L3);
+
+                    // Result 1
+                    XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2));
+                    XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1));
+                    XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3);
+                    XMVECTOR vTemp2 = _mm_mul_ps(Y, row1);
+                    XMVECTOR vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    // Result 2
+                    Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2));
+                    Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    // Result 3
+                    Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2));
+                    Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    // Result 4
+                    Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2));
+                    Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+                    _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    i += 4;
+                }
+            }
+        }
+    }
+
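+    // Transform any leftover vectors one at a time. XM_STREAM_PS requires a
+    // 16-byte aligned destination, so unaligned outputs fall back to
+    // _mm_storeu_ps.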
+    if (!(reinterpret_cast<uintptr_t>(pOutputStream) & 0xF) && !(OutputStride & 0xF))
+    {
+        // Aligned output
+        for (; i < VectorCount; ++i)
+        {
+            XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
+            pInputVector += InputStride;
+
+            XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+            XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+            XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+
+            XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3);
+            XMVECTOR vTemp2 = _mm_mul_ps(Y, row1);
+            XMVECTOR vTemp3 = _mm_mul_ps(X, row0);
+            vTemp = _mm_add_ps(vTemp, vTemp2);
+            vTemp = _mm_add_ps(vTemp, vTemp3);
+
+            XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTemp);
+            pOutputVector += OutputStride;
+        }
+    }
+    else
+    {
+        // Unaligned output
+        for (; i < VectorCount; ++i)
+        {
+            XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
+            pInputVector += InputStride;
+
+            XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+            XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+            XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+
+            XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3);
+            XMVECTOR vTemp2 = _mm_mul_ps(Y, row1);
+            XMVECTOR vTemp3 = _mm_mul_ps(X, row0);
+            vTemp = _mm_add_ps(vTemp, vTemp2);
+            vTemp = _mm_add_ps(vTemp, vTemp3);
+
+            _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTemp);
+            pOutputVector += OutputStride;
+        }
+    }
+
+    XM_SFENCE();
+
+    return pOutputStream;
+#endif
+}
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+//------------------------------------------------------------------------------
+
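+// XMVector3TransformCoord transforms a 3D point (w is taken as 1) by a 4x4
+// matrix, then divides the result by the transformed w, so it is suitable
+// for use with projection matrices.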
+inline XMVECTOR XM_CALLCONV XMVector3TransformCoord
+(
+    FXMVECTOR V,
+    FXMMATRIX M
+) noexcept
+{
+    XMVECTOR Z = XMVectorSplatZ(V);
+    XMVECTOR Y = XMVectorSplatY(V);
+    XMVECTOR X = XMVectorSplatX(V);
+
+    XMVECTOR Result = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]);
+    Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
+    Result = XMVectorMultiplyAdd(X, M.r[0], Result);
+
+    XMVECTOR W = XMVectorSplatW(Result);
+    return XMVectorDivide(Result, W);
+}
+
+//------------------------------------------------------------------------------
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
+#endif
+
+_Use_decl_annotations_
+inline XMFLOAT3* XM_CALLCONV XMVector3TransformCoordStream
+(
+    XMFLOAT3* pOutputStream,
+    size_t OutputStride,
+    const XMFLOAT3* pInputStream,
+    size_t InputStride,
+    size_t VectorCount,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pOutputStream != nullptr);
+    assert(pInputStream != nullptr);
+
+    assert(InputStride >= sizeof(XMFLOAT3));
+    _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
+
+    assert(OutputStride >= sizeof(XMFLOAT3));
+    _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3));
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    for (size_t i = 0; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
+        XMVECTOR Z = XMVectorSplatZ(V);
+        XMVECTOR Y = XMVectorSplatY(V);
+        XMVECTOR X = XMVectorSplatX(V);
+
+        XMVECTOR Result = XMVectorMultiplyAdd(Z, row2, row3);
+        Result = XMVectorMultiplyAdd(Y, row1, Result);
+        Result = XMVectorMultiplyAdd(X, row0, Result);
+
+        XMVECTOR W = XMVectorSplatW(Result);
+
+        Result = XMVectorDivide(Result, W);
+
+        XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), Result);
+
+        pInputVector += InputStride;
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
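+    // The perspective divide uses vdivq_f32 directly on AArch64; 32-bit ARM
+    // has no vector divide, so the reciprocal of w is approximated with
+    // vrecpeq_f32 plus two Newton-Raphson refinement steps.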
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3)))
+        {
+            for (size_t j = 0; j < four; ++j)
+            {
+                float32x4x3_t V = vld3q_f32(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += sizeof(XMFLOAT3) * 4;
+
+                float32x2_t r3 = vget_low_f32(row3);
+                float32x2_t r = vget_low_f32(row0);
+                XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M
+                XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N
+
+                XM_PREFETCH(pInputVector);
+
+                r3 = vget_high_f32(row3);
+                r = vget_high_f32(row0);
+                XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O
+                XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P
+
+                XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE);
+
+                r = vget_low_f32(row1);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2));
+
+                r = vget_high_f32(row1);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O
+                W = vmlaq_lane_f32(W, V.val[1], r, 1); // Dx+Hy+P
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3));
+
+                r = vget_low_f32(row2);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz+M
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz+N
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4));
+
+                r = vget_high_f32(row2);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz+O
+                W = vmlaq_lane_f32(W, V.val[2], r, 1); // Dx+Hy+Lz+P
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5));
+
+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
+                V.val[0] = vdivq_f32(vResult0, W);
+                V.val[1] = vdivq_f32(vResult1, W);
+                V.val[2] = vdivq_f32(vResult2, W);
+#else
+                // 2 iterations of Newton-Raphson refinement of reciprocal
+                float32x4_t Reciprocal = vrecpeq_f32(W);
+                float32x4_t S = vrecpsq_f32(Reciprocal, W);
+                Reciprocal = vmulq_f32(S, Reciprocal);
+                S = vrecpsq_f32(Reciprocal, W);
+                Reciprocal = vmulq_f32(S, Reciprocal);
+
+                V.val[0] = vmulq_f32(vResult0, Reciprocal);
+                V.val[1] = vmulq_f32(vResult1, Reciprocal);
+                V.val[2] = vmulq_f32(vResult2, Reciprocal);
+#endif
+
+                vst3q_f32(reinterpret_cast<float*>(pOutputVector), V);
+                pOutputVector += sizeof(XMFLOAT3) * 4;
+
+                i += 4;
+            }
+        }
+    }
+
+    for (; i < VectorCount; i++)
+    {
+        float32x2_t VL = vld1_f32(reinterpret_cast<const float*>(pInputVector));
+        float32x2_t zero = vdup_n_f32(0);
+        float32x2_t VH = vld1_lane_f32(reinterpret_cast<const float*>(pInputVector) + 2, zero, 0);
+        pInputVector += InputStride;
+
+        XMVECTOR vResult = vmlaq_lane_f32(row3, row0, VL, 0); // X
+        vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y
+        vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z
+
+        VH = vget_high_f32(vResult);
+        XMVECTOR W = vdupq_lane_f32(VH, 1);
+
+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
+        vResult = vdivq_f32(vResult, W);
+#else
+        // 2 iterations of Newton-Raphson refinement of reciprocal for W
+        float32x4_t Reciprocal = vrecpeq_f32(W);
+        float32x4_t S = vrecpsq_f32(Reciprocal, W);
+        Reciprocal = vmulq_f32(S, Reciprocal);
+        S = vrecpsq_f32(Reciprocal, W);
+        Reciprocal = vmulq_f32(S, Reciprocal);
+
+        vResult = vmulq_f32(vResult, Reciprocal);
+#endif
+
+        VL = vget_low_f32(vResult);
+        vst1_f32(reinterpret_cast<float*>(pOutputVector), VL);
+        vst1q_lane_f32(reinterpret_cast<float*>(pOutputVector) + 2, vResult, 2);
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+#elif defined(_XM_SSE_INTRINSICS_)
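+    // Same four-at-a-time strategy as XMVector3TransformStream, but each
+    // result is divided by its splatted w, and packed output is
+    // re-interleaved with XM3PACK4INTO3 before storing.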
XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V3 = _mm_div_ps(vTemp, W); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 
2, 2));
+                    Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1));
+                    X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0));
+
+                    vTemp = XM_FMADD_PS(Z, row2, row3);
+                    vTemp2 = _mm_mul_ps(Y, row1);
+                    vTemp3 = _mm_mul_ps(X, row0);
+                    vTemp = _mm_add_ps(vTemp, vTemp2);
+                    vTemp = _mm_add_ps(vTemp, vTemp3);
+
+                    W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3));
+
+                    vTemp = _mm_div_ps(vTemp, W);
+                    XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
+                    pOutputVector += OutputStride;
+
+                    i += 4;
+                }
+            }
+        }
+    }
+
+    for (; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
+        pInputVector += InputStride;
+
+        XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+        XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+        XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+
+        XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3);
+        XMVECTOR vTemp2 = _mm_mul_ps(Y, row1);
+        XMVECTOR vTemp3 = _mm_mul_ps(X, row0);
+        vTemp = _mm_add_ps(vTemp, vTemp2);
+        vTemp = _mm_add_ps(vTemp, vTemp3);
+
+        XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3));
+
+        vTemp = _mm_div_ps(vTemp, W);
+
+        XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
+        pOutputVector += OutputStride;
+    }
+
+    XM_SFENCE();
+
+    return pOutputStream;
+#endif
+}
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector3TransformNormal
+(
+    FXMVECTOR V,
+    FXMMATRIX M
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTOR Z = XMVectorSplatZ(V);
+    XMVECTOR Y = XMVectorSplatY(V);
+    XMVECTOR X = XMVectorSplatX(V);
+
+    XMVECTOR Result = XMVectorMultiply(Z, M.r[2]);
+    Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
+    Result = XMVectorMultiplyAdd(X, M.r[0], Result);
+
+    return Result;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x2_t VL = vget_low_f32(V);
+    XMVECTOR vResult = vmulq_lane_f32(M.r[0], VL, 0); // X
+    vResult = vmlaq_lane_f32(vResult, M.r[1], VL, 1); // Y
+    return vmlaq_lane_f32(vResult, M.r[2], vget_high_f32(V), 0); // Z
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z
+    vResult = _mm_mul_ps(vResult, M.r[2]);
+    XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
+    vResult = XM_FMADD_PS(vTemp, M.r[1], vResult);
+    vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
+    vResult = XM_FMADD_PS(vTemp, M.r[0], vResult);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
+#endif
+
+_Use_decl_annotations_
+inline XMFLOAT3* XM_CALLCONV XMVector3TransformNormalStream
+(
+    XMFLOAT3* pOutputStream,
+    size_t OutputStride,
+    const XMFLOAT3* pInputStream,
+    size_t InputStride,
+    size_t VectorCount,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pOutputStream != nullptr);
+    assert(pInputStream != nullptr);
+
+    assert(InputStride >= sizeof(XMFLOAT3));
+    _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
+
+    assert(OutputStride >= sizeof(XMFLOAT3));
+    _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3));
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+
+    for (size_t i = 0; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
+        XMVECTOR Z = XMVectorSplatZ(V);
+        XMVECTOR Y = XMVectorSplatY(V);
+        XMVECTOR X = XMVectorSplatX(V);
+
+        XMVECTOR Result = XMVectorMultiply(Z, row2);
+        Result = XMVectorMultiplyAdd(Y, row1, Result);
+        Result = XMVectorMultiplyAdd(X, row0, Result);
+
+        XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), Result);
+
+        pInputVector += InputStride;
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3)))
+        {
+            for (size_t j = 0; j < four; ++j)
+            {
+                float32x4x3_t V = vld3q_f32(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += sizeof(XMFLOAT3) * 4;
+
+                float32x2_t r = vget_low_f32(row0);
+                XMVECTOR vResult0 = vmulq_lane_f32(V.val[0], r, 0); // Ax
+                XMVECTOR vResult1 = vmulq_lane_f32(V.val[0], r, 1); // Bx
+
+                XM_PREFETCH(pInputVector);
+
+                r = vget_high_f32(row0);
+                XMVECTOR vResult2 = vmulq_lane_f32(V.val[0], r, 0); // Cx
+
+                XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE);
+
+                r = vget_low_f32(row1);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2));
+
+                r = vget_high_f32(row1);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3));
+
+                r = vget_low_f32(row2);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4));
+
+                r = vget_high_f32(row2);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5));
+
+                V.val[0] = vResult0;
+                V.val[1] = vResult1;
+                V.val[2] = vResult2;
+
+                vst3q_f32(reinterpret_cast<float*>(pOutputVector), V);
+                pOutputVector += sizeof(XMFLOAT3) * 4;
+
+                i += 4;
+            }
+        }
+    }
+
+    for (; i < VectorCount; i++)
+    {
+        float32x2_t VL = vld1_f32(reinterpret_cast<const float*>(pInputVector));
+        float32x2_t zero = vdup_n_f32(0);
+        float32x2_t VH = vld1_lane_f32(reinterpret_cast<const float*>(pInputVector) + 2, zero, 0);
+        pInputVector += InputStride;
+
+        XMVECTOR vResult = vmulq_lane_f32(row0, VL, 0); // X
+        vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y
+        vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z
+
+        VL = vget_low_f32(vResult);
+        vst1_f32(reinterpret_cast<float*>(pOutputVector), VL);
+        vst1q_lane_f32(reinterpret_cast<float*>(pOutputVector) + 2, vResult, 2);
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+#elif defined(_XM_SSE_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if (InputStride == sizeof(XMFLOAT3))
+        {
+            if (OutputStride == sizeof(XMFLOAT3))
+            {
+                if (!(reinterpret_cast<uintptr_t>(pOutputStream) & 0xF))
+                {
+                    // Packed input, aligned & packed output
+                    for (size_t j = 0; j < four; ++j)
+                    {
+                        __m128 V1 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                        __m128 L2 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector + 16));
+                        __m128 L3 = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector + 32));
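+                        // The three 16-byte loads above span 48 bytes: four packed
+                        // XMFLOAT3 vectors. The unpack below transposes them so each
+                        // register holds one whole vector (x, y, z, junk w).
+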
pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V1 = _mm_add_ps(vTemp, vTemp3); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V2 = _mm_add_ps(vTemp, vTemp3); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V3 = _mm_add_ps(vTemp, vTemp3); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V4 = _mm_add_ps(vTemp, vTemp3); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V1 = _mm_add_ps(vTemp, vTemp3); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V2 = _mm_add_ps(vTemp, vTemp3); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V3 = _mm_add_ps(vTemp, vTemp3); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = 
_mm_add_ps(vTemp, vTemp2); + V4 = _mm_add_ps(vTemp, vTemp3); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Project +( + FXMVECTOR V, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX 
Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 0.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + XMVECTOR Result = XMVector3TransformCoord(V, Transform); + + Result = XMVectorMultiplyAdd(Result, Scale, Offset); + + return Result; +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT3* XM_CALLCONV XMVector3ProjectStream +( + XMFLOAT3* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3)); + +#if defined(_XM_NO_INTRINSICS_) + + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + + XMVECTOR Result = XMVector3TransformCoord(V, Transform); + Result = XMVectorMultiplyAdd(Result, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3))) + { + XMVECTOR ScaleX = vdupq_n_f32(HalfViewportWidth); + XMVECTOR ScaleY = vdupq_n_f32(-HalfViewportHeight); + XMVECTOR ScaleZ = vdupq_n_f32(ViewportMaxZ - ViewportMinZ); + + XMVECTOR OffsetX = vdupq_n_f32(ViewportX + HalfViewportWidth); + XMVECTOR OffsetY = vdupq_n_f32(ViewportY + HalfViewportHeight); + XMVECTOR OffsetZ = vdupq_n_f32(ViewportMinZ); + + for (size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + 
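// vld3q_f32 de-interleaves the packed stream, so V.val[0], V.val[1]
+                // and V.val[2] each hold one component (x, y or z) of four vectors.
+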
float32x2_t r3 = vget_low_f32(Transform.r[3]); + float32x2_t r = vget_low_f32(Transform.r[0]); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(Transform.r[3]); + r = vget_high_f32(Transform.r[0]); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O + XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(Transform.r[1]); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(Transform.r[1]); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O + W = vmlaq_lane_f32(W, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + r = vget_low_f32(Transform.r[2]); + vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(Transform.r[2]); + vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz+O + W = vmlaq_lane_f32(W, V.val[2], r, 1); // Dx+Hy+Lz+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult0 = vdivq_f32(vResult0, W); + vResult1 = vdivq_f32(vResult1, W); + vResult2 = vdivq_f32(vResult2, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult0 = vmulq_f32(vResult0, Reciprocal); + vResult1 = vmulq_f32(vResult1, Reciprocal); + vResult2 = vmulq_f32(vResult2, Reciprocal); +#endif + + V.val[0] = vmlaq_f32(OffsetX, vResult0, ScaleX); + V.val[1] = vmlaq_f32(OffsetY, vResult1, ScaleY); + V.val[2] = vmlaq_f32(OffsetZ, vResult2, ScaleZ); + + vst3q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT3) * 4; + + i += 4; + } + } + } + + if (i < VectorCount) + { + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(Transform.r[3], Transform.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, Transform.r[1], VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, Transform.r[2], VH, 0); // Z + + VH = vget_high_f32(vResult); + XMVECTOR W = vdupq_lane_f32(VH, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult = vdivq_f32(vResult, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult = 
vmulq_f32(vResult, Reciprocal); +#endif + + vResult = vmlaq_f32(Offset, vResult, Scale); + + VL = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), VL); + vst1q_lane_f32(reinterpret_cast(pOutputVector) + 2, vResult, 2); + pOutputVector += OutputStride; + } + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (OutputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V1 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V2 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V3 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + 
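// Splat w and divide for the perspective divide, then map the
+                        // resulting NDC position into the viewport via Scale/Offset.
+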
vTemp = _mm_div_ps(vTemp, W); + V4 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V1 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V2 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V3 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V4 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + 
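// XM3UNPACK3INTO4 is a macro: it rewrites V1 in place and declares
+                    // the locals V2, V3 and V4 consumed by the result blocks below.
+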
XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR 
XM_CALLCONV XMVector3Unproject +( + FXMVECTOR V, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } }; + + XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f); + Scale = XMVectorReciprocal(Scale); + + XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f); + Offset = XMVectorMultiplyAdd(Scale, Offset, D.v); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + XMVECTOR Result = XMVectorMultiplyAdd(V, Scale, Offset); + + return XMVector3TransformCoord(Result, Transform); +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT3* XM_CALLCONV XMVector3UnprojectStream +( + XMFLOAT3* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3)); + +#if defined(_XM_NO_INTRINSICS_) + + static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } }; + + XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f); + Scale = XMVectorReciprocal(Scale); + + XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f); + Offset = XMVectorMultiplyAdd(Scale, Offset, D.v); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + + XMVECTOR Result = XMVectorMultiplyAdd(V, Scale, Offset); + + Result = XMVector3TransformCoord(Result, Transform); + + XMStoreFloat3(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + float sx = 1.f / (ViewportWidth * 0.5f); + float sy = 1.f / (-ViewportHeight * 0.5f); + float sz = 1.f / (ViewportMaxZ - ViewportMinZ); + + float ox = (-ViewportX * sx) - 1.f; + float oy = (-ViewportY * sy) + 1.f; + float oz = (-ViewportMinZ * sz); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3))) + { + for 
(size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + XMVECTOR ScaleX = vdupq_n_f32(sx); + XMVECTOR OffsetX = vdupq_n_f32(ox); + XMVECTOR VX = vmlaq_f32(OffsetX, ScaleX, V.val[0]); + + float32x2_t r3 = vget_low_f32(Transform.r[3]); + float32x2_t r = vget_low_f32(Transform.r[0]); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), VX, r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), VX, r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(Transform.r[3]); + r = vget_high_f32(Transform.r[0]); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), VX, r, 0); // Cx+O + XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), VX, r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + XMVECTOR ScaleY = vdupq_n_f32(sy); + XMVECTOR OffsetY = vdupq_n_f32(oy); + XMVECTOR VY = vmlaq_f32(OffsetY, ScaleY, V.val[1]); + + r = vget_low_f32(Transform.r[1]); + vResult0 = vmlaq_lane_f32(vResult0, VY, r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, VY, r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(Transform.r[1]); + vResult2 = vmlaq_lane_f32(vResult2, VY, r, 0); // Cx+Gy+O + W = vmlaq_lane_f32(W, VY, r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + XMVECTOR ScaleZ = vdupq_n_f32(sz); + XMVECTOR OffsetZ = vdupq_n_f32(oz); + XMVECTOR VZ = vmlaq_f32(OffsetZ, ScaleZ, V.val[2]); + + r = vget_low_f32(Transform.r[2]); + vResult0 = vmlaq_lane_f32(vResult0, VZ, r, 0); // Ax+Ey+Iz+M + vResult1 = vmlaq_lane_f32(vResult1, VZ, r, 1); // Bx+Fy+Jz+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(Transform.r[2]); + vResult2 = vmlaq_lane_f32(vResult2, VZ, r, 0); // Cx+Gy+Kz+O + W = vmlaq_lane_f32(W, VZ, r, 1); // Dx+Hy+Lz+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + V.val[0] = vdivq_f32(vResult0, W); + V.val[1] = vdivq_f32(vResult1, W); + V.val[2] = vdivq_f32(vResult2, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + V.val[0] = vmulq_f32(vResult0, Reciprocal); + V.val[1] = vmulq_f32(vResult1, Reciprocal); + V.val[2] = vmulq_f32(vResult2, Reciprocal); +#endif + + vst3q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT3) * 4; + + i += 4; + } + } + } + + if (i < VectorCount) + { + float32x2_t ScaleL = vcreate_f32( + static_cast(*reinterpret_cast(&sx)) + | (static_cast(*reinterpret_cast(&sy)) << 32)); + float32x2_t ScaleH = vcreate_f32(static_cast(*reinterpret_cast(&sz))); + + float32x2_t OffsetL = vcreate_f32( + static_cast(*reinterpret_cast(&ox)) + | (static_cast(*reinterpret_cast(&oy)) << 32)); + float32x2_t OffsetH = vcreate_f32(static_cast(*reinterpret_cast(&oz))); + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + VL = vmla_f32(OffsetL, VL, ScaleL); + VH = vmla_f32(OffsetH, VH, ScaleH); + + XMVECTOR vResult = vmlaq_lane_f32(Transform.r[3], Transform.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, Transform.r[1], 
VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, Transform.r[2], VH, 0); // Z + + VH = vget_high_f32(vResult); + XMVECTOR W = vdupq_lane_f32(VH, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult = vdivq_f32(vResult, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult = vmulq_f32(vResult, Reciprocal); +#endif + + VL = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), VL); + vst1q_lane_f32(reinterpret_cast(pOutputVector) + 2, vResult, 2); + pOutputVector += OutputStride; + } + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } }; + + XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f); + Scale = XMVectorReciprocal(Scale); + + XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f); + Offset = _mm_mul_ps(Scale, Offset); + Offset = _mm_add_ps(Offset, D); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (OutputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + V1 = XM_FMADD_PS(V1, Scale, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V1 = _mm_div_ps(vTemp, W); + + // Result 2 + V2 = XM_FMADD_PS(V2, Scale, Offset); + + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + V3 = XM_FMADD_PS(V3, Scale, Offset); + + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, 
vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V3 = _mm_div_ps(vTemp, W); + + // Result 4 + V4 = XM_FMADD_PS(V4, Scale, Offset); + + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + V1 = XM_FMADD_PS(V1, Scale, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V1 = _mm_div_ps(vTemp, W); + + // Result 2 + V2 = XM_FMADD_PS(V2, Scale, Offset); + + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + V3 = XM_FMADD_PS(V3, Scale, Offset); + + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V3 = _mm_div_ps(vTemp, W); + + // Result 4 + V4 = XM_FMADD_PS(V4, Scale, Offset); + + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + 
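// XM3PACK4INTO3 above repacked the four results into 48 contiguous
+                        // bytes: V1 holds the first 16, vTemp the middle, V3 the last.
+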
_mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + V1 = XM_FMADD_PS(V1, Scale, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + V2 = XM_FMADD_PS(V2, Scale, Offset); + + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + V3 = XM_FMADD_PS(V3, Scale, Offset); + + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + V4 = XM_FMADD_PS(V4, Scale, Offset); + + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + V = _mm_mul_ps(V, Scale); + V = _mm_add_ps(V, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = 
_mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +/**************************************************************************** + * + * 4D Vector + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4Equal +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2]) && (V1.vector4_f32[3] == V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4EqualR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4EqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + + if ((V1.vector4_f32[0] == V2.vector4_f32[0]) && + (V1.vector4_f32[1] == V2.vector4_f32[1]) && + (V1.vector4_f32[2] == V2.vector4_f32[2]) && + (V1.vector4_f32[3] == V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) && + (V1.vector4_f32[1] != V2.vector4_f32[1]) && + (V1.vector4_f32[2] != V2.vector4_f32[2]) && + (V1.vector4_f32[3] != V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp); + uint32_t CR = 0; + if (iTest == 0xf) // All equal? + { + CR = XM_CRMASK_CR6TRUE; + } + else if (iTest == 0) // All not equal? 
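+    // CR6-style encoding: "all true" and "all false" are distinct mask bits,
+    // so helpers such as XMComparisonAllTrue test a single bit each.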
+ { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4EqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2]) && (V1.vector4_u32[3] == V2.vector4_u32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return ((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) == 0xf) != 0); +#else + return XMComparisonAllTrue(XMVector4EqualIntR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4EqualIntR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if (V1.vector4_u32[0] == V2.vector4_u32[0] && + V1.vector4_u32[1] == V2.vector4_u32[1] && + V1.vector4_u32[2] == V2.vector4_u32[2] && + V1.vector4_u32[3] == V2.vector4_u32[3]) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (V1.vector4_u32[0] != V2.vector4_u32[0] && + V1.vector4_u32[1] != V2.vector4_u32[1] && + V1.vector4_u32[2] != V2.vector4_u32[2] && + V1.vector4_u32[3] != V2.vector4_u32[3]) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + int iTest = _mm_movemask_ps(_mm_castsi128_ps(vTemp)); + uint32_t CR = 0; + if (iTest == 0xf) // All equal? + { + CR = XM_CRMASK_CR6TRUE; + } + else if (iTest == 0) // All not equal? 
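+    // _mm_movemask_ps gathers the four lane sign bits, so 0xf means every
+    // 32-bit lane compared equal and 0 means none did.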
+ { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +inline bool XM_CALLCONV XMVector4NearEqual +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Epsilon +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float dx, dy, dz, dw; + + dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]); + dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]); + dz = fabsf(V1.vector4_f32[2] - V2.vector4_f32[2]); + dw = fabsf(V1.vector4_f32[3] - V2.vector4_f32[3]); + return (((dx <= Epsilon.vector4_f32[0]) && + (dy <= Epsilon.vector4_f32[1]) && + (dz <= Epsilon.vector4_f32[2]) && + (dw <= Epsilon.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vDelta = vsubq_f32(V1, V2); +#ifdef _MSC_VER + uint32x4_t vResult = vacleq_f32(vDelta, Epsilon); +#else + uint32x4_t vResult = vcleq_f32(vabsq_f32(vDelta), Epsilon); +#endif + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Get the difference + XMVECTOR vDelta = _mm_sub_ps(V1, V2); + // Get the absolute value of the difference + XMVECTOR vTemp = _mm_setzero_ps(); + vTemp = _mm_sub_ps(vTemp, vDelta); + vTemp = _mm_max_ps(vTemp, vDelta); + vTemp = _mm_cmple_ps(vTemp, Epsilon); + return ((_mm_movemask_ps(vTemp) == 0xf) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4NotEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2]) || (V1.vector4_f32[3] != V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) != 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpneq_ps(V1, V2); + return ((_mm_movemask_ps(vTemp)) != 0); +#else + return XMComparisonAnyFalse(XMVector4EqualR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4NotEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2]) || (V1.vector4_u32[3] != V2.vector4_u32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) != 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return ((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) != 0xF) != 0); +#else + return XMComparisonAnyFalse(XMVector4EqualIntR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4Greater +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > 
V2.vector4_f32[2]) && (V1.vector4_f32[3] > V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4GreaterR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if (V1.vector4_f32[0] > V2.vector4_f32[0] && + V1.vector4_f32[1] > V2.vector4_f32[1] && + V1.vector4_f32[2] > V2.vector4_f32[2] && + V1.vector4_f32[3] > V2.vector4_f32[3]) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (V1.vector4_f32[0] <= V2.vector4_f32[0] && + V1.vector4_f32[1] <= V2.vector4_f32[1] && + V1.vector4_f32[2] <= V2.vector4_f32[2] && + V1.vector4_f32[3] <= V2.vector4_f32[3]) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + uint32_t CR = 0; + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0xf) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4GreaterOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2]) && (V1.vector4_f32[3] >= V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterOrEqualR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4GreaterOrEqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) && + (V1.vector4_f32[1] >= V2.vector4_f32[1]) && + (V1.vector4_f32[2] >= V2.vector4_f32[2]) && + (V1.vector4_f32[3] >= V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) && + (V1.vector4_f32[1] < V2.vector4_f32[1]) && + (V1.vector4_f32[2] < V2.vector4_f32[2]) && + (V1.vector4_f32[3] < V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t 
vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + uint32_t CR = 0; + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0x0f) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4Less +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2]) && (V1.vector4_f32[3] < V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcltq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmplt_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterR(V2, V1)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4LessOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2]) && (V1.vector4_f32[3] <= V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcleq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmple_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterOrEqualR(V2, V1)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4InBounds +( + FXMVECTOR V, + FXMVECTOR Bounds +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) && + (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) && + (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) && + (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test if less than or equal + uint32x4_t ivTemp1 = vcleq_f32(V, Bounds); + // Negate the bounds + float32x4_t vTemp2 = vnegq_f32(Bounds); + // Test if greater or equal (Reversed) + uint32x4_t ivTemp2 = vcleq_f32(vTemp2, V); + // Blend answers + ivTemp1 = vandq_u32(ivTemp1, ivTemp2); + // in bounds? 
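+    // (The vzip_u8/vzip_u16 pair below is this header's usual ARMv7 idiom for
+    // collapsing a per-lane mask into a single testable word: the extracted
+    // u32 contains one byte from each lane, so it is 0xFFFFFFFF only when
+    // every lane passed the test.)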
+ uint8x8x2_t vTemp = vzip_u8(vget_low_u8(ivTemp1), vget_high_u8(ivTemp1)); + uint16x4x2_t vTemp3 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp3.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne); + // Test if greater or equal (Reversed) + vTemp2 = _mm_cmple_ps(vTemp2, V); + // Blend answers + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + // All in bounds? + return ((_mm_movemask_ps(vTemp1) == 0x0f) != 0); +#else + return XMComparisonAllInBounds(XMVector4InBoundsR(V, Bounds)); +#endif +} + +//------------------------------------------------------------------------------ + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(push) +#pragma float_control(precise, on) +#endif + +inline bool XM_CALLCONV XMVector4IsNaN(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (XMISNAN(V.vector4_f32[0]) || + XMISNAN(V.vector4_f32[1]) || + XMISNAN(V.vector4_f32[2]) || + XMISNAN(V.vector4_f32[3])); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test against itself. NaN is always not equal + uint32x4_t vTempNan = vceqq_f32(V, V); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempNan), vget_high_u8(vTempNan)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + // If any are NaN, the mask is zero + return (vget_lane_u32(vTemp2.val[1], 1) != 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test against itself. NaN is always not equal + XMVECTOR vTempNan = _mm_cmpneq_ps(V, V); + // If any are NaN, the mask is non-zero + return (_mm_movemask_ps(vTempNan) != 0); +#endif +} + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(pop) +#endif + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4IsInfinite(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + return (XMISINF(V.vector4_f32[0]) || + XMISINF(V.vector4_f32[1]) || + XMISINF(V.vector4_f32[2]) || + XMISINF(V.vector4_f32[3])); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Mask off the sign bit + uint32x4_t vTempInf = vandq_u32(V, g_XMAbsMask); + // Compare to infinity + vTempInf = vceqq_f32(vTempInf, g_XMInfinity); + // If any are infinity, the signs are true. + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempInf), vget_high_u8(vTempInf)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) != 0); +#elif defined(_XM_SSE_INTRINSICS_) + // Mask off the sign bit + XMVECTOR vTemp = _mm_and_ps(V, g_XMAbsMask); + // Compare to infinity + vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity); + // If any are infinity, the signs are true. 
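+    // (Each matching lane of the compare is all-ones, so its sign bit is set
+    // and the movemask below is non-zero if any lane equals +/-infinity.)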
+ return (_mm_movemask_ps(vTemp) != 0); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Dot +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result; + Result.f[0] = + Result.f[1] = + Result.f[2] = + Result.f[3] = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2] + V1.vector4_f32[3] * V2.vector4_f32[3]; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vTemp = vmulq_f32(V1, V2); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + return vcombine_f32(v1, v1); +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_dp_ps(V1, V2, 0xff); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vTemp = _mm_mul_ps(V1, V2); + vTemp = _mm_hadd_ps(vTemp, vTemp); + return _mm_hadd_ps(vTemp, vTemp); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp2 = V2; + XMVECTOR vTemp = _mm_mul_ps(V1, vTemp2); + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp, _MM_SHUFFLE(1, 0, 0, 0)); // Copy X to the Z position and Y to the W position + vTemp2 = _mm_add_ps(vTemp2, vTemp); // Add Z = X+Z; W = Y+W; + vTemp = _mm_shuffle_ps(vTemp, vTemp2, _MM_SHUFFLE(0, 3, 0, 0)); // Copy W to the Z position + vTemp = _mm_add_ps(vTemp, vTemp2); // Add Z and W together + return XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(2, 2, 2, 2)); // Splat Z and return +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Cross +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR V3 +) noexcept +{ + // [ ((v2.z*v3.w-v2.w*v3.z)*v1.y)-((v2.y*v3.w-v2.w*v3.y)*v1.z)+((v2.y*v3.z-v2.z*v3.y)*v1.w), + // ((v2.w*v3.z-v2.z*v3.w)*v1.x)-((v2.w*v3.x-v2.x*v3.w)*v1.z)+((v2.z*v3.x-v2.x*v3.z)*v1.w), + // ((v2.y*v3.w-v2.w*v3.y)*v1.x)-((v2.x*v3.w-v2.w*v3.x)*v1.y)+((v2.x*v3.y-v2.y*v3.x)*v1.w), + // ((v2.z*v3.y-v2.y*v3.z)*v1.x)-((v2.z*v3.x-v2.x*v3.z)*v1.y)+((v2.y*v3.x-v2.x*v3.y)*v1.z) ] + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + (((V2.vector4_f32[2] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[2])) * V1.vector4_f32[1]) - (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1])) * V1.vector4_f32[2]) + (((V2.vector4_f32[1] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[1])) * V1.vector4_f32[3]), + (((V2.vector4_f32[3] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[3])) * V1.vector4_f32[0]) - (((V2.vector4_f32[3] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[3])) * V1.vector4_f32[2]) + (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2])) * V1.vector4_f32[3]), + (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1])) * V1.vector4_f32[0]) - (((V2.vector4_f32[0] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[0])) * V1.vector4_f32[1]) + (((V2.vector4_f32[0] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[0])) * V1.vector4_f32[3]), + (((V2.vector4_f32[2] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[2])) * V1.vector4_f32[0]) - (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2])) * V1.vector4_f32[1]) + 
(((V2.vector4_f32[1] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[1])) * V1.vector4_f32[2]), + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + const float32x2_t select = vget_low_f32(g_XMMaskX); + + // Term1: V2zwyz * V3wzwy + const float32x2_t v2xy = vget_low_f32(V2); + const float32x2_t v2zw = vget_high_f32(V2); + const float32x2_t v2yx = vrev64_f32(v2xy); + const float32x2_t v2wz = vrev64_f32(v2zw); + const float32x2_t v2yz = vbsl_f32(select, v2yx, v2wz); + + const float32x2_t v3zw = vget_high_f32(V3); + const float32x2_t v3wz = vrev64_f32(v3zw); + const float32x2_t v3xy = vget_low_f32(V3); + const float32x2_t v3wy = vbsl_f32(select, v3wz, v3xy); + + float32x4_t vTemp1 = vcombine_f32(v2zw, v2yz); + float32x4_t vTemp2 = vcombine_f32(v3wz, v3wy); + XMVECTOR vResult = vmulq_f32(vTemp1, vTemp2); + + // - V2wzwy * V3zwyz + const float32x2_t v2wy = vbsl_f32(select, v2wz, v2xy); + + const float32x2_t v3yx = vrev64_f32(v3xy); + const float32x2_t v3yz = vbsl_f32(select, v3yx, v3wz); + + vTemp1 = vcombine_f32(v2wz, v2wy); + vTemp2 = vcombine_f32(v3zw, v3yz); + vResult = vmlsq_f32(vResult, vTemp1, vTemp2); + + // term1 * V1yxxx + const float32x2_t v1xy = vget_low_f32(V1); + const float32x2_t v1yx = vrev64_f32(v1xy); + + vTemp1 = vcombine_f32(v1yx, vdup_lane_f32(v1yx, 1)); + vResult = vmulq_f32(vResult, vTemp1); + + // Term2: V2ywxz * V3wxwx + const float32x2_t v2yw = vrev64_f32(v2wy); + const float32x2_t v2xz = vbsl_f32(select, v2xy, v2wz); + + const float32x2_t v3wx = vbsl_f32(select, v3wz, v3yx); + + vTemp1 = vcombine_f32(v2yw, v2xz); + vTemp2 = vcombine_f32(v3wx, v3wx); + float32x4_t vTerm = vmulq_f32(vTemp1, vTemp2); + + // - V2wxwx * V3ywxz + const float32x2_t v2wx = vbsl_f32(select, v2wz, v2yx); + + const float32x2_t v3yw = vrev64_f32(v3wy); + const float32x2_t v3xz = vbsl_f32(select, v3xy, v3wz); + + vTemp1 = vcombine_f32(v2wx, v2wx); + vTemp2 = vcombine_f32(v3yw, v3xz); + vTerm = vmlsq_f32(vTerm, vTemp1, vTemp2); + + // vResult - term2 * V1zzyy + const float32x2_t v1zw = vget_high_f32(V1); + + vTemp1 = vcombine_f32(vdup_lane_f32(v1zw, 0), vdup_lane_f32(v1yx, 0)); + vResult = vmlsq_f32(vResult, vTerm, vTemp1); + + // Term3: V2yzxy * V3zxyx + const float32x2_t v3zx = vrev64_f32(v3xz); + + vTemp1 = vcombine_f32(v2yz, v2xy); + vTemp2 = vcombine_f32(v3zx, v3yx); + vTerm = vmulq_f32(vTemp1, vTemp2); + + // - V2zxyx * V3yzxy + const float32x2_t v2zx = vrev64_f32(v2xz); + + vTemp1 = vcombine_f32(v2zx, v2yx); + vTemp2 = vcombine_f32(v3yz, v3xy); + vTerm = vmlsq_f32(vTerm, vTemp1, vTemp2); + + // vResult + term3 * V1wwwz + const float32x2_t v1wz = vrev64_f32(v1zw); + + vTemp1 = vcombine_f32(vdup_lane_f32(v1wz, 0), v1wz); + return vmlaq_f32(vResult, vTerm, vTemp1); +#elif defined(_XM_SSE_INTRINSICS_) + // V2zwyz * V3wzwy + XMVECTOR vResult = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 1, 3, 2)); + XMVECTOR vTemp3 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 3, 2, 3)); + vResult = _mm_mul_ps(vResult, vTemp3); + // - V2wzwy * V3zwyz + XMVECTOR vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 3, 2, 3)); + vTemp3 = XM_PERMUTE_PS(vTemp3, _MM_SHUFFLE(1, 3, 0, 1)); + vResult = XM_FNMADD_PS(vTemp2, vTemp3, vResult); + // term1 * V1yxxx + XMVECTOR vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 1)); + vResult = _mm_mul_ps(vResult, vTemp1); + + // V2ywxz * V3wxwx + vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 0, 3, 1)); + vTemp3 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 3, 0, 3)); + vTemp3 = _mm_mul_ps(vTemp3, vTemp2); + // - V2wxwx * V3ywxz + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(2, 1, 2, 1)); + vTemp1 = 
XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 0, 3, 1)); + vTemp3 = XM_FNMADD_PS(vTemp2, vTemp1, vTemp3); + // vResult - temp * V1zzyy + vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 2, 2)); + vResult = XM_FNMADD_PS(vTemp1, vTemp3, vResult); + + // V2yzxy * V3zxyx + vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 0, 2, 1)); + vTemp3 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 1, 0, 2)); + vTemp3 = _mm_mul_ps(vTemp3, vTemp2); + // - V2zxyx * V3yzxy + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(2, 0, 2, 1)); + vTemp1 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 0, 2, 1)); + vTemp3 = XM_FNMADD_PS(vTemp1, vTemp2, vTemp3); + // vResult + term * V1wwwz + vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 3, 3, 3)); + vResult = XM_FMADD_PS(vTemp3, vTemp1, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4LengthSq(FXMVECTOR V) noexcept +{ + return XMVector4Dot(V, V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4ReciprocalLengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector4LengthSq(V); + Result = XMVectorReciprocalSqrtEst(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot4 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + // Reciprocal sqrt (estimate) + v2 = vrsqrte_f32(v1); + return vcombine_f32(v2, v2); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff); + return _mm_rsqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_rsqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and w + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2)); + // x+z, y+w + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // x+z,x+z,x+z,y+w + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0)); + // ??,??,y+w,y+w + vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0)); + // ??,??,x+z+y+w,?? 
+ vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // Splat the length + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2)); + // Get the reciprocal + vLengthSq = _mm_rsqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4ReciprocalLength(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector4LengthSq(V); + Result = XMVectorReciprocalSqrt(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot4 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + // Reciprocal sqrt + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff); + XMVECTOR vLengthSq = _mm_sqrt_ps(vTemp); + return _mm_div_ps(g_XMOne, vLengthSq); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + vLengthSq = _mm_div_ps(g_XMOne, vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and w + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2)); + // x+z, y+w + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // x+z,x+z,x+z,y+w + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0)); + // ??,??,y+w,y+w + vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0)); + // ??,??,x+z+y+w,?? + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // Splat the length + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2)); + // Get the reciprocal + vLengthSq = _mm_sqrt_ps(vLengthSq); + // Accurate! 
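+    // (sqrt followed by a divide gives a full-precision reciprocal length;
+    // the *Est variant trades this for speed via _mm_rsqrt_ps, which is only
+    // accurate to roughly 12 bits.)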
+ vLengthSq = _mm_div_ps(g_XMOne, vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4LengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector4LengthSq(V); + Result = XMVectorSqrtEst(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot4 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(v1, zero); + // Sqrt (estimate) + float32x2_t Result = vrsqrte_f32(v1); + Result = vmul_f32(v1, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and w + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2)); + // x+z, y+w + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // x+z,x+z,x+z,y+w + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0)); + // ??,??,y+w,y+w + vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0)); + // ??,??,x+z+y+w,?? + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // Splat the length + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2)); + // Get the length + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Length(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector4LengthSq(V); + Result = XMVectorSqrt(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot4 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(v1, zero); + // Sqrt + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + Result = vmul_f32(v1, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and w + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2)); + // x+z, y+w + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // x+z,x+z,x+z,y+w + 
vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0)); + // ??,??,y+w,y+w + vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0)); + // ??,??,x+z+y+w,?? + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // Splat the length + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2)); + // Get the length + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ +// XMVector4NormalizeEst uses a reciprocal estimate and +// returns QNaN on zero and infinite vectors. + +inline XMVECTOR XM_CALLCONV XMVector4NormalizeEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector4ReciprocalLength(V); + Result = XMVectorMultiply(V, Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot4 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + // Reciprocal sqrt (estimate) + v2 = vrsqrte_f32(v1); + // Normalize + return vmulq_f32(V, vcombine_f32(v2, v2)); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff); + XMVECTOR vResult = _mm_rsqrt_ps(vTemp); + return _mm_mul_ps(vResult, V); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vDot = _mm_mul_ps(V, V); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_rsqrt_ps(vDot); + vDot = _mm_mul_ps(vDot, V); + return vDot; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and w + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2)); + // x+z, y+w + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // x+z,x+z,x+z,y+w + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0)); + // ??,??,y+w,y+w + vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0)); + // ??,??,x+z+y+w,?? 
+ vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // Splat the length + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2)); + // Get the reciprocal + XMVECTOR vResult = _mm_rsqrt_ps(vLengthSq); + // Reciprocal mul to perform the normalization + vResult = _mm_mul_ps(vResult, V); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Normalize(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fLength; + XMVECTOR vResult; + + vResult = XMVector4Length(V); + fLength = vResult.vector4_f32[0]; + + // Prevent divide by zero + if (fLength > 0) + { + fLength = 1.0f / fLength; + } + + vResult.vector4_f32[0] = V.vector4_f32[0] * fLength; + vResult.vector4_f32[1] = V.vector4_f32[1] * fLength; + vResult.vector4_f32[2] = V.vector4_f32[2] * fLength; + vResult.vector4_f32[3] = V.vector4_f32[3] * fLength; + return vResult; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot4 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + uint32x2_t VEqualsZero = vceq_f32(v1, vdup_n_f32(0)); + uint32x2_t VEqualsInf = vceq_f32(v1, vget_low_f32(g_XMInfinity)); + // Reciprocal sqrt (2 iterations of Newton-Raphson) + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + v2 = vmul_f32(S1, R1); + // Normalize + XMVECTOR vResult = vmulq_f32(V, vcombine_f32(v2, v2)); + vResult = vbslq_f32(vcombine_f32(VEqualsZero, VEqualsZero), vdupq_n_f32(0), vResult); + return vbslq_f32(vcombine_f32(VEqualsInf, VEqualsInf), g_XMQNaN, vResult); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vLengthSq = _mm_dp_ps(V, V, 0xff); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE3_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result 
based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y,z and w + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and w + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2)); + // x+z, y+w + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // x+z,x+z,x+z,y+w + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0)); + // ??,??,y+w,y+w + vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0)); + // ??,??,x+z+y+w,?? + vLengthSq = _mm_add_ps(vLengthSq, vTemp); + // Splat the length + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2)); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4ClampLength +( + FXMVECTOR V, + float LengthMin, + float LengthMax +) noexcept +{ + XMVECTOR ClampMax = XMVectorReplicate(LengthMax); + XMVECTOR ClampMin = XMVectorReplicate(LengthMin); + + return XMVector4ClampLengthV(V, ClampMin, ClampMax); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4ClampLengthV +( + FXMVECTOR V, + FXMVECTOR LengthMin, + FXMVECTOR LengthMax +) noexcept +{ + assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetW(LengthMin) == XMVectorGetX(LengthMin))); + assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetW(LengthMax) == XMVectorGetX(LengthMax))); + assert(XMVector4GreaterOrEqual(LengthMin, XMVectorZero())); + assert(XMVector4GreaterOrEqual(LengthMax, XMVectorZero())); + assert(XMVector4GreaterOrEqual(LengthMax, LengthMin)); + + XMVECTOR LengthSq = XMVector4LengthSq(V); + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq); + + XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v); + XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero); + + XMVECTOR Normal = XMVectorMultiply(V, RcpLength); + + XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength); + + XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength); + Length = XMVectorSelect(LengthSq, Length, Select); + Normal = XMVectorSelect(LengthSq, Normal, Select); + + XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax); + XMVECTOR ControlMin = XMVectorLess(Length, LengthMin); + + XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax); + ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin); + + 
XMVECTOR Result = XMVectorMultiply(Normal, ClampLength); + + // Preserve the original vector (with no precision loss) if the length falls within the given range + XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin); + Result = XMVectorSelect(Result, V, Control); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Reflect +( + FXMVECTOR Incident, + FXMVECTOR Normal +) noexcept +{ + // Result = Incident - (2 * dot(Incident, Normal)) * Normal + + XMVECTOR Result = XMVector4Dot(Incident, Normal); + Result = XMVectorAdd(Result, Result); + Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Refract +( + FXMVECTOR Incident, + FXMVECTOR Normal, + float RefractionIndex +) noexcept +{ + XMVECTOR Index = XMVectorReplicate(RefractionIndex); + return XMVector4RefractV(Incident, Normal, Index); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4RefractV +( + FXMVECTOR Incident, + FXMVECTOR Normal, + FXMVECTOR RefractionIndex +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR IDotN; + XMVECTOR R; + const XMVECTOR Zero = XMVectorZero(); + + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + + IDotN = XMVector4Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v); + R = XMVectorMultiply(R, RefractionIndex); + R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v); + + if (XMVector4LessOrEqual(R, Zero)) + { + // Total internal reflection + return Zero; + } + else + { + XMVECTOR Result; + + // R = RefractionIndex * IDotN + sqrt(R) + R = XMVectorSqrt(R); + R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R); + + // Result = RefractionIndex * Incident - Normal * R + Result = XMVectorMultiply(RefractionIndex, Incident); + Result = XMVectorNegativeMultiplySubtract(Normal, R, Result); + + return Result; + } + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR IDotN = XMVector4Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + float32x4_t R = vmlsq_f32(g_XMOne, IDotN, IDotN); + R = vmulq_f32(R, RefractionIndex); + R = vmlsq_f32(g_XMOne, R, RefractionIndex); + + uint32x4_t vResult = vcleq_f32(R, g_XMZero); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + if (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU) + { + // Total internal reflection + vResult = g_XMZero; + } + else + { + // Sqrt(R) + float32x4_t S0 = vrsqrteq_f32(R); + float32x4_t P0 = vmulq_f32(R, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + float32x4_t S1 = vmulq_f32(S0, R0); + float32x4_t P1 = vmulq_f32(R, S1); + float32x4_t R1 = vrsqrtsq_f32(P1, S1); + float32x4_t S2 = vmulq_f32(S1, R1); + R = vmulq_f32(R, S2); + // R = RefractionIndex * IDotN + sqrt(R) + R = vmlaq_f32(R, RefractionIndex, IDotN); + // Result = RefractionIndex * Incident - Normal * R + vResult = vmulq_f32(RefractionIndex, Incident); + vResult = vmlsq_f32(vResult, R, Normal); + } + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + 
XMVECTOR IDotN = XMVector4Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + XMVECTOR R = XM_FNMADD_PS(IDotN, IDotN, g_XMOne); + XMVECTOR R2 = _mm_mul_ps(RefractionIndex, RefractionIndex); + R = XM_FNMADD_PS(R, R2, g_XMOne); + + XMVECTOR vResult = _mm_cmple_ps(R, g_XMZero); + if (_mm_movemask_ps(vResult) == 0x0f) + { + // Total internal reflection + vResult = g_XMZero; + } + else + { + // R = RefractionIndex * IDotN + sqrt(R) + R = _mm_sqrt_ps(R); + R = XM_FMADD_PS(RefractionIndex, IDotN, R); + // Result = RefractionIndex * Incident - Normal * R + vResult = _mm_mul_ps(RefractionIndex, Incident); + vResult = XM_FNMADD_PS(R, Normal, vResult); + } + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Orthogonal(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + V.vector4_f32[2], + V.vector4_f32[3], + -V.vector4_f32[0], + -V.vector4_f32[1] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Negate = { { { 1.f, 1.f, -1.f, -1.f } } }; + + float32x4_t Result = vcombine_f32(vget_high_f32(V), vget_low_f32(V)); + return vmulq_f32(Result, Negate); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 FlipZW = { { { 1.0f, 1.0f, -1.0f, -1.0f } } }; + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 0, 3, 2)); + vResult = _mm_mul_ps(vResult, FlipZW); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormalsEst +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector4Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v); + Result = XMVectorACosEst(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormals +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector4Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v); + Result = XMVectorACos(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenVectors +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + XMVECTOR L1 = XMVector4ReciprocalLength(V1); + XMVECTOR L2 = XMVector4ReciprocalLength(V2); + + XMVECTOR Dot = XMVector4Dot(V1, V2); + + L1 = XMVectorMultiply(L1, L2); + + XMVECTOR CosAngle = XMVectorMultiply(Dot, L1); + CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v); + + return XMVectorACos(CosAngle); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Transform +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fX = (M.m[0][0] * V.vector4_f32[0]) + (M.m[1][0] * V.vector4_f32[1]) + (M.m[2][0] * V.vector4_f32[2]) + (M.m[3][0] * V.vector4_f32[3]); + float fY = (M.m[0][1] * V.vector4_f32[0]) + (M.m[1][1] * V.vector4_f32[1]) + (M.m[2][1] * V.vector4_f32[2]) + (M.m[3][1] * V.vector4_f32[3]); + float fZ = (M.m[0][2] * V.vector4_f32[0]) + (M.m[1][2] * V.vector4_f32[1]) + (M.m[2][2] * V.vector4_f32[2]) + (M.m[3][2] * V.vector4_f32[3]); + float fW = (M.m[0][3] * V.vector4_f32[0]) + (M.m[1][3] * V.vector4_f32[1]) + (M.m[2][3] * V.vector4_f32[2]) + (M.m[3][3] * V.vector4_f32[3]); 
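+    // Note: V is treated as a row vector, so each component above is the dot
+    // of V with a *column* of M, i.e. this computes V * M -- the same
+    // convention as the SSE/NEON paths below.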
+    XMVECTORF32 vResult = { { { fX, fY, fZ, fW } } };
+    return vResult.v;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x2_t VL = vget_low_f32(V);
+    XMVECTOR vResult = vmulq_lane_f32(M.r[0], VL, 0); // X
+    vResult = vmlaq_lane_f32(vResult, M.r[1], VL, 1); // Y
+    float32x2_t VH = vget_high_f32(V);
+    vResult = vmlaq_lane_f32(vResult, M.r[2], VH, 0); // Z
+    return vmlaq_lane_f32(vResult, M.r[3], VH, 1); // W
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); // W
+    vResult = _mm_mul_ps(vResult, M.r[3]);
+    XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z
+    vResult = XM_FMADD_PS(vTemp, M.r[2], vResult);
+    vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
+    vResult = XM_FMADD_PS(vTemp, M.r[1], vResult);
+    vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
+    vResult = XM_FMADD_PS(vTemp, M.r[0], vResult);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+_Use_decl_annotations_
+inline XMFLOAT4* XM_CALLCONV XMVector4TransformStream
+(
+    XMFLOAT4* pOutputStream,
+    size_t OutputStride,
+    const XMFLOAT4* pInputStream,
+    size_t InputStride,
+    size_t VectorCount,
+    FXMMATRIX M
+) noexcept
+{
+    assert(pOutputStream != nullptr);
+    assert(pInputStream != nullptr);
+
+    assert(InputStride >= sizeof(XMFLOAT4));
+    _Analysis_assume_(InputStride >= sizeof(XMFLOAT4));
+
+    assert(OutputStride >= sizeof(XMFLOAT4));
+    _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4));
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    for (size_t i = 0; i < VectorCount; i++)
+    {
+        XMVECTOR V = XMLoadFloat4(reinterpret_cast<const XMFLOAT4*>(pInputVector));
+        XMVECTOR W = XMVectorSplatW(V);
+        XMVECTOR Z = XMVectorSplatZ(V);
+        XMVECTOR Y = XMVectorSplatY(V);
+        XMVECTOR X = XMVectorSplatX(V);
+
+        XMVECTOR Result = XMVectorMultiply(W, row3);
+        Result = XMVectorMultiplyAdd(Z, row2, Result);
+        Result = XMVectorMultiplyAdd(Y, row1, Result);
+        Result = XMVectorMultiplyAdd(X, row0, Result);
+
+#ifdef _PREFAST_
+#pragma prefast(push)
+#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
+#endif
+
+        XMStoreFloat4(reinterpret_cast<XMFLOAT4*>(pOutputVector), Result);
+
+#ifdef _PREFAST_
+#pragma prefast(pop)
+#endif
+
+        pInputVector += InputStride;
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    size_t i = 0;
+    size_t four = VectorCount >> 2;
+    if (four > 0)
+    {
+        if ((InputStride == sizeof(XMFLOAT4)) && (OutputStride == sizeof(XMFLOAT4)))
+        {
+            for (size_t j = 0; j < four; ++j)
+            {
+                float32x4x4_t V = vld4q_f32(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += sizeof(XMFLOAT4) * 4;
+
+                float32x2_t r = vget_low_f32(row0);
+                XMVECTOR vResult0 = vmulq_lane_f32(V.val[0], r, 0); // Ax
+                XMVECTOR vResult1 = vmulq_lane_f32(V.val[0], r, 1); // Bx
+
+                XM_PREFETCH(pInputVector);
+
+                r = vget_high_f32(row0);
+                XMVECTOR vResult2 = vmulq_lane_f32(V.val[0], r, 0); // Cx
+                XMVECTOR vResult3 = vmulq_lane_f32(V.val[0], r, 1); // Dx
+
+                XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE);
+
+                r = vget_low_f32(row1);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2));
+
+                r = vget_high_f32(row1);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy
+                vResult3 = vmlaq_lane_f32(vResult3, V.val[1], r, 1); // Dx+Hy
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3));
+
+                r = vget_low_f32(row2);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4));
+
+                r = vget_high_f32(row2);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz
+                vResult3 = vmlaq_lane_f32(vResult3, V.val[2], r, 1); // Dx+Hy+Lz
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5));
+
+                r = vget_low_f32(row3);
+                vResult0 = vmlaq_lane_f32(vResult0, V.val[3], r, 0); // Ax+Ey+Iz+Mw
+                vResult1 = vmlaq_lane_f32(vResult1, V.val[3], r, 1); // Bx+Fy+Jz+Nw
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 6));
+
+                r = vget_high_f32(row3);
+                vResult2 = vmlaq_lane_f32(vResult2, V.val[3], r, 0); // Cx+Gy+Kz+Ow
+                vResult3 = vmlaq_lane_f32(vResult3, V.val[3], r, 1); // Dx+Hy+Lz+Pw
+
+                XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 7));
+
+                V.val[0] = vResult0;
+                V.val[1] = vResult1;
+                V.val[2] = vResult2;
+                V.val[3] = vResult3;
+
+                vst4q_f32(reinterpret_cast<float*>(pOutputVector), V);
+                pOutputVector += sizeof(XMFLOAT4) * 4;
+
+                i += 4;
+            }
+        }
+    }
+
+    for (; i < VectorCount; i++)
+    {
+        XMVECTOR V = vld1q_f32(reinterpret_cast<const float*>(pInputVector));
+        pInputVector += InputStride;
+
+        float32x2_t VL = vget_low_f32(V);
+        XMVECTOR vResult = vmulq_lane_f32(row0, VL, 0); // X
+        vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y
+        float32x2_t VH = vget_high_f32(V);
+        vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z
+        vResult = vmlaq_lane_f32(vResult, row3, VH, 1); // W
+
+        vst1q_f32(reinterpret_cast<float*>(pOutputVector), vResult);
+        pOutputVector += OutputStride;
+    }
+
+    return pOutputStream;
+#elif defined(_XM_AVX2_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    size_t i = 0;
+    size_t two = VectorCount >> 1;
+    if (two > 0)
+    {
+        __m256 row0 = _mm256_broadcast_ps(&M.r[0]);
+        __m256 row1 = _mm256_broadcast_ps(&M.r[1]);
+        __m256 row2 = _mm256_broadcast_ps(&M.r[2]);
+        __m256 row3 = _mm256_broadcast_ps(&M.r[3]);
+
+        if (InputStride == sizeof(XMFLOAT4))
+        {
+            if (OutputStride == sizeof(XMFLOAT4))
+            {
+                if (!(reinterpret_cast<uintptr_t>(pOutputStream) & 0x1F))
+                {
+                    // Packed input, aligned & packed output
+                    for (size_t j = 0; j < two; ++j)
+                    {
+                        __m256 VV = _mm256_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                        pInputVector += sizeof(XMFLOAT4) * 2;
+
+                        __m256 vTempX = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0));
+                        __m256 vTempY = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1));
+                        __m256 vTempZ = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2));
+                        __m256 vTempW = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3));
+
+                        vTempX = _mm256_mul_ps(vTempX, row0);
+                        vTempY = _mm256_mul_ps(vTempY, row1);
+                        vTempZ = _mm256_fmadd_ps(vTempZ, row2, vTempX);
+                        vTempW = _mm256_fmadd_ps(vTempW, row3, vTempY);
+                        vTempX = _mm256_add_ps(vTempZ, vTempW);
+
+                        XM256_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTempX);
+                        pOutputVector += sizeof(XMFLOAT4) * 2;
+
+                        i += 2;
+                    }
+                }
+                else
+                {
+                    // Packed input, packed output
+                    for (size_t j = 0; j < two; ++j)
+                    {
+                        __m256 VV = _mm256_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                        pInputVector += sizeof(XMFLOAT4) * 2;
+
+                        __m256 vTempX = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0));
+                        __m256 vTempY = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1));
+                        __m256 vTempZ = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2));
+                        __m256 vTempW = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3));
+
+                        vTempX = _mm256_mul_ps(vTempX, row0);
+                        vTempY = _mm256_mul_ps(vTempY, row1);
+                        vTempZ = _mm256_fmadd_ps(vTempZ, row2, vTempX);
+                        vTempW = _mm256_fmadd_ps(vTempW, row3, vTempY);
+                        vTempX = _mm256_add_ps(vTempZ, vTempW);
+
+                        _mm256_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTempX);
+                        pOutputVector += sizeof(XMFLOAT4) * 2;
+
+                        i += 2;
+                    }
+                }
+            }
+            else
+            {
+                // Packed input, unpacked output
+                for (size_t j = 0; j < two; ++j)
+                {
+                    __m256 VV = _mm256_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                    pInputVector += sizeof(XMFLOAT4) * 2;
+
+                    __m256 vTempX = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0));
+                    __m256 vTempY = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1));
+                    __m256 vTempZ = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2));
+                    __m256 vTempW = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3));
+
+                    vTempX = _mm256_mul_ps(vTempX, row0);
+                    vTempY = _mm256_mul_ps(vTempY, row1);
+                    vTempZ = _mm256_fmadd_ps(vTempZ, row2, vTempX);
+                    vTempW = _mm256_fmadd_ps(vTempW, row3, vTempY);
+                    vTempX = _mm256_add_ps(vTempZ, vTempW);
+
+                    _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), _mm256_castps256_ps128(vTempX));
+                    pOutputVector += OutputStride;
+
+                    _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), _mm256_extractf128_ps(vTempX, 1));
+                    pOutputVector += OutputStride;
+                    i += 2;
+                }
+            }
+        }
+    }
+
+    if (i < VectorCount)
+    {
+        const XMVECTOR row0 = M.r[0];
+        const XMVECTOR row1 = M.r[1];
+        const XMVECTOR row2 = M.r[2];
+        const XMVECTOR row3 = M.r[3];
+
+        for (; i < VectorCount; i++)
+        {
+            __m128 V = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+            pInputVector += InputStride;
+
+            XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+            XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+            XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+            XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3));
+
+            vTempX = _mm_mul_ps(vTempX, row0);
+            vTempY = _mm_mul_ps(vTempY, row1);
+            vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX);
+            vTempW = XM_FMADD_PS(vTempW, row3, vTempY);
+            vTempX = _mm_add_ps(vTempZ, vTempW);
+
+            _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTempX);
+            pOutputVector += OutputStride;
+        }
+    }
+
+    XM_SFENCE();
+
+    return pOutputStream;
+#elif defined(_XM_SSE_INTRINSICS_)
+    auto pInputVector = reinterpret_cast<const uint8_t*>(pInputStream);
+    auto pOutputVector = reinterpret_cast<uint8_t*>(pOutputStream);
+
+    const XMVECTOR row0 = M.r[0];
+    const XMVECTOR row1 = M.r[1];
+    const XMVECTOR row2 = M.r[2];
+    const XMVECTOR row3 = M.r[3];
+
+    if (!(reinterpret_cast<uintptr_t>(pOutputStream) & 0xF) && !(OutputStride & 0xF))
+    {
+        if (!(reinterpret_cast<uintptr_t>(pInputStream) & 0xF) && !(InputStride & 0xF))
+        {
+            // Aligned input, aligned output
+            for (size_t i = 0; i < VectorCount; i++)
+            {
+                __m128 V = _mm_load_ps(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += InputStride;
+
+                XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+                XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+                XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+                XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3));
+
+                vTempX = _mm_mul_ps(vTempX, row0);
+                vTempY = _mm_mul_ps(vTempY, row1);
+                vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX);
+                vTempW = XM_FMADD_PS(vTempW, row3, vTempY);
+                vTempX = _mm_add_ps(vTempZ, vTempW);
+
+                XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTempX);
+                pOutputVector += OutputStride;
+            }
+        }
+        else
+        {
+            // Unaligned input, aligned output
+            for (size_t i = 0; i < VectorCount; i++)
+            {
+                __m128 V = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += InputStride;
+
+                XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+                XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+                XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+                XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3));
+
+                vTempX = _mm_mul_ps(vTempX, row0);
+                vTempY = _mm_mul_ps(vTempY, row1);
+                vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX);
+                vTempW = XM_FMADD_PS(vTempW, row3, vTempY);
+                vTempX = _mm_add_ps(vTempZ, vTempW);
+
+                XM_STREAM_PS(reinterpret_cast<float*>(pOutputVector), vTempX);
+                pOutputVector += OutputStride;
+            }
+        }
+    }
+    else
+    {
+        if (!(reinterpret_cast<uintptr_t>(pInputStream) & 0xF) && !(InputStride & 0xF))
+        {
+            // Aligned input, unaligned output
+            for (size_t i = 0; i < VectorCount; i++)
+            {
+                __m128 V = _mm_load_ps(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += InputStride;
+
+                XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+                XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+                XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+                XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3));
+
+                vTempX = _mm_mul_ps(vTempX, row0);
+                vTempY = _mm_mul_ps(vTempY, row1);
+                vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX);
+                vTempW = XM_FMADD_PS(vTempW, row3, vTempY);
+                vTempX = _mm_add_ps(vTempZ, vTempW);
+
+                _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTempX);
+                pOutputVector += OutputStride;
+            }
+        }
+        else
+        {
+            // Unaligned input, unaligned output
+            for (size_t i = 0; i < VectorCount; i++)
+            {
+                __m128 V = _mm_loadu_ps(reinterpret_cast<const float*>(pInputVector));
+                pInputVector += InputStride;
+
+                XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0));
+                XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1));
+                XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2));
+                XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3));
+
+                vTempX = _mm_mul_ps(vTempX, row0);
+                vTempY = _mm_mul_ps(vTempY, row1);
+                vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX);
+                vTempW = XM_FMADD_PS(vTempW, row3, vTempY);
+                vTempX = _mm_add_ps(vTempZ, vTempW);
+
+                _mm_storeu_ps(reinterpret_cast<float*>(pOutputVector), vTempX);
+                pOutputVector += OutputStride;
+            }
+        }
+    }
+
+    XM_SFENCE();
+
+    return pOutputStream;
+#endif
+}
+
+/****************************************************************************
+ *
+ * XMVECTOR operators
+ *
+ ****************************************************************************/
+
+#ifndef _XM_NO_XMVECTOR_OVERLOADS_
+
+ //------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V) noexcept
+{
+    return V;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator- (FXMVECTOR V) noexcept
+{
+    return XMVectorNegate(V);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& XM_CALLCONV operator+=
+(
+    XMVECTOR& V1,
+    FXMVECTOR V2
+) noexcept
+{
+    V1 = XMVectorAdd(V1, V2);
+    return V1;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& XM_CALLCONV operator-=
+(
+    XMVECTOR& V1,
+    FXMVECTOR V2
+) noexcept
+{
+    V1 = XMVectorSubtract(V1, V2);
+    return V1;
+}
+
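+//------------------------------------------------------------------------------
+// Usage sketch for the overloads in this section (illustrative only; the
+// vector values below are hypothetical, not part of the API):
+//
+//     XMVECTOR pos = XMVectorSet(1.0f, 2.0f, 3.0f, 0.0f);
+//     XMVECTOR vel = XMVectorSet(0.5f, 0.0f, 0.0f, 0.0f);
+//     pos += vel;          // XMVectorAdd
+//     pos -= vel * 0.5f;   // XMVectorScale, then XMVectorSubtract
+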
+
+/****************************************************************************
+ *
+ * XMVECTOR operators
+ *
+ ****************************************************************************/
+
+#ifndef _XM_NO_XMVECTOR_OVERLOADS_
+
+    //------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V) noexcept
+{
+    return V;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator- (FXMVECTOR V) noexcept
+{
+    return XMVectorNegate(V);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& XM_CALLCONV operator+=
+(
+    XMVECTOR& V1,
+    FXMVECTOR V2
+) noexcept
+{
+    V1 = XMVectorAdd(V1, V2);
+    return V1;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& XM_CALLCONV operator-=
+(
+    XMVECTOR& V1,
+    FXMVECTOR V2
+) noexcept
+{
+    V1 = XMVectorSubtract(V1, V2);
+    return V1;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& XM_CALLCONV operator*=
+(
+    XMVECTOR& V1,
+    FXMVECTOR V2
+) noexcept
+{
+    V1 = XMVectorMultiply(V1, V2);
+    return V1;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& XM_CALLCONV operator/=
+(
+    XMVECTOR& V1,
+    FXMVECTOR V2
+) noexcept
+{
+    V1 = XMVectorDivide(V1, V2);
+    return V1;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& operator*=
+(
+    XMVECTOR& V,
+    const float S
+) noexcept
+{
+    V = XMVectorScale(V, S);
+    return V;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR& operator/=
+(
+    XMVECTOR& V,
+    const float S
+) noexcept
+{
+    XMVECTOR vS = XMVectorReplicate(S);
+    V = XMVectorDivide(V, vS);
+    return V;
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator+
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+    return XMVectorAdd(V1, V2);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator-
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+    return XMVectorSubtract(V1, V2);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator*
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+    return XMVectorMultiply(V1, V2);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator/
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+    return XMVectorDivide(V1, V2);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator*
+(
+    FXMVECTOR V,
+    const float S
+) noexcept
+{
+    return XMVectorScale(V, S);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator/
+(
+    FXMVECTOR V,
+    const float S
+) noexcept
+{
+    XMVECTOR vS = XMVectorReplicate(S);
+    return XMVectorDivide(V, vS);
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV operator*
+(
+    float S,
+    FXMVECTOR V
+) noexcept
+{
+    return XMVectorScale(V, S);
+}
+
+#endif /* !_XM_NO_XMVECTOR_OVERLOADS_ */
+
+#if defined(_XM_NO_INTRINSICS_)
+#undef XMISNAN
+#undef XMISINF
+#endif
+
+#if defined(_XM_SSE_INTRINSICS_)
+#undef XM3UNPACK3INTO4
+#undef XM3PACK4INTO3
+#endif
+
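The remaining hunks swap d3dxmath value types for their directxmath equivalents. directxmath distinguishes storage types (XMFLOAT2/XMFLOAT3/XMFLOAT4, plain structs that are safe in arrays and headers) from the SIMD register type XMVECTOR; there is no dedicated quaternion struct, so D3DXQUATERNION becomes XMFLOAT4 in storage and XMVECTOR in flight, operated on by the XMQuaternion* functions. A minimal sketch of the load/compute/store idiom these declarations assume (variable names illustrative, not part of the patch):

    using namespace DirectX;

    // Storage form: plain structs holding floats.
    XMFLOAT3 t = { 0.0f, 1.0f, 0.0f };
    XMFLOAT4 r = { 0.0f, 0.0f, 0.0f, 1.0f };   // identity quaternion

    // Register form: load before computing, store after.
    XMVECTOR translation = XMLoadFloat3(&t);
    XMVECTOR rotation    = XMLoadFloat4(&r);
    rotation = XMQuaternionNormalize(rotation);
    XMStoreFloat4(&r, rotation);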
diff --git a/include/gltf.hpp b/include/gltf.hpp
index 472c1af..be106b1 100644
--- a/include/gltf.hpp
+++ b/include/gltf.hpp
@@ -2,40 +2,40 @@
 #define GLTF_HPP_
 
 struct Mesh {
-  const D3DXVECTOR3 * position;
-  const DWORD position_size;
+  const XMFLOAT3 * position;
+  const int position_size;
 
-  const D3DXVECTOR3 * normal;
-  const DWORD normal_size;
+  const XMFLOAT3 * normal;
+  const int normal_size;
 
-  const D3DXVECTOR2 * texcoord_0;
-  const DWORD texcoord_0_size;
+  const XMFLOAT2 * texcoord_0;
+  const int texcoord_0_size;
 
-  const D3DXVECTOR4 * weights_0;
-  const DWORD weights_0_size;
+  const XMFLOAT4 * weights_0;
+  const int weights_0_size;
 
-  const D3DXVECTOR4 * joints_0;
-  const DWORD joints_0_size;
+  const XMFLOAT4 * joints_0;
+  const int joints_0_size;
 
-  const DWORD * indices;
-  const DWORD indices_size;
+  const int * indices;
+  const int indices_size;
 };
 
 struct Skin;
 
 struct Node {
-  const DWORD parent_ix;
+  const int parent_ix;
   const Skin * skin; // skin index (global)
   const Mesh * mesh; // mesh index (global)
-  const D3DXVECTOR3 translation;
-  const D3DXQUATERNION rotation;
-  const D3DXVECTOR3 scale;
+  const XMFLOAT3 translation;
+  const XMFLOAT4 rotation;
+  const XMFLOAT3 scale;
 };
 
 struct Skin {
-  const D3DXMATRIX * inverse_bind_matrices; // accessor
+  const XMMATRIX * inverse_bind_matrices; // accessor
   const int * joints;
-  DWORD joints_length;
+  int joints_length;
 };
 
 enum AnimationChannelPath {
diff --git a/include/gltf_instance.hpp b/include/gltf_instance.hpp
index 4ab5d48..dd9201d 100644
--- a/include/gltf_instance.hpp
+++ b/include/gltf_instance.hpp
@@ -2,9 +2,9 @@
 #define GLTF_INSTANCE_HPP_
 
 struct NodeInstance {
-  D3DXVECTOR3 translation;
-  D3DXQUATERNION rotation;
-  D3DXVECTOR3 scale;
+  XMVECTOR translation;
+  XMVECTOR rotation;
+  XMVECTOR scale;
 };
 
 #endif
diff --git a/include/robot_player.hpp b/include/robot_player.hpp
index c4ff7b7..9b97c6d 100644
--- a/include/robot_player.hpp
+++ b/include/robot_player.hpp
@@ -1,3029 +1,3033 @@
+#pragma once
+#ifndef _ROBOT_PLAYER_HPP_
+#define _ROBOT_PLAYER_HPP_
+namespace robot_player {
 const int skin_0__joints__length = 39;
 
-extern const D3DXVECTOR3 accessor_0[];
+extern const XMFLOAT3 accessor_0[];
 const int accessor_0__length = 464;
-const int accessor_0__size = (sizeof (D3DXVECTOR3)) * 464;
+const int accessor_0__size = (sizeof (XMFLOAT3)) * 464;
 
-extern const D3DXVECTOR3 accessor_1[];
+extern const XMFLOAT3 accessor_1[];
 const int accessor_1__length = 464;
-const int accessor_1__size = (sizeof (D3DXVECTOR3)) * 464;
+const int accessor_1__size = (sizeof (XMFLOAT3)) * 464;
 
-extern const D3DXVECTOR2 accessor_2[];
+extern const XMFLOAT2 accessor_2[];
 const int accessor_2__length = 464;
-const int accessor_2__size = (sizeof (D3DXVECTOR2)) * 464;
+const int accessor_2__size = (sizeof (XMFLOAT2)) * 464;
 
-extern const D3DXVECTOR4 accessor_3[];
+extern const XMFLOAT4 accessor_3[];
 const int accessor_3__length = 464;
-const int accessor_3__size = (sizeof (D3DXVECTOR4)) * 464;
+const int accessor_3__size = (sizeof (XMFLOAT4)) * 464;
 
-extern const D3DXVECTOR4 accessor_4[];
+extern const XMFLOAT4 accessor_4[];
 const int accessor_4__length = 464;
-const int accessor_4__size = (sizeof (D3DXVECTOR4)) * 464;
+const int accessor_4__size = (sizeof (XMFLOAT4)) * 464;
 
-extern const D3DXVECTOR4 accessor_5[];
+extern const XMFLOAT4 accessor_5[];
 const int accessor_5__length = 464;
-const int accessor_5__size = (sizeof (D3DXVECTOR4)) * 464;
+const int accessor_5__size = (sizeof (XMFLOAT4)) * 464;
 
-extern const D3DXVECTOR4 accessor_6[];
+extern const XMFLOAT4 accessor_6[];
 const int accessor_6__length = 464;
-const int accessor_6__size = (sizeof (D3DXVECTOR4)) * 464;
+const int accessor_6__size = (sizeof (XMFLOAT4)) * 464;
 
-extern const D3DXVECTOR4 accessor_7[];
+extern const XMFLOAT4 accessor_7[];
 const int accessor_7__length = 464;
-const int accessor_7__size = (sizeof (D3DXVECTOR4)) * 464;
+const int accessor_7__size = (sizeof (XMFLOAT4)) * 464;
 
-extern const DWORD accessor_8[];
+extern const int accessor_8[];
 const int accessor_8__length = 798;
-const int accessor_8__size = (sizeof (DWORD)) * 798;
+const int accessor_8__size = (sizeof (int)) * 798;
 
-extern const D3DXMATRIX accessor_9[];
+extern const XMMATRIX accessor_9[];
 const int accessor_9__length = 39;
-const int accessor_9__size = (sizeof (D3DXMATRIX)) * 39;
+const int accessor_9__size = (sizeof (XMMATRIX)) * 39;
 
 extern const float accessor_10[];
 const int accessor_10__length = 101;
 const int accessor_10__size = (sizeof (float)) *
101; -extern const D3DXVECTOR3 accessor_11[]; +extern const XMFLOAT3 accessor_11[]; const int accessor_11__length = 101; -const int accessor_11__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_11__size = (sizeof (XMFLOAT3)) * 101; -extern const D3DXVECTOR4 accessor_12[]; +extern const XMFLOAT4 accessor_12[]; const int accessor_12__length = 101; -const int accessor_12__size = (sizeof (D3DXVECTOR4)) * 101; +const int accessor_12__size = (sizeof (XMFLOAT4)) * 101; -extern const D3DXVECTOR3 accessor_13[]; +extern const XMFLOAT3 accessor_13[]; const int accessor_13__length = 101; -const int accessor_13__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_13__size = (sizeof (XMFLOAT3)) * 101; extern const float accessor_14[]; const int accessor_14__length = 2; const int accessor_14__size = (sizeof (float)) * 2; -extern const D3DXVECTOR3 accessor_15[]; +extern const XMFLOAT3 accessor_15[]; const int accessor_15__length = 2; -const int accessor_15__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_15__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_16[]; +extern const XMFLOAT4 accessor_16[]; const int accessor_16__length = 2; -const int accessor_16__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_16__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_17[]; +extern const XMFLOAT3 accessor_17[]; const int accessor_17__length = 2; -const int accessor_17__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_17__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_18[]; +extern const XMFLOAT3 accessor_18[]; const int accessor_18__length = 2; -const int accessor_18__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_18__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_19[]; +extern const XMFLOAT4 accessor_19[]; const int accessor_19__length = 2; -const int accessor_19__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_19__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_20[]; +extern const XMFLOAT3 accessor_20[]; const int accessor_20__length = 2; -const int accessor_20__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_20__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_21[]; +extern const XMFLOAT3 accessor_21[]; const int accessor_21__length = 101; -const int accessor_21__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_21__size = (sizeof (XMFLOAT3)) * 101; -extern const D3DXVECTOR4 accessor_22[]; +extern const XMFLOAT4 accessor_22[]; const int accessor_22__length = 101; -const int accessor_22__size = (sizeof (D3DXVECTOR4)) * 101; +const int accessor_22__size = (sizeof (XMFLOAT4)) * 101; -extern const D3DXVECTOR3 accessor_23[]; +extern const XMFLOAT3 accessor_23[]; const int accessor_23__length = 2; -const int accessor_23__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_23__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_24[]; +extern const XMFLOAT3 accessor_24[]; const int accessor_24__length = 101; -const int accessor_24__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_24__size = (sizeof (XMFLOAT3)) * 101; -extern const D3DXVECTOR4 accessor_25[]; +extern const XMFLOAT4 accessor_25[]; const int accessor_25__length = 101; -const int accessor_25__size = (sizeof (D3DXVECTOR4)) * 101; +const int accessor_25__size = (sizeof (XMFLOAT4)) * 101; -extern const D3DXVECTOR3 accessor_26[]; +extern const XMFLOAT3 accessor_26[]; const int accessor_26__length = 2; -const int accessor_26__size = (sizeof (D3DXVECTOR3)) * 2; +const 
int accessor_26__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_27[]; +extern const XMFLOAT3 accessor_27[]; const int accessor_27__length = 2; -const int accessor_27__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_27__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_28[]; +extern const XMFLOAT4 accessor_28[]; const int accessor_28__length = 2; -const int accessor_28__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_28__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_29[]; +extern const XMFLOAT3 accessor_29[]; const int accessor_29__length = 2; -const int accessor_29__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_29__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_30[]; +extern const XMFLOAT3 accessor_30[]; const int accessor_30__length = 2; -const int accessor_30__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_30__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_31[]; +extern const XMFLOAT4 accessor_31[]; const int accessor_31__length = 2; -const int accessor_31__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_31__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_32[]; +extern const XMFLOAT3 accessor_32[]; const int accessor_32__length = 2; -const int accessor_32__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_32__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_33[]; +extern const XMFLOAT3 accessor_33[]; const int accessor_33__length = 2; -const int accessor_33__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_33__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_34[]; +extern const XMFLOAT4 accessor_34[]; const int accessor_34__length = 2; -const int accessor_34__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_34__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_35[]; +extern const XMFLOAT3 accessor_35[]; const int accessor_35__length = 2; -const int accessor_35__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_35__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_36[]; +extern const XMFLOAT3 accessor_36[]; const int accessor_36__length = 101; -const int accessor_36__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_36__size = (sizeof (XMFLOAT3)) * 101; -extern const D3DXVECTOR4 accessor_37[]; +extern const XMFLOAT4 accessor_37[]; const int accessor_37__length = 101; -const int accessor_37__size = (sizeof (D3DXVECTOR4)) * 101; +const int accessor_37__size = (sizeof (XMFLOAT4)) * 101; -extern const D3DXVECTOR3 accessor_38[]; +extern const XMFLOAT3 accessor_38[]; const int accessor_38__length = 2; -const int accessor_38__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_38__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_39[]; +extern const XMFLOAT3 accessor_39[]; const int accessor_39__length = 2; -const int accessor_39__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_39__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_40[]; +extern const XMFLOAT4 accessor_40[]; const int accessor_40__length = 2; -const int accessor_40__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_40__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_41[]; +extern const XMFLOAT3 accessor_41[]; const int accessor_41__length = 2; -const int accessor_41__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_41__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_42[]; +extern const 
XMFLOAT3 accessor_42[]; const int accessor_42__length = 2; -const int accessor_42__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_42__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_43[]; +extern const XMFLOAT4 accessor_43[]; const int accessor_43__length = 2; -const int accessor_43__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_43__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_44[]; +extern const XMFLOAT3 accessor_44[]; const int accessor_44__length = 2; -const int accessor_44__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_44__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_45[]; +extern const XMFLOAT3 accessor_45[]; const int accessor_45__length = 2; -const int accessor_45__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_45__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_46[]; +extern const XMFLOAT4 accessor_46[]; const int accessor_46__length = 2; -const int accessor_46__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_46__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_47[]; +extern const XMFLOAT3 accessor_47[]; const int accessor_47__length = 2; -const int accessor_47__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_47__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_48[]; +extern const XMFLOAT3 accessor_48[]; const int accessor_48__length = 2; -const int accessor_48__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_48__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_49[]; +extern const XMFLOAT4 accessor_49[]; const int accessor_49__length = 2; -const int accessor_49__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_49__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_50[]; +extern const XMFLOAT3 accessor_50[]; const int accessor_50__length = 2; -const int accessor_50__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_50__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_51[]; +extern const XMFLOAT3 accessor_51[]; const int accessor_51__length = 2; -const int accessor_51__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_51__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_52[]; +extern const XMFLOAT4 accessor_52[]; const int accessor_52__length = 2; -const int accessor_52__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_52__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_53[]; +extern const XMFLOAT3 accessor_53[]; const int accessor_53__length = 2; -const int accessor_53__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_53__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_54[]; +extern const XMFLOAT3 accessor_54[]; const int accessor_54__length = 2; -const int accessor_54__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_54__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_55[]; +extern const XMFLOAT4 accessor_55[]; const int accessor_55__length = 2; -const int accessor_55__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_55__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_56[]; +extern const XMFLOAT3 accessor_56[]; const int accessor_56__length = 2; -const int accessor_56__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_56__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_57[]; +extern const XMFLOAT3 accessor_57[]; const int accessor_57__length = 2; -const int accessor_57__size = (sizeof (D3DXVECTOR3)) * 2; 
+const int accessor_57__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_58[]; +extern const XMFLOAT4 accessor_58[]; const int accessor_58__length = 2; -const int accessor_58__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_58__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_59[]; +extern const XMFLOAT3 accessor_59[]; const int accessor_59__length = 2; -const int accessor_59__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_59__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_60[]; +extern const XMFLOAT3 accessor_60[]; const int accessor_60__length = 2; -const int accessor_60__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_60__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_61[]; +extern const XMFLOAT4 accessor_61[]; const int accessor_61__length = 2; -const int accessor_61__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_61__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_62[]; +extern const XMFLOAT3 accessor_62[]; const int accessor_62__length = 2; -const int accessor_62__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_62__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_63[]; +extern const XMFLOAT3 accessor_63[]; const int accessor_63__length = 2; -const int accessor_63__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_63__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_64[]; +extern const XMFLOAT4 accessor_64[]; const int accessor_64__length = 2; -const int accessor_64__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_64__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_65[]; +extern const XMFLOAT3 accessor_65[]; const int accessor_65__length = 2; -const int accessor_65__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_65__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_66[]; +extern const XMFLOAT3 accessor_66[]; const int accessor_66__length = 2; -const int accessor_66__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_66__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_67[]; +extern const XMFLOAT4 accessor_67[]; const int accessor_67__length = 2; -const int accessor_67__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_67__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_68[]; +extern const XMFLOAT3 accessor_68[]; const int accessor_68__length = 2; -const int accessor_68__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_68__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_69[]; +extern const XMFLOAT3 accessor_69[]; const int accessor_69__length = 2; -const int accessor_69__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_69__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_70[]; +extern const XMFLOAT4 accessor_70[]; const int accessor_70__length = 2; -const int accessor_70__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_70__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_71[]; +extern const XMFLOAT3 accessor_71[]; const int accessor_71__length = 2; -const int accessor_71__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_71__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_72[]; +extern const XMFLOAT3 accessor_72[]; const int accessor_72__length = 2; -const int accessor_72__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_72__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_73[]; +extern const XMFLOAT4 
accessor_73[]; const int accessor_73__length = 2; -const int accessor_73__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_73__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_74[]; +extern const XMFLOAT3 accessor_74[]; const int accessor_74__length = 2; -const int accessor_74__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_74__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_75[]; +extern const XMFLOAT3 accessor_75[]; const int accessor_75__length = 2; -const int accessor_75__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_75__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_76[]; +extern const XMFLOAT4 accessor_76[]; const int accessor_76__length = 2; -const int accessor_76__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_76__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_77[]; +extern const XMFLOAT3 accessor_77[]; const int accessor_77__length = 2; -const int accessor_77__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_77__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_78[]; +extern const XMFLOAT3 accessor_78[]; const int accessor_78__length = 2; -const int accessor_78__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_78__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_79[]; +extern const XMFLOAT4 accessor_79[]; const int accessor_79__length = 2; -const int accessor_79__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_79__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_80[]; +extern const XMFLOAT3 accessor_80[]; const int accessor_80__length = 2; -const int accessor_80__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_80__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_81[]; +extern const XMFLOAT3 accessor_81[]; const int accessor_81__length = 2; -const int accessor_81__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_81__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_82[]; +extern const XMFLOAT4 accessor_82[]; const int accessor_82__length = 2; -const int accessor_82__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_82__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_83[]; +extern const XMFLOAT3 accessor_83[]; const int accessor_83__length = 2; -const int accessor_83__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_83__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_84[]; +extern const XMFLOAT3 accessor_84[]; const int accessor_84__length = 2; -const int accessor_84__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_84__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_85[]; +extern const XMFLOAT4 accessor_85[]; const int accessor_85__length = 2; -const int accessor_85__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_85__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_86[]; +extern const XMFLOAT3 accessor_86[]; const int accessor_86__length = 2; -const int accessor_86__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_86__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_87[]; +extern const XMFLOAT3 accessor_87[]; const int accessor_87__length = 2; -const int accessor_87__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_87__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_88[]; +extern const XMFLOAT4 accessor_88[]; const int accessor_88__length = 2; -const int accessor_88__size = (sizeof (D3DXVECTOR4)) * 2; +const int 
accessor_88__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_89[]; +extern const XMFLOAT3 accessor_89[]; const int accessor_89__length = 2; -const int accessor_89__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_89__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_90[]; +extern const XMFLOAT3 accessor_90[]; const int accessor_90__length = 2; -const int accessor_90__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_90__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_91[]; +extern const XMFLOAT4 accessor_91[]; const int accessor_91__length = 2; -const int accessor_91__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_91__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_92[]; +extern const XMFLOAT3 accessor_92[]; const int accessor_92__length = 2; -const int accessor_92__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_92__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_93[]; +extern const XMFLOAT3 accessor_93[]; const int accessor_93__length = 2; -const int accessor_93__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_93__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_94[]; +extern const XMFLOAT4 accessor_94[]; const int accessor_94__length = 2; -const int accessor_94__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_94__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_95[]; +extern const XMFLOAT3 accessor_95[]; const int accessor_95__length = 2; -const int accessor_95__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_95__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_96[]; +extern const XMFLOAT3 accessor_96[]; const int accessor_96__length = 2; -const int accessor_96__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_96__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_97[]; +extern const XMFLOAT4 accessor_97[]; const int accessor_97__length = 2; -const int accessor_97__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_97__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_98[]; +extern const XMFLOAT3 accessor_98[]; const int accessor_98__length = 2; -const int accessor_98__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_98__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_99[]; +extern const XMFLOAT3 accessor_99[]; const int accessor_99__length = 101; -const int accessor_99__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_99__size = (sizeof (XMFLOAT3)) * 101; -extern const D3DXVECTOR4 accessor_100[]; +extern const XMFLOAT4 accessor_100[]; const int accessor_100__length = 2; -const int accessor_100__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_100__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_101[]; +extern const XMFLOAT3 accessor_101[]; const int accessor_101__length = 2; -const int accessor_101__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_101__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_102[]; +extern const XMFLOAT3 accessor_102[]; const int accessor_102__length = 2; -const int accessor_102__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_102__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_103[]; +extern const XMFLOAT4 accessor_103[]; const int accessor_103__length = 2; -const int accessor_103__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_103__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_104[]; 
+extern const XMFLOAT3 accessor_104[]; const int accessor_104__length = 2; -const int accessor_104__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_104__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_105[]; +extern const XMFLOAT3 accessor_105[]; const int accessor_105__length = 2; -const int accessor_105__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_105__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_106[]; +extern const XMFLOAT4 accessor_106[]; const int accessor_106__length = 2; -const int accessor_106__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_106__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_107[]; +extern const XMFLOAT3 accessor_107[]; const int accessor_107__length = 2; -const int accessor_107__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_107__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_108[]; +extern const XMFLOAT3 accessor_108[]; const int accessor_108__length = 2; -const int accessor_108__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_108__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_109[]; +extern const XMFLOAT4 accessor_109[]; const int accessor_109__length = 2; -const int accessor_109__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_109__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_110[]; +extern const XMFLOAT3 accessor_110[]; const int accessor_110__length = 2; -const int accessor_110__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_110__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_111[]; +extern const XMFLOAT3 accessor_111[]; const int accessor_111__length = 2; -const int accessor_111__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_111__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_112[]; +extern const XMFLOAT4 accessor_112[]; const int accessor_112__length = 2; -const int accessor_112__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_112__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_113[]; +extern const XMFLOAT3 accessor_113[]; const int accessor_113__length = 2; -const int accessor_113__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_113__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_114[]; +extern const XMFLOAT3 accessor_114[]; const int accessor_114__length = 2; -const int accessor_114__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_114__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_115[]; +extern const XMFLOAT4 accessor_115[]; const int accessor_115__length = 2; -const int accessor_115__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_115__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_116[]; +extern const XMFLOAT3 accessor_116[]; const int accessor_116__length = 2; -const int accessor_116__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_116__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_117[]; +extern const XMFLOAT3 accessor_117[]; const int accessor_117__length = 101; -const int accessor_117__size = (sizeof (D3DXVECTOR3)) * 101; +const int accessor_117__size = (sizeof (XMFLOAT3)) * 101; -extern const D3DXVECTOR4 accessor_118[]; +extern const XMFLOAT4 accessor_118[]; const int accessor_118__length = 2; -const int accessor_118__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_118__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_119[]; +extern const XMFLOAT3 accessor_119[]; 
const int accessor_119__length = 2; -const int accessor_119__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_119__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_120[]; +extern const XMFLOAT3 accessor_120[]; const int accessor_120__length = 2; -const int accessor_120__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_120__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_121[]; +extern const XMFLOAT4 accessor_121[]; const int accessor_121__length = 2; -const int accessor_121__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_121__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_122[]; +extern const XMFLOAT3 accessor_122[]; const int accessor_122__length = 2; -const int accessor_122__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_122__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_123[]; +extern const XMFLOAT3 accessor_123[]; const int accessor_123__length = 2; -const int accessor_123__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_123__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_124[]; +extern const XMFLOAT4 accessor_124[]; const int accessor_124__length = 2; -const int accessor_124__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_124__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_125[]; +extern const XMFLOAT3 accessor_125[]; const int accessor_125__length = 2; -const int accessor_125__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_125__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_126[]; +extern const XMFLOAT3 accessor_126[]; const int accessor_126__length = 2; -const int accessor_126__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_126__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_127[]; +extern const XMFLOAT4 accessor_127[]; const int accessor_127__length = 2; -const int accessor_127__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_127__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_128[]; +extern const XMFLOAT3 accessor_128[]; const int accessor_128__length = 2; -const int accessor_128__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_128__size = (sizeof (XMFLOAT3)) * 2; extern const float accessor_129[]; const int accessor_129__length = 91; const int accessor_129__size = (sizeof (float)) * 91; -extern const D3DXVECTOR3 accessor_130[]; +extern const XMFLOAT3 accessor_130[]; const int accessor_130__length = 91; -const int accessor_130__size = (sizeof (D3DXVECTOR3)) * 91; +const int accessor_130__size = (sizeof (XMFLOAT3)) * 91; -extern const D3DXVECTOR4 accessor_131[]; +extern const XMFLOAT4 accessor_131[]; const int accessor_131__length = 91; -const int accessor_131__size = (sizeof (D3DXVECTOR4)) * 91; +const int accessor_131__size = (sizeof (XMFLOAT4)) * 91; extern const float accessor_132[]; const int accessor_132__length = 2; const int accessor_132__size = (sizeof (float)) * 2; -extern const D3DXVECTOR3 accessor_133[]; +extern const XMFLOAT3 accessor_133[]; const int accessor_133__length = 2; -const int accessor_133__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_133__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_134[]; +extern const XMFLOAT3 accessor_134[]; const int accessor_134__length = 2; -const int accessor_134__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_134__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_135[]; +extern const XMFLOAT4 accessor_135[]; const int 
accessor_135__length = 2; -const int accessor_135__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_135__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_136[]; +extern const XMFLOAT3 accessor_136[]; const int accessor_136__length = 2; -const int accessor_136__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_136__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_137[]; +extern const XMFLOAT3 accessor_137[]; const int accessor_137__length = 2; -const int accessor_137__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_137__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_138[]; +extern const XMFLOAT4 accessor_138[]; const int accessor_138__length = 2; -const int accessor_138__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_138__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_139[]; +extern const XMFLOAT3 accessor_139[]; const int accessor_139__length = 2; -const int accessor_139__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_139__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_140[]; +extern const XMFLOAT3 accessor_140[]; const int accessor_140__length = 91; -const int accessor_140__size = (sizeof (D3DXVECTOR3)) * 91; +const int accessor_140__size = (sizeof (XMFLOAT3)) * 91; -extern const D3DXVECTOR4 accessor_141[]; +extern const XMFLOAT4 accessor_141[]; const int accessor_141__length = 91; -const int accessor_141__size = (sizeof (D3DXVECTOR4)) * 91; +const int accessor_141__size = (sizeof (XMFLOAT4)) * 91; -extern const D3DXVECTOR3 accessor_142[]; +extern const XMFLOAT3 accessor_142[]; const int accessor_142__length = 2; -const int accessor_142__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_142__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_143[]; +extern const XMFLOAT3 accessor_143[]; const int accessor_143__length = 91; -const int accessor_143__size = (sizeof (D3DXVECTOR3)) * 91; +const int accessor_143__size = (sizeof (XMFLOAT3)) * 91; -extern const D3DXVECTOR4 accessor_144[]; +extern const XMFLOAT4 accessor_144[]; const int accessor_144__length = 91; -const int accessor_144__size = (sizeof (D3DXVECTOR4)) * 91; +const int accessor_144__size = (sizeof (XMFLOAT4)) * 91; -extern const D3DXVECTOR3 accessor_145[]; +extern const XMFLOAT3 accessor_145[]; const int accessor_145__length = 91; -const int accessor_145__size = (sizeof (D3DXVECTOR3)) * 91; +const int accessor_145__size = (sizeof (XMFLOAT3)) * 91; -extern const D3DXVECTOR3 accessor_146[]; +extern const XMFLOAT3 accessor_146[]; const int accessor_146__length = 2; -const int accessor_146__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_146__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_147[]; +extern const XMFLOAT4 accessor_147[]; const int accessor_147__length = 2; -const int accessor_147__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_147__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_148[]; +extern const XMFLOAT3 accessor_148[]; const int accessor_148__length = 2; -const int accessor_148__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_148__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_149[]; +extern const XMFLOAT3 accessor_149[]; const int accessor_149__length = 2; -const int accessor_149__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_149__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_150[]; +extern const XMFLOAT4 accessor_150[]; const int accessor_150__length = 2; 
-const int accessor_150__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_150__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_151[]; +extern const XMFLOAT3 accessor_151[]; const int accessor_151__length = 2; -const int accessor_151__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_151__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_152[]; +extern const XMFLOAT3 accessor_152[]; const int accessor_152__length = 2; -const int accessor_152__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_152__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_153[]; +extern const XMFLOAT4 accessor_153[]; const int accessor_153__length = 2; -const int accessor_153__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_153__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_154[]; +extern const XMFLOAT3 accessor_154[]; const int accessor_154__length = 2; -const int accessor_154__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_154__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_155[]; +extern const XMFLOAT3 accessor_155[]; const int accessor_155__length = 91; -const int accessor_155__size = (sizeof (D3DXVECTOR3)) * 91; +const int accessor_155__size = (sizeof (XMFLOAT3)) * 91; -extern const D3DXVECTOR4 accessor_156[]; +extern const XMFLOAT4 accessor_156[]; const int accessor_156__length = 91; -const int accessor_156__size = (sizeof (D3DXVECTOR4)) * 91; +const int accessor_156__size = (sizeof (XMFLOAT4)) * 91; -extern const D3DXVECTOR3 accessor_157[]; +extern const XMFLOAT3 accessor_157[]; const int accessor_157__length = 2; -const int accessor_157__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_157__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_158[]; +extern const XMFLOAT3 accessor_158[]; const int accessor_158__length = 2; -const int accessor_158__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_158__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_159[]; +extern const XMFLOAT4 accessor_159[]; const int accessor_159__length = 2; -const int accessor_159__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_159__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_160[]; +extern const XMFLOAT3 accessor_160[]; const int accessor_160__length = 2; -const int accessor_160__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_160__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_161[]; +extern const XMFLOAT3 accessor_161[]; const int accessor_161__length = 2; -const int accessor_161__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_161__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_162[]; +extern const XMFLOAT4 accessor_162[]; const int accessor_162__length = 2; -const int accessor_162__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_162__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_163[]; +extern const XMFLOAT3 accessor_163[]; const int accessor_163__length = 2; -const int accessor_163__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_163__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_164[]; +extern const XMFLOAT3 accessor_164[]; const int accessor_164__length = 2; -const int accessor_164__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_164__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_165[]; +extern const XMFLOAT4 accessor_165[]; const int accessor_165__length = 2; -const int accessor_165__size = 
(sizeof (D3DXVECTOR4)) * 2; +const int accessor_165__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_166[]; +extern const XMFLOAT3 accessor_166[]; const int accessor_166__length = 2; -const int accessor_166__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_166__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_167[]; +extern const XMFLOAT3 accessor_167[]; const int accessor_167__length = 2; -const int accessor_167__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_167__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_168[]; +extern const XMFLOAT4 accessor_168[]; const int accessor_168__length = 2; -const int accessor_168__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_168__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_169[]; +extern const XMFLOAT3 accessor_169[]; const int accessor_169__length = 2; -const int accessor_169__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_169__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_170[]; +extern const XMFLOAT3 accessor_170[]; const int accessor_170__length = 2; -const int accessor_170__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_170__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_171[]; +extern const XMFLOAT4 accessor_171[]; const int accessor_171__length = 2; -const int accessor_171__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_171__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_172[]; +extern const XMFLOAT3 accessor_172[]; const int accessor_172__length = 2; -const int accessor_172__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_172__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_173[]; +extern const XMFLOAT3 accessor_173[]; const int accessor_173__length = 2; -const int accessor_173__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_173__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_174[]; +extern const XMFLOAT4 accessor_174[]; const int accessor_174__length = 2; -const int accessor_174__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_174__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_175[]; +extern const XMFLOAT3 accessor_175[]; const int accessor_175__length = 2; -const int accessor_175__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_175__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_176[]; +extern const XMFLOAT3 accessor_176[]; const int accessor_176__length = 2; -const int accessor_176__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_176__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_177[]; +extern const XMFLOAT4 accessor_177[]; const int accessor_177__length = 2; -const int accessor_177__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_177__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_178[]; +extern const XMFLOAT3 accessor_178[]; const int accessor_178__length = 2; -const int accessor_178__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_178__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_179[]; +extern const XMFLOAT3 accessor_179[]; const int accessor_179__length = 2; -const int accessor_179__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_179__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_180[]; +extern const XMFLOAT4 accessor_180[]; const int accessor_180__length = 2; -const int accessor_180__size = (sizeof (D3DXVECTOR4)) * 2; +const int 
accessor_180__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_181[]; +extern const XMFLOAT3 accessor_181[]; const int accessor_181__length = 2; -const int accessor_181__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_181__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_182[]; +extern const XMFLOAT3 accessor_182[]; const int accessor_182__length = 2; -const int accessor_182__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_182__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_183[]; +extern const XMFLOAT4 accessor_183[]; const int accessor_183__length = 2; -const int accessor_183__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_183__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_184[]; +extern const XMFLOAT3 accessor_184[]; const int accessor_184__length = 2; -const int accessor_184__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_184__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_185[]; +extern const XMFLOAT3 accessor_185[]; const int accessor_185__length = 2; -const int accessor_185__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_185__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_186[]; +extern const XMFLOAT4 accessor_186[]; const int accessor_186__length = 2; -const int accessor_186__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_186__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_187[]; +extern const XMFLOAT3 accessor_187[]; const int accessor_187__length = 2; -const int accessor_187__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_187__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_188[]; +extern const XMFLOAT3 accessor_188[]; const int accessor_188__length = 2; -const int accessor_188__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_188__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_189[]; +extern const XMFLOAT4 accessor_189[]; const int accessor_189__length = 2; -const int accessor_189__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_189__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_190[]; +extern const XMFLOAT3 accessor_190[]; const int accessor_190__length = 2; -const int accessor_190__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_190__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_191[]; +extern const XMFLOAT3 accessor_191[]; const int accessor_191__length = 2; -const int accessor_191__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_191__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_192[]; +extern const XMFLOAT4 accessor_192[]; const int accessor_192__length = 2; -const int accessor_192__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_192__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_193[]; +extern const XMFLOAT3 accessor_193[]; const int accessor_193__length = 2; -const int accessor_193__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_193__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_194[]; +extern const XMFLOAT3 accessor_194[]; const int accessor_194__length = 2; -const int accessor_194__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_194__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_195[]; +extern const XMFLOAT4 accessor_195[]; const int accessor_195__length = 2; -const int accessor_195__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_195__size = (sizeof (XMFLOAT4)) * 
2; -extern const D3DXVECTOR3 accessor_196[]; +extern const XMFLOAT3 accessor_196[]; const int accessor_196__length = 2; -const int accessor_196__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_196__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_197[]; +extern const XMFLOAT3 accessor_197[]; const int accessor_197__length = 2; -const int accessor_197__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_197__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_198[]; +extern const XMFLOAT4 accessor_198[]; const int accessor_198__length = 2; -const int accessor_198__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_198__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_199[]; +extern const XMFLOAT3 accessor_199[]; const int accessor_199__length = 2; -const int accessor_199__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_199__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_200[]; +extern const XMFLOAT3 accessor_200[]; const int accessor_200__length = 2; -const int accessor_200__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_200__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_201[]; +extern const XMFLOAT4 accessor_201[]; const int accessor_201__length = 2; -const int accessor_201__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_201__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_202[]; +extern const XMFLOAT3 accessor_202[]; const int accessor_202__length = 2; -const int accessor_202__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_202__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_203[]; +extern const XMFLOAT3 accessor_203[]; const int accessor_203__length = 2; -const int accessor_203__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_203__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_204[]; +extern const XMFLOAT4 accessor_204[]; const int accessor_204__length = 2; -const int accessor_204__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_204__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_205[]; +extern const XMFLOAT3 accessor_205[]; const int accessor_205__length = 2; -const int accessor_205__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_205__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_206[]; +extern const XMFLOAT3 accessor_206[]; const int accessor_206__length = 2; -const int accessor_206__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_206__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_207[]; +extern const XMFLOAT4 accessor_207[]; const int accessor_207__length = 2; -const int accessor_207__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_207__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_208[]; +extern const XMFLOAT3 accessor_208[]; const int accessor_208__length = 2; -const int accessor_208__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_208__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_209[]; +extern const XMFLOAT3 accessor_209[]; const int accessor_209__length = 2; -const int accessor_209__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_209__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_210[]; +extern const XMFLOAT4 accessor_210[]; const int accessor_210__length = 2; -const int accessor_210__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_210__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_211[]; 
+extern const XMFLOAT3 accessor_211[];
 const int accessor_211__length = 2;
-const int accessor_211__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_211__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_212[];
+extern const XMFLOAT3 accessor_212[];
 const int accessor_212__length = 2;
-const int accessor_212__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_212__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_213[];
+extern const XMFLOAT4 accessor_213[];
 const int accessor_213__length = 2;
-const int accessor_213__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_213__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_214[];
+extern const XMFLOAT3 accessor_214[];
 const int accessor_214__length = 2;
-const int accessor_214__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_214__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_215[];
+extern const XMFLOAT3 accessor_215[];
 const int accessor_215__length = 2;
-const int accessor_215__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_215__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_216[];
+extern const XMFLOAT4 accessor_216[];
 const int accessor_216__length = 2;
-const int accessor_216__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_216__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_217[];
+extern const XMFLOAT3 accessor_217[];
 const int accessor_217__length = 2;
-const int accessor_217__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_217__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_218[];
+extern const XMFLOAT3 accessor_218[];
 const int accessor_218__length = 91;
-const int accessor_218__size = (sizeof (D3DXVECTOR3)) * 91;
+const int accessor_218__size = (sizeof (XMFLOAT3)) * 91;
-extern const D3DXVECTOR4 accessor_219[];
+extern const XMFLOAT4 accessor_219[];
 const int accessor_219__length = 2;
-const int accessor_219__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_219__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_220[];
+extern const XMFLOAT3 accessor_220[];
 const int accessor_220__length = 2;
-const int accessor_220__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_220__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_221[];
+extern const XMFLOAT3 accessor_221[];
 const int accessor_221__length = 2;
-const int accessor_221__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_221__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_222[];
+extern const XMFLOAT4 accessor_222[];
 const int accessor_222__length = 2;
-const int accessor_222__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_222__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_223[];
+extern const XMFLOAT3 accessor_223[];
 const int accessor_223__length = 2;
-const int accessor_223__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_223__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_224[];
+extern const XMFLOAT3 accessor_224[];
 const int accessor_224__length = 2;
-const int accessor_224__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_224__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_225[];
+extern const XMFLOAT4 accessor_225[];
 const int accessor_225__length = 2;
-const int accessor_225__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_225__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_226[];
+extern const XMFLOAT3 accessor_226[];
 const int accessor_226__length = 2;
-const int accessor_226__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_226__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_227[];
+extern const XMFLOAT3 accessor_227[];
 const int accessor_227__length = 2;
-const int accessor_227__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_227__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_228[];
+extern const XMFLOAT4 accessor_228[];
 const int accessor_228__length = 2;
-const int accessor_228__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_228__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_229[];
+extern const XMFLOAT3 accessor_229[];
 const int accessor_229__length = 2;
-const int accessor_229__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_229__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_230[];
+extern const XMFLOAT3 accessor_230[];
 const int accessor_230__length = 2;
-const int accessor_230__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_230__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_231[];
+extern const XMFLOAT4 accessor_231[];
 const int accessor_231__length = 2;
-const int accessor_231__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_231__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_232[];
+extern const XMFLOAT3 accessor_232[];
 const int accessor_232__length = 2;
-const int accessor_232__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_232__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_233[];
+extern const XMFLOAT3 accessor_233[];
 const int accessor_233__length = 2;
-const int accessor_233__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_233__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_234[];
+extern const XMFLOAT4 accessor_234[];
 const int accessor_234__length = 2;
-const int accessor_234__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_234__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_235[];
+extern const XMFLOAT3 accessor_235[];
 const int accessor_235__length = 2;
-const int accessor_235__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_235__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_236[];
+extern const XMFLOAT3 accessor_236[];
 const int accessor_236__length = 91;
-const int accessor_236__size = (sizeof (D3DXVECTOR3)) * 91;
+const int accessor_236__size = (sizeof (XMFLOAT3)) * 91;
-extern const D3DXVECTOR4 accessor_237[];
+extern const XMFLOAT4 accessor_237[];
 const int accessor_237__length = 2;
-const int accessor_237__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_237__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_238[];
+extern const XMFLOAT3 accessor_238[];
 const int accessor_238__length = 2;
-const int accessor_238__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_238__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_239[];
+extern const XMFLOAT3 accessor_239[];
 const int accessor_239__length = 2;
-const int accessor_239__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_239__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_240[];
+extern const XMFLOAT4 accessor_240[];
 const int accessor_240__length = 2;
-const int accessor_240__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_240__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_241[];
+extern const XMFLOAT3 accessor_241[];
 const int accessor_241__length = 2;
-const int accessor_241__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_241__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_242[];
+extern const XMFLOAT3 accessor_242[];
 const int accessor_242__length = 2;
-const int accessor_242__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_242__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_243[];
+extern const XMFLOAT4 accessor_243[];
 const int accessor_243__length = 2;
-const int accessor_243__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_243__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_244[];
+extern const XMFLOAT3 accessor_244[];
 const int accessor_244__length = 2;
-const int accessor_244__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_244__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_245[];
+extern const XMFLOAT3 accessor_245[];
 const int accessor_245__length = 2;
-const int accessor_245__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_245__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_246[];
+extern const XMFLOAT4 accessor_246[];
 const int accessor_246__length = 2;
-const int accessor_246__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_246__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_247[];
+extern const XMFLOAT3 accessor_247[];
 const int accessor_247__length = 2;
-const int accessor_247__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_247__size = (sizeof (XMFLOAT3)) * 2;
 extern const float accessor_248[];
 const int accessor_248__length = 63;
 const int accessor_248__size = (sizeof (float)) * 63;
-extern const D3DXVECTOR3 accessor_249[];
+extern const XMFLOAT3 accessor_249[];
 const int accessor_249__length = 63;
-const int accessor_249__size = (sizeof (D3DXVECTOR3)) * 63;
+const int accessor_249__size = (sizeof (XMFLOAT3)) * 63;
-extern const D3DXVECTOR4 accessor_250[];
+extern const XMFLOAT4 accessor_250[];
 const int accessor_250__length = 63;
-const int accessor_250__size = (sizeof (D3DXVECTOR4)) * 63;
+const int accessor_250__size = (sizeof (XMFLOAT4)) * 63;
 extern const float accessor_251[];
 const int accessor_251__length = 2;
 const int accessor_251__size = (sizeof (float)) * 2;
-extern const D3DXVECTOR3 accessor_252[];
+extern const XMFLOAT3 accessor_252[];
 const int accessor_252__length = 2;
-const int accessor_252__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_252__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_253[];
+extern const XMFLOAT3 accessor_253[];
 const int accessor_253__length = 2;
-const int accessor_253__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_253__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_254[];
+extern const XMFLOAT4 accessor_254[];
 const int accessor_254__length = 2;
-const int accessor_254__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_254__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_255[];
+extern const XMFLOAT3 accessor_255[];
 const int accessor_255__length = 2;
-const int accessor_255__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_255__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_256[];
+extern const XMFLOAT3 accessor_256[];
 const int accessor_256__length = 2;
-const int accessor_256__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_256__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_257[];
+extern const XMFLOAT4 accessor_257[];
 const int accessor_257__length = 2;
-const int accessor_257__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_257__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_258[];
+extern const XMFLOAT3 accessor_258[];
 const int accessor_258__length = 2;
-const int accessor_258__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_258__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_259[];
+extern const XMFLOAT3 accessor_259[];
 const int accessor_259__length = 2;
-const int accessor_259__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_259__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_260[];
+extern const XMFLOAT4 accessor_260[];
 const int accessor_260__length = 63;
-const int accessor_260__size = (sizeof (D3DXVECTOR4)) * 63;
+const int accessor_260__size = (sizeof (XMFLOAT4)) * 63;
-extern const D3DXVECTOR3 accessor_261[];
+extern const XMFLOAT3 accessor_261[];
 const int accessor_261__length = 2;
-const int accessor_261__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_261__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_262[];
+extern const XMFLOAT3 accessor_262[];
 const int accessor_262__length = 2;
-const int accessor_262__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_262__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_263[];
+extern const XMFLOAT4 accessor_263[];
 const int accessor_263__length = 63;
-const int accessor_263__size = (sizeof (D3DXVECTOR4)) * 63;
+const int accessor_263__size = (sizeof (XMFLOAT4)) * 63;
-extern const D3DXVECTOR3 accessor_264[];
+extern const XMFLOAT3 accessor_264[];
 const int accessor_264__length = 2;
-const int accessor_264__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_264__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_265[];
+extern const XMFLOAT3 accessor_265[];
 const int accessor_265__length = 2;
-const int accessor_265__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_265__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_266[];
+extern const XMFLOAT4 accessor_266[];
 const int accessor_266__length = 2;
-const int accessor_266__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_266__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_267[];
+extern const XMFLOAT3 accessor_267[];
 const int accessor_267__length = 2;
-const int accessor_267__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_267__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_268[];
+extern const XMFLOAT3 accessor_268[];
 const int accessor_268__length = 2;
-const int accessor_268__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_268__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_269[];
+extern const XMFLOAT4 accessor_269[];
 const int accessor_269__length = 2;
-const int accessor_269__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_269__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_270[];
+extern const XMFLOAT3 accessor_270[];
 const int accessor_270__length = 2;
-const int accessor_270__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_270__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_271[];
+extern const XMFLOAT3 accessor_271[];
 const int accessor_271__length = 2;
-const int accessor_271__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_271__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_272[];
+extern const XMFLOAT4 accessor_272[];
 const int accessor_272__length = 2;
-const int accessor_272__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_272__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_273[];
+extern const XMFLOAT3 accessor_273[];
 const int accessor_273__length = 2;
-const int accessor_273__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_273__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_274[];
+extern const XMFLOAT3 accessor_274[];
 const int accessor_274__length = 2;
-const int accessor_274__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_274__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_275[];
+extern const XMFLOAT4 accessor_275[];
 const int accessor_275__length = 63;
-const int accessor_275__size = (sizeof (D3DXVECTOR4)) * 63;
+const int accessor_275__size = (sizeof (XMFLOAT4)) * 63;
-extern const D3DXVECTOR3 accessor_276[];
+extern const XMFLOAT3 accessor_276[];
 const int accessor_276__length = 63;
-const int accessor_276__size = (sizeof (D3DXVECTOR3)) * 63;
+const int accessor_276__size = (sizeof (XMFLOAT3)) * 63;
-extern const D3DXVECTOR3 accessor_277[];
+extern const XMFLOAT3 accessor_277[];
 const int accessor_277__length = 2;
-const int accessor_277__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_277__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_278[];
+extern const XMFLOAT4 accessor_278[];
 const int accessor_278__length = 2;
-const int accessor_278__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_278__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_279[];
+extern const XMFLOAT3 accessor_279[];
 const int accessor_279__length = 2;
-const int accessor_279__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_279__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_280[];
+extern const XMFLOAT3 accessor_280[];
 const int accessor_280__length = 2;
-const int accessor_280__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_280__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_281[];
+extern const XMFLOAT4 accessor_281[];
 const int accessor_281__length = 2;
-const int accessor_281__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_281__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_282[];
+extern const XMFLOAT3 accessor_282[];
 const int accessor_282__length = 2;
-const int accessor_282__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_282__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_283[];
+extern const XMFLOAT3 accessor_283[];
 const int accessor_283__length = 2;
-const int accessor_283__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_283__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_284[];
+extern const XMFLOAT4 accessor_284[];
 const int accessor_284__length = 2;
-const int accessor_284__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_284__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_285[];
+extern const XMFLOAT3 accessor_285[];
 const int accessor_285__length = 2;
-const int accessor_285__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_285__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_286[];
+extern const XMFLOAT3 accessor_286[];
 const int accessor_286__length = 2;
-const int accessor_286__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_286__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_287[];
+extern const XMFLOAT4 accessor_287[];
 const int accessor_287__length = 2;
-const int accessor_287__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_287__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_288[];
+extern const XMFLOAT3 accessor_288[];
 const int accessor_288__length = 2;
-const int accessor_288__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_288__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_289[];
+extern const XMFLOAT3 accessor_289[];
 const int accessor_289__length = 2;
-const int accessor_289__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_289__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_290[];
+extern const XMFLOAT4 accessor_290[];
 const int accessor_290__length = 2;
-const int accessor_290__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_290__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_291[];
+extern const XMFLOAT3 accessor_291[];
 const int accessor_291__length = 2;
-const int accessor_291__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_291__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_292[];
+extern const XMFLOAT3 accessor_292[];
 const int accessor_292__length = 2;
-const int accessor_292__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_292__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_293[];
+extern const XMFLOAT4 accessor_293[];
 const int accessor_293__length = 2;
-const int accessor_293__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_293__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_294[];
+extern const XMFLOAT3 accessor_294[];
 const int accessor_294__length = 2;
-const int accessor_294__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_294__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_295[];
+extern const XMFLOAT3 accessor_295[];
 const int accessor_295__length = 2;
-const int accessor_295__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_295__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_296[];
+extern const XMFLOAT4 accessor_296[];
 const int accessor_296__length = 2;
-const int accessor_296__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_296__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_297[];
+extern const XMFLOAT3 accessor_297[];
 const int accessor_297__length = 2;
-const int accessor_297__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_297__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_298[];
+extern const XMFLOAT3 accessor_298[];
 const int accessor_298__length = 2;
-const int accessor_298__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_298__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_299[];
+extern const XMFLOAT4 accessor_299[];
 const int accessor_299__length = 2;
-const int accessor_299__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_299__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_300[];
+extern const XMFLOAT3 accessor_300[];
 const int accessor_300__length = 2;
-const int accessor_300__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_300__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_301[];
+extern const XMFLOAT3 accessor_301[];
 const int accessor_301__length = 2;
-const int accessor_301__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_301__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_302[];
+extern const XMFLOAT4 accessor_302[];
 const int accessor_302__length = 2;
-const int accessor_302__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_302__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_303[];
+extern const XMFLOAT3 accessor_303[];
 const int accessor_303__length = 2;
-const int accessor_303__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_303__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_304[];
+extern const XMFLOAT3 accessor_304[];
 const int accessor_304__length = 2;
-const int accessor_304__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_304__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_305[];
+extern const XMFLOAT4 accessor_305[];
 const int accessor_305__length = 2;
-const int accessor_305__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_305__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_306[];
+extern const XMFLOAT3 accessor_306[];
 const int accessor_306__length = 2;
-const int accessor_306__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_306__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_307[];
+extern const XMFLOAT3 accessor_307[];
 const int accessor_307__length = 2;
-const int accessor_307__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_307__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_308[];
+extern const XMFLOAT4 accessor_308[];
 const int accessor_308__length = 2;
-const int accessor_308__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_308__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_309[];
+extern const XMFLOAT3 accessor_309[];
 const int accessor_309__length = 2;
-const int accessor_309__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_309__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_310[];
+extern const XMFLOAT3 accessor_310[];
 const int accessor_310__length = 2;
-const int accessor_310__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_310__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_311[];
+extern const XMFLOAT4 accessor_311[];
 const int accessor_311__length = 2;
-const int accessor_311__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_311__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_312[];
+extern const XMFLOAT3 accessor_312[];
 const int accessor_312__length = 2;
-const int accessor_312__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_312__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_313[];
+extern const XMFLOAT3 accessor_313[];
 const int accessor_313__length = 2;
-const int accessor_313__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_313__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_314[];
+extern const XMFLOAT4 accessor_314[];
 const int accessor_314__length = 2;
-const int accessor_314__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_314__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_315[];
+extern const XMFLOAT3 accessor_315[];
 const int accessor_315__length = 2;
-const int accessor_315__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_315__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_316[];
+extern const XMFLOAT3 accessor_316[];
 const int accessor_316__length = 2;
-const int accessor_316__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_316__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_317[];
+extern const XMFLOAT4 accessor_317[];
 const int accessor_317__length = 2;
-const int accessor_317__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_317__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_318[];
+extern const XMFLOAT3 accessor_318[];
 const int accessor_318__length = 2;
-const int accessor_318__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_318__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_319[];
+extern const XMFLOAT3 accessor_319[];
 const int accessor_319__length = 2;
-const int accessor_319__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_319__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_320[];
+extern const XMFLOAT4 accessor_320[];
 const int accessor_320__length = 2;
-const int accessor_320__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_320__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_321[];
+extern const XMFLOAT3 accessor_321[];
 const int accessor_321__length = 2;
-const int accessor_321__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_321__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_322[];
+extern const XMFLOAT3 accessor_322[];
 const int accessor_322__length = 2;
-const int accessor_322__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_322__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_323[];
+extern const XMFLOAT4 accessor_323[];
 const int accessor_323__length = 2;
-const int accessor_323__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_323__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_324[];
+extern const XMFLOAT3 accessor_324[];
 const int accessor_324__length = 2;
-const int accessor_324__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_324__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_325[];
+extern const XMFLOAT3 accessor_325[];
 const int accessor_325__length = 2;
-const int accessor_325__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_325__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_326[];
+extern const XMFLOAT4 accessor_326[];
 const int accessor_326__length = 2;
-const int accessor_326__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_326__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_327[];
+extern const XMFLOAT3 accessor_327[];
 const int accessor_327__length = 2;
-const int accessor_327__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_327__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_328[];
+extern const XMFLOAT3 accessor_328[];
 const int accessor_328__length = 2;
-const int accessor_328__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_328__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_329[];
+extern const XMFLOAT4 accessor_329[];
 const int accessor_329__length = 2;
-const int accessor_329__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_329__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_330[];
+extern const XMFLOAT3 accessor_330[];
 const int accessor_330__length = 2;
-const int accessor_330__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_330__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_331[];
+extern const XMFLOAT3 accessor_331[];
 const int accessor_331__length = 2;
-const int accessor_331__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_331__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_332[];
+extern const XMFLOAT4 accessor_332[];
 const int accessor_332__length = 2;
-const int accessor_332__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_332__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_333[];
+extern const XMFLOAT3 accessor_333[];
 const int accessor_333__length = 2;
-const int accessor_333__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_333__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_334[];
+extern const XMFLOAT3 accessor_334[];
 const int accessor_334__length = 2;
-const int accessor_334__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_334__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_335[];
+extern const XMFLOAT4 accessor_335[];
 const int accessor_335__length = 2;
-const int accessor_335__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_335__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_336[];
+extern const XMFLOAT3 accessor_336[];
 const int accessor_336__length = 2;
-const int accessor_336__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_336__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_337[];
+extern const XMFLOAT3 accessor_337[];
 const int accessor_337__length = 63;
-const int accessor_337__size = (sizeof (D3DXVECTOR3)) * 63;
+const int accessor_337__size = (sizeof (XMFLOAT3)) * 63;
-extern const D3DXVECTOR4 accessor_338[];
+extern const XMFLOAT4 accessor_338[];
 const int accessor_338__length = 2;
-const int accessor_338__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_338__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_339[];
+extern const XMFLOAT3 accessor_339[];
 const int accessor_339__length = 2;
-const int accessor_339__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_339__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_340[];
+extern const XMFLOAT3 accessor_340[];
 const int accessor_340__length = 2;
-const int accessor_340__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_340__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_341[];
+extern const XMFLOAT4 accessor_341[];
 const int accessor_341__length = 2;
-const int accessor_341__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_341__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_342[];
+extern const XMFLOAT3 accessor_342[];
 const int accessor_342__length = 2;
-const int accessor_342__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_342__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_343[];
+extern const XMFLOAT3 accessor_343[];
 const int accessor_343__length = 2;
-const int accessor_343__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_343__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_344[];
+extern const XMFLOAT4 accessor_344[];
 const int accessor_344__length = 2;
-const int accessor_344__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_344__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_345[];
+extern const XMFLOAT3 accessor_345[];
 const int accessor_345__length = 2;
-const int accessor_345__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_345__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_346[];
+extern const XMFLOAT3 accessor_346[];
 const int accessor_346__length = 2;
-const int accessor_346__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_346__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_347[];
+extern const XMFLOAT4 accessor_347[];
 const int accessor_347__length = 2;
-const int accessor_347__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_347__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_348[];
+extern const XMFLOAT3 accessor_348[];
 const int accessor_348__length = 2;
-const int accessor_348__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_348__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_349[];
+extern const XMFLOAT3 accessor_349[];
 const int accessor_349__length = 2;
-const int accessor_349__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_349__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_350[];
+extern const XMFLOAT4 accessor_350[];
 const int accessor_350__length = 2;
-const int accessor_350__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_350__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_351[];
+extern const XMFLOAT3 accessor_351[];
 const int accessor_351__length = 2;
-const int accessor_351__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_351__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_352[];
+extern const XMFLOAT3 accessor_352[];
 const int accessor_352__length = 2;
-const int accessor_352__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_352__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_353[];
+extern const XMFLOAT4 accessor_353[];
 const int accessor_353__length = 2;
-const int accessor_353__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_353__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_354[];
+extern const XMFLOAT3 accessor_354[];
 const int accessor_354__length = 2;
-const int accessor_354__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_354__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_355[];
+extern const XMFLOAT3 accessor_355[];
 const int accessor_355__length = 63;
-const int accessor_355__size = (sizeof (D3DXVECTOR3)) * 63;
+const int accessor_355__size = (sizeof (XMFLOAT3)) * 63;
-extern const D3DXVECTOR4 accessor_356[];
+extern const XMFLOAT4 accessor_356[];
 const int accessor_356__length = 2;
-const int accessor_356__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_356__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_357[];
+extern const XMFLOAT3 accessor_357[];
 const int accessor_357__length = 2;
-const int accessor_357__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_357__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_358[];
+extern const XMFLOAT3 accessor_358[];
 const int accessor_358__length = 2;
-const int accessor_358__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_358__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_359[];
+extern const XMFLOAT4 accessor_359[];
 const int accessor_359__length = 2;
-const int accessor_359__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_359__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_360[];
+extern const XMFLOAT3 accessor_360[];
 const int accessor_360__length = 2;
-const int accessor_360__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_360__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_361[];
+extern const XMFLOAT3 accessor_361[];
 const int accessor_361__length = 2;
-const int accessor_361__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_361__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_362[];
+extern const XMFLOAT4 accessor_362[];
 const int accessor_362__length = 2;
-const int accessor_362__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_362__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_363[];
+extern const XMFLOAT3 accessor_363[];
 const int accessor_363__length = 2;
-const int accessor_363__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_363__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_364[];
+extern const XMFLOAT3 accessor_364[];
 const int accessor_364__length = 2;
-const int accessor_364__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_364__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_365[];
+extern const XMFLOAT4 accessor_365[];
 const int accessor_365__length = 2;
-const int accessor_365__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_365__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_366[];
+extern const XMFLOAT3 accessor_366[];
 const int accessor_366__length = 2;
-const int accessor_366__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_366__size = (sizeof (XMFLOAT3)) * 2;
 extern const float accessor_367[];
 const int accessor_367__length = 2;
 const int accessor_367__size = (sizeof (float)) * 2;
-extern const D3DXVECTOR3 accessor_368[];
+extern const XMFLOAT3 accessor_368[];
 const int accessor_368__length = 2;
-const int accessor_368__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_368__size = (sizeof (XMFLOAT3)) * 2;
 extern const float accessor_369[];
 const int accessor_369__length = 36;
 const int accessor_369__size = (sizeof (float)) * 36;
-extern const D3DXVECTOR4 accessor_370[];
+extern const XMFLOAT4 accessor_370[];
 const int accessor_370__length = 36;
-const int accessor_370__size = (sizeof (D3DXVECTOR4)) * 36;
+const int accessor_370__size = (sizeof (XMFLOAT4)) * 36;
-extern const D3DXVECTOR3 accessor_371[];
+extern const XMFLOAT3 accessor_371[];
 const int accessor_371__length = 36;
-const int accessor_371__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_371__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR3 accessor_372[];
+extern const XMFLOAT3 accessor_372[];
 const int accessor_372__length = 2;
-const int accessor_372__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_372__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_373[];
+extern const XMFLOAT4 accessor_373[];
 const int accessor_373__length = 2;
-const int accessor_373__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_373__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_374[];
+extern const XMFLOAT3 accessor_374[];
 const int accessor_374__length = 2;
-const int accessor_374__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_374__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_375[];
+extern const XMFLOAT3 accessor_375[];
 const int accessor_375__length = 2;
-const int accessor_375__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_375__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_376[];
+extern const XMFLOAT4 accessor_376[];
 const int accessor_376__length = 2;
-const int accessor_376__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_376__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_377[];
+extern const XMFLOAT3 accessor_377[];
 const int accessor_377__length = 2;
-const int accessor_377__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_377__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_378[];
+extern const XMFLOAT3 accessor_378[];
 const int accessor_378__length = 36;
-const int accessor_378__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_378__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR4 accessor_379[];
+extern const XMFLOAT4 accessor_379[];
 const int accessor_379__length = 36;
-const int accessor_379__size = (sizeof (D3DXVECTOR4)) * 36;
+const int accessor_379__size = (sizeof (XMFLOAT4)) * 36;
-extern const D3DXVECTOR3 accessor_380[];
+extern const XMFLOAT3 accessor_380[];
 const int accessor_380__length = 2;
-const int accessor_380__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_380__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_381[];
+extern const XMFLOAT3 accessor_381[];
 const int accessor_381__length = 36;
-const int accessor_381__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_381__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR4 accessor_382[];
+extern const XMFLOAT4 accessor_382[];
 const int accessor_382__length = 36;
-const int accessor_382__size = (sizeof (D3DXVECTOR4)) * 36;
+const int accessor_382__size = (sizeof (XMFLOAT4)) * 36;
-extern const D3DXVECTOR3 accessor_383[];
+extern const XMFLOAT3 accessor_383[];
 const int accessor_383__length = 36;
-const int accessor_383__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_383__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR3 accessor_384[];
+extern const XMFLOAT3 accessor_384[];
 const int accessor_384__length = 2;
-const int accessor_384__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_384__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_385[];
+extern const XMFLOAT4 accessor_385[];
 const int accessor_385__length = 2;
-const int accessor_385__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_385__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_386[];
+extern const XMFLOAT3 accessor_386[];
 const int accessor_386__length = 2;
-const int accessor_386__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_386__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_387[];
+extern const XMFLOAT3 accessor_387[];
 const int accessor_387__length = 2;
-const int accessor_387__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_387__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_388[];
+extern const XMFLOAT4 accessor_388[];
 const int accessor_388__length = 2;
-const int accessor_388__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_388__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_389[];
+extern const XMFLOAT3 accessor_389[];
 const int accessor_389__length = 2;
-const int accessor_389__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_389__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_390[];
+extern const XMFLOAT3 accessor_390[];
 const int accessor_390__length = 2;
-const int accessor_390__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_390__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_391[];
+extern const XMFLOAT4 accessor_391[];
 const int accessor_391__length = 2;
-const int accessor_391__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_391__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_392[];
+extern const XMFLOAT3 accessor_392[];
 const int accessor_392__length = 2;
-const int accessor_392__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_392__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_393[];
+extern const XMFLOAT3 accessor_393[];
 const int accessor_393__length = 36;
-const int accessor_393__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_393__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR4 accessor_394[];
+extern const XMFLOAT4 accessor_394[];
 const int accessor_394__length = 36;
-const int accessor_394__size = (sizeof (D3DXVECTOR4)) * 36;
+const int accessor_394__size = (sizeof (XMFLOAT4)) * 36;
-extern const D3DXVECTOR3 accessor_395[];
+extern const XMFLOAT3 accessor_395[];
 const int accessor_395__length = 36;
-const int accessor_395__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_395__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR3 accessor_396[];
+extern const XMFLOAT3 accessor_396[];
 const int accessor_396__length = 2;
-const int accessor_396__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_396__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_397[];
+extern const XMFLOAT4 accessor_397[];
 const int accessor_397__length = 2;
-const int accessor_397__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_397__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_398[];
+extern const XMFLOAT3 accessor_398[];
 const int accessor_398__length = 2;
-const int accessor_398__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_398__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_399[];
+extern const XMFLOAT3 accessor_399[];
 const int accessor_399__length = 2;
-const int accessor_399__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_399__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_400[];
+extern const XMFLOAT4 accessor_400[];
 const int accessor_400__length = 2;
-const int accessor_400__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_400__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_401[];
+extern const XMFLOAT3 accessor_401[];
 const int accessor_401__length = 2;
-const int accessor_401__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_401__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_402[];
+extern const XMFLOAT3 accessor_402[];
 const int accessor_402__length = 2;
-const int accessor_402__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_402__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_403[];
+extern const XMFLOAT4 accessor_403[];
 const int accessor_403__length = 2;
-const int accessor_403__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_403__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_404[];
+extern const XMFLOAT3 accessor_404[];
 const int accessor_404__length = 2;
-const int accessor_404__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_404__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_405[];
+extern const XMFLOAT3 accessor_405[];
 const int accessor_405__length = 2;
-const int accessor_405__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_405__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_406[];
+extern const XMFLOAT4 accessor_406[];
 const int accessor_406__length = 2;
-const int accessor_406__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_406__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_407[];
+extern const XMFLOAT3 accessor_407[];
 const int accessor_407__length = 2;
-const int accessor_407__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_407__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_408[];
+extern const XMFLOAT3 accessor_408[];
 const int accessor_408__length = 2;
-const int accessor_408__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_408__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_409[];
+extern const XMFLOAT4 accessor_409[];
 const int accessor_409__length = 2;
-const int accessor_409__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_409__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_410[];
+extern const XMFLOAT3 accessor_410[];
 const int accessor_410__length = 2;
-const int accessor_410__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_410__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_411[];
+extern const XMFLOAT3 accessor_411[];
 const int accessor_411__length = 2;
-const int accessor_411__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_411__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_412[];
+extern const XMFLOAT4 accessor_412[];
 const int accessor_412__length = 2;
-const int accessor_412__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_412__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_413[];
+extern const XMFLOAT3 accessor_413[];
 const int accessor_413__length = 2;
-const int accessor_413__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_413__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_414[];
+extern const XMFLOAT3 accessor_414[];
 const int accessor_414__length = 2;
-const int accessor_414__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_414__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_415[];
+extern const XMFLOAT4 accessor_415[];
 const int accessor_415__length = 2;
-const int accessor_415__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_415__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_416[];
+extern const XMFLOAT3 accessor_416[];
 const int accessor_416__length = 2;
-const int accessor_416__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_416__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_417[];
+extern const XMFLOAT3 accessor_417[];
 const int accessor_417__length = 2;
-const int accessor_417__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_417__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_418[];
+extern const XMFLOAT4 accessor_418[];
 const int accessor_418__length = 2;
-const int accessor_418__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_418__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_419[];
+extern const XMFLOAT3 accessor_419[];
 const int accessor_419__length = 2;
-const int accessor_419__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_419__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_420[];
+extern const XMFLOAT3 accessor_420[];
 const int accessor_420__length = 2;
-const int accessor_420__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_420__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_421[];
+extern const XMFLOAT4 accessor_421[];
 const int accessor_421__length = 2;
-const int accessor_421__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_421__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_422[];
+extern const XMFLOAT3 accessor_422[];
 const int accessor_422__length = 2;
-const int accessor_422__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_422__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_423[];
+extern const XMFLOAT3 accessor_423[];
 const int accessor_423__length = 2;
-const int accessor_423__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_423__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_424[];
+extern const XMFLOAT4 accessor_424[];
 const int accessor_424__length = 2;
-const int accessor_424__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_424__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_425[];
+extern const XMFLOAT3 accessor_425[];
 const int accessor_425__length = 2;
-const int accessor_425__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_425__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_426[];
+extern const XMFLOAT3 accessor_426[];
 const int accessor_426__length = 2;
-const int accessor_426__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_426__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_427[];
+extern const XMFLOAT4 accessor_427[];
 const int accessor_427__length = 2;
-const int accessor_427__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_427__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_428[];
+extern const XMFLOAT3 accessor_428[];
 const int accessor_428__length = 2;
-const int accessor_428__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_428__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_429[];
+extern const XMFLOAT3 accessor_429[];
 const int accessor_429__length = 2;
-const int accessor_429__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_429__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_430[];
+extern const XMFLOAT4 accessor_430[];
 const int accessor_430__length = 2;
-const int accessor_430__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_430__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_431[];
+extern const XMFLOAT3 accessor_431[];
 const int accessor_431__length = 2;
-const int accessor_431__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_431__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_432[];
+extern const XMFLOAT3 accessor_432[];
 const int accessor_432__length = 2;
-const int accessor_432__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_432__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_433[];
+extern const XMFLOAT4 accessor_433[];
 const int accessor_433__length = 2;
-const int accessor_433__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_433__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_434[];
+extern const XMFLOAT3 accessor_434[];
 const int accessor_434__length = 2;
-const int accessor_434__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_434__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_435[];
+extern const XMFLOAT3 accessor_435[];
 const int accessor_435__length = 2;
-const int accessor_435__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_435__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_436[];
+extern const XMFLOAT4 accessor_436[];
 const int accessor_436__length = 2;
-const int accessor_436__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_436__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_437[];
+extern const XMFLOAT3 accessor_437[];
 const int accessor_437__length = 2;
-const int accessor_437__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_437__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_438[];
+extern const XMFLOAT3 accessor_438[];
 const int accessor_438__length = 2;
-const int accessor_438__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_438__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_439[];
+extern const XMFLOAT4 accessor_439[];
 const int accessor_439__length = 2;
-const int accessor_439__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_439__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_440[];
+extern const XMFLOAT3 accessor_440[];
 const int accessor_440__length = 2;
-const int accessor_440__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_440__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_441[];
+extern const XMFLOAT3 accessor_441[];
 const int accessor_441__length = 2;
-const int accessor_441__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_441__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_442[];
+extern const XMFLOAT4 accessor_442[];
 const int accessor_442__length = 2;
-const int accessor_442__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_442__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_443[];
+extern const XMFLOAT3 accessor_443[];
 const int accessor_443__length = 2;
-const int accessor_443__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_443__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_444[];
+extern const XMFLOAT3 accessor_444[];
 const int accessor_444__length = 2;
-const int accessor_444__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_444__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_445[];
+extern const XMFLOAT4 accessor_445[];
 const int accessor_445__length = 2;
-const int accessor_445__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_445__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_446[];
+extern const XMFLOAT3 accessor_446[];
 const int accessor_446__length = 2;
-const int accessor_446__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_446__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_447[];
+extern const XMFLOAT3 accessor_447[];
 const int accessor_447__length = 2;
-const int accessor_447__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_447__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_448[];
+extern const XMFLOAT4 accessor_448[];
 const int accessor_448__length = 2;
-const int accessor_448__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_448__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_449[];
+extern const XMFLOAT3 accessor_449[];
 const int accessor_449__length = 2;
-const int accessor_449__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_449__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_450[];
+extern const XMFLOAT3 accessor_450[];
 const int accessor_450__length = 2;
-const int accessor_450__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_450__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_451[];
+extern const XMFLOAT4 accessor_451[];
 const int accessor_451__length = 2;
-const int accessor_451__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_451__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_452[];
+extern const XMFLOAT3 accessor_452[];
 const int accessor_452__length = 2;
-const int accessor_452__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_452__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_453[];
+extern const XMFLOAT3 accessor_453[];
 const int accessor_453__length = 2;
-const int accessor_453__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_453__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_454[];
+extern const XMFLOAT4 accessor_454[];
 const int accessor_454__length = 2;
-const int accessor_454__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_454__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_455[];
+extern const XMFLOAT3 accessor_455[];
 const int accessor_455__length = 2;
-const int accessor_455__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_455__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_456[];
+extern const XMFLOAT3 accessor_456[];
 const int accessor_456__length = 36;
-const int accessor_456__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_456__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR4 accessor_457[];
+extern const XMFLOAT4 accessor_457[];
 const int accessor_457__length = 2;
-const int accessor_457__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_457__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_458[];
+extern const XMFLOAT3 accessor_458[];
 const int accessor_458__length = 2;
-const int accessor_458__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_458__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_459[];
+extern const XMFLOAT3 accessor_459[];
 const int accessor_459__length = 2;
-const int accessor_459__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_459__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_460[];
+extern const XMFLOAT4 accessor_460[];
 const int accessor_460__length = 2;
-const int accessor_460__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_460__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_461[];
+extern const XMFLOAT3 accessor_461[];
 const int accessor_461__length = 2;
-const int accessor_461__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_461__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_462[];
+extern const XMFLOAT3 accessor_462[];
 const int accessor_462__length = 2;
-const int accessor_462__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_462__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_463[];
+extern const XMFLOAT4 accessor_463[];
 const int accessor_463__length = 2;
-const int accessor_463__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_463__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_464[];
+extern const XMFLOAT3 accessor_464[];
 const int accessor_464__length = 2;
-const int accessor_464__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_464__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_465[];
+extern const XMFLOAT3 accessor_465[];
 const int accessor_465__length = 2;
-const int accessor_465__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_465__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_466[];
+extern const XMFLOAT4 accessor_466[];
 const int accessor_466__length = 2;
-const int accessor_466__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_466__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_467[];
+extern const XMFLOAT3 accessor_467[];
 const int accessor_467__length = 2;
-const int accessor_467__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_467__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_468[];
+extern const XMFLOAT3 accessor_468[];
 const int accessor_468__length = 2;
-const int accessor_468__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_468__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_469[];
+extern const XMFLOAT4 accessor_469[];
 const int accessor_469__length = 2;
-const int accessor_469__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_469__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_470[];
+extern const XMFLOAT3 accessor_470[];
 const int accessor_470__length = 2;
-const int accessor_470__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_470__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_471[];
+extern const XMFLOAT3 accessor_471[];
 const int accessor_471__length = 2;
-const int accessor_471__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_471__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_472[];
+extern const XMFLOAT4 accessor_472[];
 const int accessor_472__length = 2;
-const int accessor_472__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_472__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_473[];
+extern const XMFLOAT3 accessor_473[];
 const int accessor_473__length = 2;
-const int accessor_473__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_473__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_474[];
+extern const XMFLOAT3 accessor_474[];
 const int accessor_474__length = 36;
-const int accessor_474__size = (sizeof (D3DXVECTOR3)) * 36;
+const int accessor_474__size = (sizeof (XMFLOAT3)) * 36;
-extern const D3DXVECTOR4 accessor_475[];
+extern const XMFLOAT4 accessor_475[];
 const int accessor_475__length = 2;
-const int accessor_475__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_475__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_476[];
+extern const XMFLOAT3 accessor_476[];
 const int accessor_476__length = 2;
-const int accessor_476__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_476__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_477[];
+extern const XMFLOAT3 accessor_477[];
 const int accessor_477__length = 2;
-const int accessor_477__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_477__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_478[];
+extern const XMFLOAT4 accessor_478[];
 const int accessor_478__length = 2;
-const int accessor_478__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_478__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_479[];
+extern const XMFLOAT3 accessor_479[];
 const int accessor_479__length = 2;
-const int accessor_479__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_479__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_480[];
+extern const XMFLOAT3 accessor_480[];
 const int accessor_480__length = 2;
-const int accessor_480__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_480__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_481[];
+extern const XMFLOAT4 accessor_481[];
 const int accessor_481__length = 2;
-const int accessor_481__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_481__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_482[];
+extern const XMFLOAT3 accessor_482[];
 const int accessor_482__length = 2;
-const int accessor_482__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_482__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_483[];
+extern const XMFLOAT3 accessor_483[];
 const int accessor_483__length = 2;
-const int accessor_483__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_483__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_484[];
+extern const XMFLOAT4 accessor_484[];
 const int accessor_484__length = 2;
-const int accessor_484__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_484__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_485[];
+extern const XMFLOAT3 accessor_485[];
 const int accessor_485__length = 2;
-const int accessor_485__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_485__size = (sizeof (XMFLOAT3)) * 2;
 extern const float accessor_486[];
 const int accessor_486__length = 22;
 const int accessor_486__size = (sizeof (float)) * 22;
-extern const D3DXVECTOR3 accessor_487[];
+extern const XMFLOAT3 accessor_487[];
 const int accessor_487__length = 22;
-const int accessor_487__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_487__size = (sizeof (XMFLOAT3)) * 22;
-extern const D3DXVECTOR4 accessor_488[];
+extern const XMFLOAT4 accessor_488[];
 const int accessor_488__length = 22;
-const int accessor_488__size = (sizeof (D3DXVECTOR4)) * 22;
+const int accessor_488__size = (sizeof (XMFLOAT4)) * 22;
-extern const D3DXVECTOR3 accessor_489[];
+extern const XMFLOAT3 accessor_489[];
 const int accessor_489__length = 22;
-const int accessor_489__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_489__size = (sizeof (XMFLOAT3)) * 22;
 extern const float accessor_490[];
 const int accessor_490__length = 2;
 const int accessor_490__size = (sizeof (float)) * 2;
-extern const D3DXVECTOR3 accessor_491[];
+extern const XMFLOAT3 accessor_491[];
 const int accessor_491__length = 2;
-const int accessor_491__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_491__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_492[];
+extern const XMFLOAT4 accessor_492[];
 const int accessor_492__length = 2;
-const int accessor_492__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_492__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_493[];
+extern const XMFLOAT3 accessor_493[];
 const int accessor_493__length = 2;
-const int accessor_493__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_493__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_494[];
+extern const XMFLOAT3 accessor_494[];
 const int accessor_494__length = 2;
-const int accessor_494__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_494__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_495[];
+extern const XMFLOAT4 accessor_495[];
 const int accessor_495__length = 2;
-const int accessor_495__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_495__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_496[];
+extern const XMFLOAT3 accessor_496[];
 const int accessor_496__length = 2;
-const int accessor_496__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_496__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_497[];
+extern const XMFLOAT3 accessor_497[];
 const int accessor_497__length = 22;
-const int accessor_497__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_497__size = (sizeof (XMFLOAT3)) * 22;
-extern const D3DXVECTOR4 accessor_498[];
+extern const XMFLOAT4 accessor_498[];
 const int accessor_498__length = 22;
-const int accessor_498__size = (sizeof (D3DXVECTOR4)) * 22;
+const int accessor_498__size = (sizeof (XMFLOAT4)) * 22;
-extern const D3DXVECTOR3 accessor_499[];
+extern const XMFLOAT3 accessor_499[];
 const int accessor_499__length = 2;
-const int accessor_499__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_499__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_500[];
+extern const XMFLOAT3 accessor_500[];
 const int accessor_500__length = 22;
-const int accessor_500__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_500__size = (sizeof (XMFLOAT3)) * 22;
-extern const D3DXVECTOR4 accessor_501[];
+extern const XMFLOAT4 accessor_501[];
 const int accessor_501__length = 22;
-const int accessor_501__size = (sizeof (D3DXVECTOR4)) * 22;
+const int accessor_501__size = (sizeof (XMFLOAT4)) * 22;
-extern const D3DXVECTOR3 accessor_502[];
+extern const XMFLOAT3 accessor_502[];
 const int accessor_502__length = 22;
-const int accessor_502__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_502__size = (sizeof (XMFLOAT3)) * 22;
-extern const D3DXVECTOR3 accessor_503[];
+extern const XMFLOAT3 accessor_503[];
 const int accessor_503__length = 2;
-const int accessor_503__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_503__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_504[];
+extern const XMFLOAT4 accessor_504[];
 const int accessor_504__length = 2;
-const int accessor_504__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_504__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_505[];
+extern const XMFLOAT3 accessor_505[];
 const int accessor_505__length = 2;
-const int accessor_505__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_505__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_506[];
+extern const XMFLOAT3 accessor_506[];
 const int accessor_506__length = 2;
-const int accessor_506__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_506__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_507[];
+extern const XMFLOAT4 accessor_507[];
 const int accessor_507__length = 2;
-const int accessor_507__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_507__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_508[];
+extern const XMFLOAT3 accessor_508[];
 const int accessor_508__length = 2;
-const int accessor_508__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_508__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_509[];
+extern const XMFLOAT3 accessor_509[];
 const int accessor_509__length = 2;
-const int accessor_509__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_509__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_510[];
+extern const XMFLOAT4 accessor_510[];
 const int accessor_510__length = 2;
-const int accessor_510__size = (sizeof (D3DXVECTOR4)) * 2;
+const int accessor_510__size = (sizeof (XMFLOAT4)) * 2;
-extern const D3DXVECTOR3 accessor_511[];
+extern const XMFLOAT3 accessor_511[];
 const int accessor_511__length = 2;
-const int accessor_511__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_511__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR3 accessor_512[];
+extern const XMFLOAT3 accessor_512[];
 const int accessor_512__length = 22;
-const int accessor_512__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_512__size = (sizeof (XMFLOAT3)) * 22;
-extern const D3DXVECTOR4 accessor_513[];
+extern const XMFLOAT4 accessor_513[];
 const int accessor_513__length = 22;
-const int accessor_513__size = (sizeof (D3DXVECTOR4)) * 22;
+const int accessor_513__size = (sizeof (XMFLOAT4)) * 22;
-extern const D3DXVECTOR3 accessor_514[];
+extern const XMFLOAT3 accessor_514[];
 const int accessor_514__length = 22;
-const int accessor_514__size = (sizeof (D3DXVECTOR3)) * 22;
+const int accessor_514__size = (sizeof (XMFLOAT3)) * 22;
-extern const D3DXVECTOR3 accessor_515[];
+extern const XMFLOAT3 accessor_515[];
 const int accessor_515__length = 2;
-const int accessor_515__size = (sizeof (D3DXVECTOR3)) * 2;
+const int accessor_515__size = (sizeof (XMFLOAT3)) * 2;
-extern const D3DXVECTOR4 accessor_516[];
+extern const XMFLOAT4 accessor_516[];
 const int accessor_516__length = 2;
-const int accessor_516__size = (sizeof (D3DXVECTOR4)) * 2;
+const int
accessor_516__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_517[]; +extern const XMFLOAT3 accessor_517[]; const int accessor_517__length = 2; -const int accessor_517__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_517__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_518[]; +extern const XMFLOAT3 accessor_518[]; const int accessor_518__length = 2; -const int accessor_518__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_518__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_519[]; +extern const XMFLOAT4 accessor_519[]; const int accessor_519__length = 2; -const int accessor_519__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_519__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_520[]; +extern const XMFLOAT3 accessor_520[]; const int accessor_520__length = 2; -const int accessor_520__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_520__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_521[]; +extern const XMFLOAT3 accessor_521[]; const int accessor_521__length = 2; -const int accessor_521__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_521__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_522[]; +extern const XMFLOAT4 accessor_522[]; const int accessor_522__length = 2; -const int accessor_522__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_522__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_523[]; +extern const XMFLOAT3 accessor_523[]; const int accessor_523__length = 2; -const int accessor_523__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_523__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_524[]; +extern const XMFLOAT3 accessor_524[]; const int accessor_524__length = 2; -const int accessor_524__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_524__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_525[]; +extern const XMFLOAT4 accessor_525[]; const int accessor_525__length = 2; -const int accessor_525__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_525__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_526[]; +extern const XMFLOAT3 accessor_526[]; const int accessor_526__length = 2; -const int accessor_526__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_526__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_527[]; +extern const XMFLOAT3 accessor_527[]; const int accessor_527__length = 2; -const int accessor_527__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_527__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_528[]; +extern const XMFLOAT4 accessor_528[]; const int accessor_528__length = 2; -const int accessor_528__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_528__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_529[]; +extern const XMFLOAT3 accessor_529[]; const int accessor_529__length = 2; -const int accessor_529__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_529__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_530[]; +extern const XMFLOAT3 accessor_530[]; const int accessor_530__length = 2; -const int accessor_530__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_530__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_531[]; +extern const XMFLOAT4 accessor_531[]; const int accessor_531__length = 2; -const int accessor_531__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_531__size = (sizeof (XMFLOAT4)) * 
2; -extern const D3DXVECTOR3 accessor_532[]; +extern const XMFLOAT3 accessor_532[]; const int accessor_532__length = 2; -const int accessor_532__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_532__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_533[]; +extern const XMFLOAT3 accessor_533[]; const int accessor_533__length = 2; -const int accessor_533__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_533__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_534[]; +extern const XMFLOAT4 accessor_534[]; const int accessor_534__length = 2; -const int accessor_534__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_534__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_535[]; +extern const XMFLOAT3 accessor_535[]; const int accessor_535__length = 2; -const int accessor_535__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_535__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_536[]; +extern const XMFLOAT3 accessor_536[]; const int accessor_536__length = 2; -const int accessor_536__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_536__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_537[]; +extern const XMFLOAT4 accessor_537[]; const int accessor_537__length = 2; -const int accessor_537__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_537__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_538[]; +extern const XMFLOAT3 accessor_538[]; const int accessor_538__length = 2; -const int accessor_538__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_538__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_539[]; +extern const XMFLOAT3 accessor_539[]; const int accessor_539__length = 2; -const int accessor_539__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_539__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_540[]; +extern const XMFLOAT4 accessor_540[]; const int accessor_540__length = 2; -const int accessor_540__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_540__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_541[]; +extern const XMFLOAT3 accessor_541[]; const int accessor_541__length = 2; -const int accessor_541__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_541__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_542[]; +extern const XMFLOAT3 accessor_542[]; const int accessor_542__length = 2; -const int accessor_542__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_542__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_543[]; +extern const XMFLOAT4 accessor_543[]; const int accessor_543__length = 2; -const int accessor_543__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_543__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_544[]; +extern const XMFLOAT3 accessor_544[]; const int accessor_544__length = 2; -const int accessor_544__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_544__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_545[]; +extern const XMFLOAT3 accessor_545[]; const int accessor_545__length = 2; -const int accessor_545__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_545__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_546[]; +extern const XMFLOAT4 accessor_546[]; const int accessor_546__length = 2; -const int accessor_546__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_546__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_547[]; 
+extern const XMFLOAT3 accessor_547[]; const int accessor_547__length = 2; -const int accessor_547__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_547__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_548[]; +extern const XMFLOAT3 accessor_548[]; const int accessor_548__length = 2; -const int accessor_548__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_548__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_549[]; +extern const XMFLOAT4 accessor_549[]; const int accessor_549__length = 2; -const int accessor_549__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_549__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_550[]; +extern const XMFLOAT3 accessor_550[]; const int accessor_550__length = 2; -const int accessor_550__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_550__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_551[]; +extern const XMFLOAT3 accessor_551[]; const int accessor_551__length = 2; -const int accessor_551__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_551__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_552[]; +extern const XMFLOAT4 accessor_552[]; const int accessor_552__length = 2; -const int accessor_552__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_552__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_553[]; +extern const XMFLOAT3 accessor_553[]; const int accessor_553__length = 2; -const int accessor_553__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_553__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_554[]; +extern const XMFLOAT3 accessor_554[]; const int accessor_554__length = 2; -const int accessor_554__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_554__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_555[]; +extern const XMFLOAT4 accessor_555[]; const int accessor_555__length = 2; -const int accessor_555__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_555__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_556[]; +extern const XMFLOAT3 accessor_556[]; const int accessor_556__length = 2; -const int accessor_556__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_556__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_557[]; +extern const XMFLOAT3 accessor_557[]; const int accessor_557__length = 2; -const int accessor_557__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_557__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_558[]; +extern const XMFLOAT4 accessor_558[]; const int accessor_558__length = 2; -const int accessor_558__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_558__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_559[]; +extern const XMFLOAT3 accessor_559[]; const int accessor_559__length = 2; -const int accessor_559__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_559__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_560[]; +extern const XMFLOAT3 accessor_560[]; const int accessor_560__length = 2; -const int accessor_560__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_560__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_561[]; +extern const XMFLOAT4 accessor_561[]; const int accessor_561__length = 2; -const int accessor_561__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_561__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_562[]; +extern const XMFLOAT3 accessor_562[]; const 
int accessor_562__length = 2; -const int accessor_562__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_562__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_563[]; +extern const XMFLOAT3 accessor_563[]; const int accessor_563__length = 2; -const int accessor_563__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_563__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_564[]; +extern const XMFLOAT4 accessor_564[]; const int accessor_564__length = 2; -const int accessor_564__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_564__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_565[]; +extern const XMFLOAT3 accessor_565[]; const int accessor_565__length = 2; -const int accessor_565__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_565__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_566[]; +extern const XMFLOAT3 accessor_566[]; const int accessor_566__length = 2; -const int accessor_566__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_566__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_567[]; +extern const XMFLOAT4 accessor_567[]; const int accessor_567__length = 2; -const int accessor_567__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_567__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_568[]; +extern const XMFLOAT3 accessor_568[]; const int accessor_568__length = 2; -const int accessor_568__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_568__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_569[]; +extern const XMFLOAT3 accessor_569[]; const int accessor_569__length = 2; -const int accessor_569__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_569__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_570[]; +extern const XMFLOAT4 accessor_570[]; const int accessor_570__length = 2; -const int accessor_570__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_570__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_571[]; +extern const XMFLOAT3 accessor_571[]; const int accessor_571__length = 2; -const int accessor_571__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_571__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_572[]; +extern const XMFLOAT3 accessor_572[]; const int accessor_572__length = 2; -const int accessor_572__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_572__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_573[]; +extern const XMFLOAT4 accessor_573[]; const int accessor_573__length = 2; -const int accessor_573__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_573__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_574[]; +extern const XMFLOAT3 accessor_574[]; const int accessor_574__length = 2; -const int accessor_574__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_574__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_575[]; +extern const XMFLOAT3 accessor_575[]; const int accessor_575__length = 22; -const int accessor_575__size = (sizeof (D3DXVECTOR3)) * 22; +const int accessor_575__size = (sizeof (XMFLOAT3)) * 22; -extern const D3DXVECTOR4 accessor_576[]; +extern const XMFLOAT4 accessor_576[]; const int accessor_576__length = 2; -const int accessor_576__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_576__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_577[]; +extern const XMFLOAT3 accessor_577[]; const int accessor_577__length = 2; -const int 
accessor_577__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_577__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_578[]; +extern const XMFLOAT3 accessor_578[]; const int accessor_578__length = 2; -const int accessor_578__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_578__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_579[]; +extern const XMFLOAT4 accessor_579[]; const int accessor_579__length = 2; -const int accessor_579__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_579__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_580[]; +extern const XMFLOAT3 accessor_580[]; const int accessor_580__length = 2; -const int accessor_580__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_580__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_581[]; +extern const XMFLOAT3 accessor_581[]; const int accessor_581__length = 2; -const int accessor_581__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_581__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_582[]; +extern const XMFLOAT4 accessor_582[]; const int accessor_582__length = 2; -const int accessor_582__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_582__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_583[]; +extern const XMFLOAT3 accessor_583[]; const int accessor_583__length = 2; -const int accessor_583__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_583__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_584[]; +extern const XMFLOAT3 accessor_584[]; const int accessor_584__length = 2; -const int accessor_584__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_584__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_585[]; +extern const XMFLOAT4 accessor_585[]; const int accessor_585__length = 2; -const int accessor_585__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_585__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_586[]; +extern const XMFLOAT3 accessor_586[]; const int accessor_586__length = 2; -const int accessor_586__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_586__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_587[]; +extern const XMFLOAT3 accessor_587[]; const int accessor_587__length = 2; -const int accessor_587__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_587__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_588[]; +extern const XMFLOAT4 accessor_588[]; const int accessor_588__length = 2; -const int accessor_588__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_588__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_589[]; +extern const XMFLOAT3 accessor_589[]; const int accessor_589__length = 2; -const int accessor_589__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_589__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_590[]; +extern const XMFLOAT3 accessor_590[]; const int accessor_590__length = 2; -const int accessor_590__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_590__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_591[]; +extern const XMFLOAT4 accessor_591[]; const int accessor_591__length = 2; -const int accessor_591__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_591__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_592[]; +extern const XMFLOAT3 accessor_592[]; const int accessor_592__length = 2; -const int accessor_592__size = (sizeof (D3DXVECTOR3)) 
* 2; +const int accessor_592__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_593[]; +extern const XMFLOAT3 accessor_593[]; const int accessor_593__length = 22; -const int accessor_593__size = (sizeof (D3DXVECTOR3)) * 22; +const int accessor_593__size = (sizeof (XMFLOAT3)) * 22; -extern const D3DXVECTOR4 accessor_594[]; +extern const XMFLOAT4 accessor_594[]; const int accessor_594__length = 2; -const int accessor_594__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_594__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_595[]; +extern const XMFLOAT3 accessor_595[]; const int accessor_595__length = 2; -const int accessor_595__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_595__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_596[]; +extern const XMFLOAT3 accessor_596[]; const int accessor_596__length = 2; -const int accessor_596__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_596__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_597[]; +extern const XMFLOAT4 accessor_597[]; const int accessor_597__length = 2; -const int accessor_597__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_597__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_598[]; +extern const XMFLOAT3 accessor_598[]; const int accessor_598__length = 2; -const int accessor_598__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_598__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_599[]; +extern const XMFLOAT3 accessor_599[]; const int accessor_599__length = 2; -const int accessor_599__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_599__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_600[]; +extern const XMFLOAT4 accessor_600[]; const int accessor_600__length = 2; -const int accessor_600__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_600__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_601[]; +extern const XMFLOAT3 accessor_601[]; const int accessor_601__length = 2; -const int accessor_601__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_601__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR3 accessor_602[]; +extern const XMFLOAT3 accessor_602[]; const int accessor_602__length = 2; -const int accessor_602__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_602__size = (sizeof (XMFLOAT3)) * 2; -extern const D3DXVECTOR4 accessor_603[]; +extern const XMFLOAT4 accessor_603[]; const int accessor_603__length = 2; -const int accessor_603__size = (sizeof (D3DXVECTOR4)) * 2; +const int accessor_603__size = (sizeof (XMFLOAT4)) * 2; -extern const D3DXVECTOR3 accessor_604[]; +extern const XMFLOAT3 accessor_604[]; const int accessor_604__length = 2; -const int accessor_604__size = (sizeof (D3DXVECTOR3)) * 2; +const int accessor_604__size = (sizeof (XMFLOAT3)) * 2; extern const Node node_0; extern const Node node_1; @@ -3084,3 +3088,5 @@ const int animation_3__channels__length = 117; extern const AnimationChannel animation_4__channels[]; const int animation_4__channels__length = 117; +} +#endif diff --git a/models/robot_player/robot_player.glb b/models/robot_player/robot_player.glb new file mode 100644 index 0000000000000000000000000000000000000000..8c4a4cfc820ead8865b58fcc087868b86f7d3b9f GIT binary patch literal 239380 zcmeEv2b>f|_J6mS4g)64dbo<=T$brHGrh~skOc)nf*E0f1y^AgcNY*5c1@U13>Z;R zG2tC5K}oxdIh@(kGo10%Gn@g>aHiw`t)A(buI}paZ14WR-~T_mpXsjdsaN&ty>Gn= z?{&}QDWe8%CP~sRcXX15AGn!x_{d>L&MvEIYN~B6tDIdnxwgKxv8K79v8=M}h?5%| 
diff --git a/src/cube.cpp b/src/cube.cpp
--- a/src/cube.cpp
+++ b/src/cube.cpp
-#include <d3dx10.h>
-#include "gltf.hpp"
-#include "cube.hpp"
-namespace cube {
-const D3DXVECTOR3 accessor_0[] = {
- D3DXVECTOR3(-1.0000000f, -1.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, -1.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, -1.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, -1.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, -1.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, -1.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 1.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 1.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 1.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, -1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, -1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, -1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, -1.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, -1.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, -1.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, -1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_1[] = {
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
-};
-
-const D3DXVECTOR2 accessor_2[] = {
- D3DXVECTOR2( 0.3750000f, 0.0000000f),
- D3DXVECTOR2( 0.1250000f, 0.2500000f),
- D3DXVECTOR2( 0.3750000f, 1.0000000f),
- D3DXVECTOR2( 0.6250000f, 0.0000000f),
- D3DXVECTOR2( 0.8750000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 1.0000000f),
- D3DXVECTOR2( 0.3750000f, 0.7500000f),
- D3DXVECTOR2( 0.1250000f, 0.5000000f),
- D3DXVECTOR2( 0.3750000f, 0.7500000f),
- D3DXVECTOR2( 0.6250000f, 0.7500000f),
- D3DXVECTOR2( 0.8750000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f,
0.7500000f), - D3DXVECTOR2( 0.3750000f, 0.2500000f), - D3DXVECTOR2( 0.3750000f, 0.2500000f), - D3DXVECTOR2( 0.3750000f, 0.2500000f), - D3DXVECTOR2( 0.6250000f, 0.2500000f), - D3DXVECTOR2( 0.6250000f, 0.2500000f), - D3DXVECTOR2( 0.6250000f, 0.2500000f), - D3DXVECTOR2( 0.3750000f, 0.5000000f), - D3DXVECTOR2( 0.3750000f, 0.5000000f), - D3DXVECTOR2( 0.3750000f, 0.5000000f), - D3DXVECTOR2( 0.6250000f, 0.5000000f), - D3DXVECTOR2( 0.6250000f, 0.5000000f), - D3DXVECTOR2( 0.6250000f, 0.5000000f), -}; - -const DWORD accessor_3[] = { - 2, - 5, - 11, - 2, - 11, - 8, - 6, - 9, - 21, - 6, - 21, - 18, - 20, - 23, - 17, - 20, - 17, - 14, - 12, - 15, - 3, - 12, - 3, - 0, - 7, - 19, - 13, - 7, - 13, - 1, - 22, - 10, - 4, - 22, - 4, - 16, -}; - -const Mesh mesh_0 = { - accessor_0, // position - accessor_0__size, - accessor_1, // normal - accessor_1__size, - accessor_2, // texcoord_0 - accessor_2__size, - NULL, // weights_0 - 0, - NULL, // joints_0 - 0, - accessor_3, // indices - accessor_3__size, -}; - -const Node node_0 = { - (DWORD)-1, // parent_ix - NULL, // skin - &mesh_0, // mesh - D3DXVECTOR3( 0.0000000f, 0.0000000f, 0.0000000f), // translation - D3DXQUATERNION( 0.0000000f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node * nodes[] = { - &node_0, -}; - -} +#include "directxmath/directxmath.h" +#include "gltf.hpp" +#include "cube.hpp" +namespace cube { +const XMFLOAT3 accessor_0[] = { + XMFLOAT3(-1.0000000f, -1.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, -1.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, -1.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, -1.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, -1.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, -1.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 1.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 1.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 1.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, -1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, -1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, -1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, -1.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, -1.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, -1.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, -1.0000000f), +}; + +const XMFLOAT3 accessor_1[] = { + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + 
XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), +}; + +const XMFLOAT2 accessor_2[] = { + XMFLOAT2( 0.3750000f, 0.0000000f), + XMFLOAT2( 0.1250000f, 0.2500000f), + XMFLOAT2( 0.3750000f, 1.0000000f), + XMFLOAT2( 0.6250000f, 0.0000000f), + XMFLOAT2( 0.8750000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 1.0000000f), + XMFLOAT2( 0.3750000f, 0.7500000f), + XMFLOAT2( 0.1250000f, 0.5000000f), + XMFLOAT2( 0.3750000f, 0.7500000f), + XMFLOAT2( 0.6250000f, 0.7500000f), + XMFLOAT2( 0.8750000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.7500000f), + XMFLOAT2( 0.3750000f, 0.2500000f), + XMFLOAT2( 0.3750000f, 0.2500000f), + XMFLOAT2( 0.3750000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.3750000f, 0.5000000f), + XMFLOAT2( 0.3750000f, 0.5000000f), + XMFLOAT2( 0.3750000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), +}; + +const int accessor_3[] = { + 2, + 5, + 11, + 2, + 11, + 8, + 6, + 9, + 21, + 6, + 21, + 18, + 20, + 23, + 17, + 20, + 17, + 14, + 12, + 15, + 3, + 12, + 3, + 0, + 7, + 19, + 13, + 7, + 13, + 1, + 22, + 10, + 4, + 22, + 4, + 16, +}; + +const Mesh mesh_0 = { + accessor_0, // position + accessor_0__size, + accessor_1, // normal + accessor_1__size, + accessor_2, // texcoord_0 + accessor_2__size, + NULL, // weights_0 + 0, + NULL, // joints_0 + 0, + accessor_3, // indices + accessor_3__size, +}; + +const Node node_0 = { + (int)-1, // parent_ix + NULL, // skin + &mesh_0, // mesh + XMFLOAT3( 0.0000000f, 0.0000000f, 0.0000000f), // translation + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node * nodes[] = { + &node_0, +}; + +} diff --git a/src/main.cpp b/src/main.cpp index bb7edf3..32b1253 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1,9 +1,10 @@ #include #include -#include #include #include +#include "directxmath/directxmath.h" + #include "globals.hpp" #include "print.hpp" #include "render_state.hpp" @@ -12,7 +13,7 @@ #include "gltf_instance.hpp" #include "robot_player.hpp" -#define ROOT_MESH_NODE node_39 +#define ROOT_MESH_NODE robot_player::node_39 #include "cube.hpp" HINSTANCE g_hInstance = NULL; @@ -40,10 +41,10 @@ ID3D10EffectMatrixVariable * g_pJointVariable = NULL; ID3D10EffectVectorVariable * g_pLightDirVariable = NULL; ID3D10EffectVectorVariable * g_pLightColorVariable = NULL; ID3D10EffectShaderResourceVariable * g_pDiffuseVariable = NULL; -D3DXMATRIX g_World1; -D3DXMATRIX g_World2; -D3DXMATRIX g_View; -D3DXMATRIX g_Projection; +XMMATRIX g_World1; +XMMATRIX g_World2; +XMMATRIX g_View; +XMMATRIX g_Projection; // bloom ID3D10RenderTargetView * g_pRenderTargetViewTexture[2] = { NULL, NULL }; @@ -67,7 +68,7 @@ int g_bloomPasses = 4; float g_exposure = 3.4f; #endif -typedef D3DXVECTOR2 BloomVertex; +typedef XMFLOAT2 BloomVertex; // font ID3D10Effect * g_pEffectFont = NULL; @@ -82,7 +83,7 @@ ID3D10EffectVectorVariable * g_pTexScaleVariableFont = NULL; ID3D10EffectShaderResourceVariable * g_pDiffuseVariableFont = NULL; const int g_iFontBufferLength = 512; -typedef D3DXVECTOR4 FontVertex; +typedef XMFLOAT4 FontVertex; // perlin ID3D10Effect * 
g_pEffectVolume = NULL; @@ -112,10 +113,10 @@ ID3D10Buffer * g_pVertexBufferCube[g_dwVertexBufferCountCube]; ID3D10Buffer * g_pIndexBufferCube = NULL; // lights -D3DXVECTOR4 g_vLightDirs[2]; -D3DXVECTOR4 g_vLightColors[2] = { - D3DXVECTOR4(0.0f, 0.9f, 0.9f, 1.0f), - D3DXVECTOR4(0.9f, 0.0f, 0.0f, 1.0f) +XMFLOAT4 g_vLightDirs[2]; +XMFLOAT4 g_vLightColors[2] = { + XMFLOAT4(0.0f, 0.9f, 0.9f, 1.0f), + XMFLOAT4(0.9f, 0.0f, 0.0f, 1.0f) }; // forward declarations @@ -608,16 +609,16 @@ HRESULT InitVolumeBuffers() D3D10_BUFFER_DESC bd; D3D10_SUBRESOURCE_DATA initData; - const D3DXVECTOR2 position[] = { - D3DXVECTOR2(-1, 1), - D3DXVECTOR2( 1, 1), - D3DXVECTOR2(-1, -1), - D3DXVECTOR2( 1, -1), + const XMFLOAT2 position[] = { + XMFLOAT2(-1, 1), + XMFLOAT2( 1, 1), + XMFLOAT2(-1, -1), + XMFLOAT2( 1, -1), }; // position bd.Usage = D3D10_USAGE_DEFAULT; - bd.ByteWidth = (sizeof (D3DXVECTOR2)) * 4; + bd.ByteWidth = (sizeof (XMFLOAT2)) * 4; bd.BindFlags = D3D10_BIND_VERTEX_BUFFER; bd.CPUAccessFlags = 0; bd.MiscFlags = 0; @@ -632,11 +633,11 @@ HRESULT InitVolumeBuffers() } // +Y is up -D3DXVECTOR2 vtxBloom[] = { - D3DXVECTOR2(-1, -1), // top left - D3DXVECTOR2(-1, 1), // top right - D3DXVECTOR2( 1, -1), // bottom left - D3DXVECTOR2( 1, 1), // bottom right +XMFLOAT2 vtxBloom[] = { + XMFLOAT2(-1, -1), // top left + XMFLOAT2(-1, 1), // top right + XMFLOAT2( 1, -1), // bottom left + XMFLOAT2( 1, 1), // bottom right }; HRESULT InitBloomBuffers() @@ -1038,23 +1039,22 @@ HRESULT InitDirect3DDevice() // transform matrices ////////////////////////////////////////////////////////////////////// - D3DXMatrixIdentity(&g_World1); - D3DXMatrixIdentity(&g_World2); + g_World1 = XMMatrixIdentity(); + g_World2 = XMMatrixIdentity(); - D3DXVECTOR3 Eye(0.0f, 1.0f, -2.0f); - D3DXVECTOR3 At(0.0f, 1.0f, 0.0f); - D3DXVECTOR3 Up(0.0f, 1.0f, 0.0f); - D3DXMatrixLookAtLH(&g_View, &Eye, &At, &Up); + XMVECTOR Eye = XMVectorSet(0.0f, 1.0f, -2.0f, 0.0f); + XMVECTOR At = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f); + XMVECTOR Up = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f); + g_View = XMMatrixLookAtLH(Eye, At, Up); - float fFov = (float)D3DX_PI * 0.5f; + float fFov = XM_PI * 0.5f; float fAspect = width / (float)height; float fNear = 0.1f; float fFar = 100.0f; - D3DXMatrixPerspectiveFovLH(&g_Projection, - fFov, - fAspect, - fNear, - fFar); + g_Projection = XMMatrixPerspectiveFovLH(fFov, + fAspect, + fNear, + fFar); return S_OK; } @@ -1086,31 +1086,27 @@ BOOL Resize() InitDirect3DViews(); - float fFov = (float)D3DX_PI * 0.5f; + float fFov = XM_PI * 0.5f; float fAspect = width / (float)height; float fNear = 0.1f; float fFar = 100.0f; - D3DXMatrixPerspectiveFovLH(&g_Projection, - fFov, - fAspect, - fNear, - fFar); + g_Projection = XMMatrixPerspectiveFovLH(fFov, + fAspect, + fNear, + fFar); return true; } -static inline D3DXMATRIX MatrixTRS(const D3DXVECTOR3 * translation, - const D3DXQUATERNION * rotation, - const D3DXVECTOR3 * scaling) +static inline XMMATRIX MatrixTRS(FXMVECTOR translation, + FXMVECTOR rotation, + FXMVECTOR scaling) { - D3DXMATRIX mTranslation; - D3DXMatrixTranslation(&mTranslation, translation->x, translation->y, translation->z); + XMMATRIX mTranslation = XMMatrixTranslationFromVector(translation); - D3DXMATRIX mRotation; - D3DXMatrixRotationQuaternion(&mRotation, rotation); + XMMATRIX mRotation = XMMatrixRotationQuaternion(rotation); - D3DXMATRIX mScaling; - D3DXMatrixScaling(&mScaling, scaling->x, scaling->y, scaling->z); + XMMATRIX mScaling = XMMatrixScalingFromVector(scaling); //return mScaling * mRotation * mTranslation; return mScaling * 
mRotation * mTranslation; @@ -1141,34 +1137,27 @@ static inline float Lerp(const float * frames, float t, int frame_ix) return (t - frames[frame_ix]) / (frames[frame_ix + 1] - frames[frame_ix]); } -const int joints_length = skin_0__joints__length; -D3DXMATRIX mJoints[joints_length]; -NodeInstance node_inst[nodes__length]; +const int joints_length = robot_player::skin_0__joints__length; +XMMATRIX mJoints[joints_length]; +NodeInstance node_inst[robot_player::nodes__length]; void InitializeNodeInstances() { - for (int i = 0; i < nodes__length; i++) { - node_inst[i].translation = nodes[i]->translation; - node_inst[i].rotation = nodes[i]->rotation; - node_inst[i].scale = nodes[i]->scale; + const Node ** nodes = robot_player::nodes; + for (int i = 0; i < robot_player::nodes__length; i++) { + node_inst[i].translation = XMLoadFloat3((const XMFLOAT3 *)&nodes[i]->translation); + node_inst[i].rotation = XMLoadFloat4((const XMFLOAT4 *)&nodes[i]->rotation); + node_inst[i].scale = XMLoadFloat3((const XMFLOAT3 *)&nodes[i]->scale); } } -void VectorLerp(D3DXVECTOR3 * output, - const D3DXVECTOR3 * a, - const D3DXVECTOR3 * b, - const float t) -{ - *output = *a + t * (*b - *a); -} - -D3DXMATRIX GlobalTransform(int node_ix) +XMMATRIX GlobalTransform(int node_ix) { const NodeInstance * instance = &node_inst[node_ix]; - const Node * node = nodes[node_ix]; - D3DXMATRIX local_transform = MatrixTRS(&instance->translation, - &instance->rotation, - &instance->scale); + const Node * node = robot_player::nodes[node_ix]; + XMMATRIX local_transform = MatrixTRS(XMLoadFloat3((const XMFLOAT3*)&instance->translation), + XMLoadFloat4((const XMFLOAT4*)&instance->rotation), + XMLoadFloat3((const XMFLOAT3*)&instance->scale)); if (((int)node->parent_ix) != 40) { return local_transform * GlobalTransform(node->parent_ix); } else { @@ -1178,8 +1167,8 @@ D3DXMATRIX GlobalTransform(int node_ix) void Animate(float t) { - const AnimationChannel * channels = animation_1__channels; - const int channels_length = animation_1__channels__length; + const AnimationChannel * channels = robot_player::animation_1__channels; + const int channels_length = robot_player::animation_1__channels__length; t = loop(t, 3.75); @@ -1197,29 +1186,26 @@ void Animate(float t) switch (channels[i].target.path) { case ACP__TRANSLATION: { - const D3DXVECTOR3 * output = (const D3DXVECTOR3 *)sampler->output; - VectorLerp(&instance->translation, - &output[frame_ix], - &output[frame_ix+1], - lerp); + const XMFLOAT3 * output = (const XMFLOAT3 *)sampler->output; + instance->translation = XMVectorLerp(XMLoadFloat3(&output[frame_ix]), + XMLoadFloat3(&output[frame_ix+1]), + lerp); break; } case ACP__ROTATION: { - const D3DXQUATERNION * output = (const D3DXQUATERNION *)sampler->output; - D3DXQuaternionSlerp(&instance->rotation, - &output[frame_ix], - &output[frame_ix+1], - lerp); + const XMFLOAT4 * output = (const XMFLOAT4 *)sampler->output; + instance->rotation = XMQuaternionSlerp(XMLoadFloat4(&output[frame_ix]), + XMLoadFloat4(&output[frame_ix+1]), + lerp); break; } case ACP__SCALE: { - const D3DXVECTOR3 * output = (const D3DXVECTOR3 *)sampler->output; - VectorLerp(&instance->scale, - &output[frame_ix], - &output[frame_ix+1], - lerp); + const XMFLOAT3 * output = (const XMFLOAT3 *)sampler->output; + instance->scale = XMVectorLerp(XMLoadFloat3(&output[frame_ix]), + XMLoadFloat3(&output[frame_ix+1]), + lerp); break; } default: @@ -1230,11 +1216,11 @@ void Animate(float t) // transform all joints const Skin * skin = ROOT_MESH_NODE.skin; - for (DWORD i = 0; i < 
skin->joints_length; i++) { + for (int i = 0; i < skin->joints_length; i++) { const int joint_ix = skin->joints[i]; assert(joint_ix >= 0); - const D3DXMATRIX& inverse_bind_matrix = skin->inverse_bind_matrices[i]; + const XMMATRIX inverse_bind_matrix = XMMATRIX((const float*)&skin->inverse_bind_matrices[i]); mJoints[i] = inverse_bind_matrix * GlobalTransform(joint_ix); } } @@ -1242,17 +1228,13 @@ void Animate(float t) void RenderModel(float t) { for (int i = 0; i < joints_length; i++) { - D3DXMatrixIdentity(&mJoints[i]); + mJoints[i] = XMMatrixIdentity(); } Animate(t); - D3DXMATRIX rx; - D3DXMATRIX ry; - D3DXMatrixRotationY(&ry, (float)D3DX_PI * -1.0f + t); - D3DXMatrixRotationX(&rx, (float)D3DX_PI * -0.0f); - D3DXMatrixMultiply(&g_World1, - &rx, - &ry); + XMMATRIX rx = XMMatrixRotationX(XM_PI * -0.0f); + XMMATRIX ry = XMMatrixRotationY(XM_PI * -1.0f + t); + g_World1 = XMMatrixMultiply(rx, ry); // matrices g_pViewVariable->SetMatrix((float *)&g_View); @@ -1313,20 +1295,18 @@ void RenderMeshStatic(const Mesh * mesh, float t) int indices_length = mesh->indices_size / (sizeof (DWORD)); for (int m = 0; m < 2; m++) { - D3DXMATRIX mLight; - D3DXMATRIX mLightScale; - D3DXVECTOR3 vDir = D3DXVECTOR3(g_vLightDirs[m]); - D3DXVECTOR3 vLightPos = vDir * (1.25f * (m + 1)); - D3DXMATRIX mLightRotate; - D3DXMatrixRotationX(&mLightRotate, t * (1 + -2 * m)); - D3DXMatrixTranslation(&mLight, vLightPos.x, vLightPos.y, vLightPos.z); - D3DXMatrixScaling(&mLightScale, 0.05f, 0.05f, 0.05f); - mLight = mLightRotate * mLightScale * mLight; + XMVECTOR vDir = XMLoadFloat4(&g_vLightDirs[m]); + XMVECTOR vLightPos = vDir * (1.25f * (m + 1)); + + XMMATRIX mLightRotate = XMMatrixRotationX(t * (1 + -2 * m)); + XMMATRIX mLightTranslation = XMMatrixTranslationFromVector(vLightPos); + XMMATRIX mLightScale = XMMatrixScaling(0.05f, 0.05f, 0.05f); + + XMMATRIX mLight = mLightRotate * mLightScale * mLightTranslation; g_pWorldVariableStatic->SetMatrix((float *)&mLight); - D3DXMATRIX mLightNormal; - D3DXMatrixTranspose(&mLightNormal, D3DXMatrixInverse(&mLightNormal, NULL, &mLight)); + XMMATRIX mLightNormal = XMMatrixTranspose(XMMatrixInverse(NULL, mLight)); g_pWorldNormalVariableStatic->SetMatrix((float *)&mLightNormal); g_pOutputColorVariableStatic->SetFloatVector((float *)&g_vLightColors[m]); @@ -1412,16 +1392,14 @@ void RenderFont() // effect variables ////////////////////////////////////////////////////////////////////// - D3DXVECTOR2 invScreenSize = D3DXVECTOR2(2.0f / (float)g_ViewportSize.Width, - 2.0f / (float)g_ViewportSize.Height); + XMFLOAT2 invScreenSize = XMFLOAT2(2.0f / (float)g_ViewportSize.Width, + 2.0f / (float)g_ViewportSize.Height); - D3DXVECTOR2 position = D3DXVECTOR2(6, 0); - D3DXVECTOR2 glyphScale = D3DXVECTOR2((float)g_FontSize.Glyph.Width, - (float)g_FontSize.Glyph.Height); + XMFLOAT2 glyphScale = XMFLOAT2((float)g_FontSize.Glyph.Width, + (float)g_FontSize.Glyph.Height); - D3DXVECTOR2 charCoord = D3DXVECTOR2(16, 0); - D3DXVECTOR2 texScale = D3DXVECTOR2(glyphScale.x / (float)g_FontSize.Texture.Width, - glyphScale.y / (float)g_FontSize.Texture.Height); + XMFLOAT2 texScale = XMFLOAT2(glyphScale.x / (float)g_FontSize.Texture.Width, + glyphScale.y / (float)g_FontSize.Texture.Height); g_pInvScreenSizeVariableFont->SetFloatVector((float *)&invScreenSize); g_pGlyphScaleVariableFont->SetFloatVector((float *)&glyphScale); @@ -1453,8 +1431,8 @@ void RenderBloom() // effect variables ////////////////////////////////////////////////////////////////////// - D3DXVECTOR2 invScreenSize = D3DXVECTOR2(1.0f / 
(float)g_ViewportSize.Width, - 1.0f / (float)g_ViewportSize.Height); + XMFLOAT2 invScreenSize = XMFLOAT2(1.0f / (float)g_ViewportSize.Width, + 1.0f / (float)g_ViewportSize.Height); g_pInvScreenSizeVariableBloom->SetFloatVector((float *)&invScreenSize); @@ -1473,11 +1451,11 @@ void RenderBloom() g_pExposureVariableBloom->SetFloat(g_exposure); - D3DXVECTOR2 dirHorizontal = D3DXVECTOR2(1.0, 0.0); - D3DXVECTOR2 dirVertical = D3DXVECTOR2(0.0, 1.0); + XMFLOAT2 dirHorizontal = XMFLOAT2(1.0, 0.0); + XMFLOAT2 dirVertical = XMFLOAT2(0.0, 1.0); // horizontal - g_pDirVariableBloom->SetFloatVector((float *)dirHorizontal); + g_pDirVariableBloom->SetFloatVector((float *)&dirHorizontal); g_pDiffuseAVariableBloom->SetResource(g_pRenderTargetShaderResourceViewTexture[0]); g_pd3dDevice->OMSetRenderTargets(1, &g_pRenderTargetViewTexture[1], NULL); for (UINT p = 0; p < techDesc.Passes; p++) { @@ -1492,7 +1470,7 @@ void RenderBloom() g_pd3dDevice->PSSetShaderResources(0, 1, srv); // vertical - g_pDirVariableBloom->SetFloatVector((float *)dirVertical); + g_pDirVariableBloom->SetFloatVector((float *)&dirVertical); g_pDiffuseAVariableBloom->SetResource(g_pRenderTargetShaderResourceViewTexture[1]); g_pd3dDevice->OMSetRenderTargets(1, &g_pRenderTargetViewTexture[0], NULL); for (UINT p = 0; p < techDesc.Passes; p++) { @@ -1503,7 +1481,7 @@ void RenderBloom() g_pd3dDevice->PSSetShaderResources(0, 1, srv); // horizontal - g_pDirVariableBloom->SetFloatVector((float *)dirHorizontal); + g_pDirVariableBloom->SetFloatVector((float *)&dirHorizontal); g_pDiffuseAVariableBloom->SetResource(g_pRenderTargetShaderResourceViewTexture[0]); g_pd3dDevice->OMSetRenderTargets(1, &g_pRenderTargetViewTexture[1], NULL); for (UINT p = 0; p < techDesc.Passes; p++) { @@ -1516,7 +1494,7 @@ void RenderBloom() g_pTechniqueBloomBlend->GetDesc(&techDescBlend); // vertical - g_pDirVariableBloom->SetFloatVector((float *)dirVertical); + g_pDirVariableBloom->SetFloatVector((float *)&dirVertical); g_pDiffuseAVariableBloom->SetResource(g_pRenderTargetShaderResourceViewTexture[1]); g_pd3dDevice->OMSetRenderTargets(1, &g_pRenderTargetView, NULL); @@ -1528,27 +1506,25 @@ void RenderBloom() void Update(float t) { - D3DXVECTOR4 vLightDirs[2] = { - D3DXVECTOR4(-0.577f, 0.577f, 0.0, 1.0f), - D3DXVECTOR4(1.0f, 1.5f, 0.0f, 1.0f), + XMVECTOR vLightDirs[2] = { + {-0.577f, 0.577f, 0.0, 1.0}, + {1.0f, 1.5f, 0.0f, 1.0}, }; - D3DXVec4Normalize(&vLightDirs[0], &vLightDirs[0]); - D3DXVec4Normalize(&vLightDirs[1], &vLightDirs[1]); - D3DXMATRIX mRotate; - D3DXVECTOR4 vOutDir; - D3DXMatrixRotationY(&mRotate, -1.0f * t); - D3DXVec3Transform(&g_vLightDirs[1], (D3DXVECTOR3 *)&vLightDirs[1], &mRotate); + XMMATRIX mRotate1 = XMMatrixRotationY(-1.0f * t); + XMVECTOR lightDir1 = XMVector4Transform(vLightDirs[1], mRotate1); + XMStoreFloat4(&g_vLightDirs[1], lightDir1); - D3DXMatrixRotationY(&mRotate, 0.4f * t); - D3DXVec3Transform(&g_vLightDirs[0], (D3DXVECTOR3 *)&vLightDirs[0], &mRotate); + XMMATRIX mRotate0 = XMMatrixRotationY(0.4f * t); + XMVECTOR lightDir0 = XMVector4Transform(vLightDirs[0], mRotate0); + XMStoreFloat4(&g_vLightDirs[0], lightDir0); } void RenderVolume(float t) { UINT stride[] = { - (sizeof (D3DXVECTOR2)), + (sizeof (XMFLOAT2)), }; UINT offset[] = { 0 }; g_pd3dDevice->IASetInputLayout(g_pVertexLayoutVolume); @@ -1590,16 +1566,14 @@ void RenderVolumeMesh() D3D10_TECHNIQUE_DESC techDesc; g_pTechniqueStatic->GetDesc(&techDesc); - D3DXMATRIX mWorldScale; - D3DXMATRIX mWorldTranslate; - D3DXMatrixScaling(&mWorldScale, 0.2f, 0.2f, 0.2f); - 
D3DXMatrixTranslation(&mWorldTranslate, 0.5f, 0.5f, 0.5f); - D3DXMATRIX mWorld = mWorldScale * mWorldTranslate; + XMMATRIX mWorldScale = XMMatrixScaling(0.2f, 0.2f, 0.2f); + XMMATRIX mWorldTranslate = XMMatrixTranslation(0.5f, 0.5f, 0.5f); + XMMATRIX mWorld = mWorldScale * mWorldTranslate; g_pWorldVariableStatic->SetMatrix((float *)&mWorld); - D3DXMatrixIdentity(&mWorld); - g_pWorldNormalVariableStatic->SetMatrix((float *)&mWorld); - D3DXVECTOR4 vColor = D3DXVECTOR4(0.0f, 0.9f, 0.0f, 1.0f) ; + XMMATRIX mWorldNormal = XMMatrixIdentity(); + g_pWorldNormalVariableStatic->SetMatrix((float *)&mWorldNormal); + XMVECTOR vColor = XMVectorSet(0.0f, 0.9f, 0.0f, 1.0f); g_pOutputColorVariableStatic->SetFloatVector((float *)&vColor); for (UINT p = 0; p < techDesc.Passes; p++) { @@ -1612,7 +1586,7 @@ void Render() { static float t = 0.0f; #ifdef _DEBUG - t += (float)D3DX_PI * 0.0125f * 0.5; + t += XM_PI * 0.0125f * 0.5f; #else static DWORD dwTimeStart = 0; DWORD dwTimeCur = GetTickCount(); @@ -1646,7 +1620,7 @@ void Render() //RenderBloom(); //print("%f\n", t); //RenderVolume(t); - RenderVolumeMesh(); + //RenderVolumeMesh(); // present g_pSwapChain->Present(0, 0); diff --git a/src/render_state.cpp b/src/render_state.cpp index 543977f..53d75f2 100644 --- a/src/render_state.cpp +++ b/src/render_state.cpp @@ -2,7 +2,6 @@ #include #include -#include #include "globals.hpp" #include "print.hpp" diff --git a/src/robot_player.cpp b/src/robot_player.cpp index 81d2b46..e7824b7 100644 --- a/src/robot_player.cpp +++ b/src/robot_player.cpp @@ -1,18127 +1,18130 @@ -#include -#include "gltf.hpp" -#include "robot_player.hpp" -const D3DXVECTOR3 accessor_0[] = { - D3DXVECTOR3( 0.2478682f, 1.9170125f, -0.1466553f), - D3DXVECTOR3( 0.2478682f, 1.4493124f, -0.0112957f), - D3DXVECTOR3( 0.2478682f, 1.8967085f, -0.2168102f), - D3DXVECTOR3( 0.2478682f, 1.4290085f, -0.0814507f), - D3DXVECTOR3( 0.2434468f, 1.7109127f, -0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.7109127f, -0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.7109127f, -0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.2240191f, -0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.2240191f, -0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.2240191f, -0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.7109127f, 0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.7109127f, 0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.7109127f, 0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.2240191f, 0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.2240191f, 0.2434468f), - D3DXVECTOR3( 0.2434468f, 1.2240191f, 0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.7109127f, -0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.7109127f, -0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.7109127f, -0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.2240191f, -0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.2240191f, -0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.2240191f, -0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.7109127f, 0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.7109127f, 0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.7109127f, 0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.2240191f, 0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.2240191f, 0.2434468f), - D3DXVECTOR3(-0.2434468f, 1.2240191f, 0.2434468f), - D3DXVECTOR3( 0.1708783f, 0.7860264f, -0.1460781f), - D3DXVECTOR3( 0.1708783f, 0.7860264f, -0.1460781f), - D3DXVECTOR3( 0.1708783f, 0.7860264f, -0.1460781f), - D3DXVECTOR3( 0.1708938f, 0.6400828f, -0.1464314f), - D3DXVECTOR3( 0.1708938f, 0.6400828f, -0.1464314f), - D3DXVECTOR3( 0.1708938f, 0.6400828f, -0.1464314f), - D3DXVECTOR3( 0.1709236f, 0.7858657f, 0.1457004f), - D3DXVECTOR3( 0.1709236f, 0.7858657f, 0.1457004f), - 
D3DXVECTOR3( 0.1709236f, 0.7858657f, 0.1457004f), - D3DXVECTOR3( 0.1709363f, 0.6402411f, 0.1461338f), - D3DXVECTOR3( 0.1709363f, 0.6402411f, 0.1461338f), - D3DXVECTOR3( 0.1709363f, 0.6402411f, 0.1461338f), - D3DXVECTOR3(-0.1700494f, 0.7855206f, -0.1460781f), - D3DXVECTOR3(-0.1700494f, 0.7855206f, -0.1460781f), - D3DXVECTOR3(-0.1700494f, 0.7855206f, -0.1460781f), - D3DXVECTOR3(-0.1700649f, 0.6395770f, -0.1464314f), - D3DXVECTOR3(-0.1700649f, 0.6395770f, -0.1464314f), - D3DXVECTOR3(-0.1700649f, 0.6395770f, -0.1464314f), - D3DXVECTOR3(-0.1700947f, 0.7853599f, 0.1457004f), - D3DXVECTOR3(-0.1700947f, 0.7853599f, 0.1457004f), - D3DXVECTOR3(-0.1700947f, 0.7853599f, 0.1457004f), - D3DXVECTOR3(-0.1701074f, 0.6397355f, 0.1461338f), - D3DXVECTOR3(-0.1701074f, 0.6397355f, 0.1461338f), - D3DXVECTOR3(-0.1701074f, 0.6397355f, 0.1461338f), - D3DXVECTOR3( 0.1492691f, 0.6476287f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.6476287f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.6476287f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.3873219f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.3873219f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.3873219f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.6455109f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.6455109f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.6455109f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.3852040f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.3852040f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.3852040f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.6476287f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.6476287f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.6476287f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.3873219f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.3873219f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.3873219f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.6455109f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.6455109f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.6455109f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.3852040f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.3852040f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.3852040f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.6476287f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.6476287f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.6476287f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.3873219f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.3873219f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.3873219f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.6455109f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.6455109f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.6455109f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.3852040f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.3852040f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.3852040f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.6476287f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.6476287f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.6476287f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.3873219f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.3873219f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.3873219f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.6455109f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.6455109f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.6455109f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.3852040f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.3852040f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.3852040f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.3870092f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.3870092f, -0.0631194f), - D3DXVECTOR3( 0.1492691f, 0.3870092f, -0.0631194f), - D3DXVECTOR3( 0.1461064f, 0.1179388f, 
-0.0593852f), - D3DXVECTOR3( 0.1461064f, 0.1179388f, -0.0593852f), - D3DXVECTOR3( 0.1461064f, 0.1179388f, -0.0593852f), - D3DXVECTOR3( 0.1492691f, 0.3840750f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.3840750f, 0.0321217f), - D3DXVECTOR3( 0.1492691f, 0.3840750f, 0.0321217f), - D3DXVECTOR3( 0.1461064f, 0.1153777f, 0.0237443f), - D3DXVECTOR3( 0.1461064f, 0.1153777f, 0.0237443f), - D3DXVECTOR3( 0.1461064f, 0.1153777f, 0.0237443f), - D3DXVECTOR3( 0.0571711f, 0.3870092f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.3870092f, -0.0631194f), - D3DXVECTOR3( 0.0571711f, 0.3870092f, -0.0631194f), - D3DXVECTOR3( 0.0603339f, 0.1179388f, -0.0593852f), - D3DXVECTOR3( 0.0603339f, 0.1179388f, -0.0593852f), - D3DXVECTOR3( 0.0603339f, 0.1179388f, -0.0593852f), - D3DXVECTOR3( 0.0571711f, 0.3840750f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.3840750f, 0.0321217f), - D3DXVECTOR3( 0.0571711f, 0.3840750f, 0.0321217f), - D3DXVECTOR3( 0.0603339f, 0.1153777f, 0.0237443f), - D3DXVECTOR3( 0.0603339f, 0.1153777f, 0.0237443f), - D3DXVECTOR3( 0.0603339f, 0.1153777f, 0.0237443f), - D3DXVECTOR3(-0.1492691f, 0.3870092f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.3870092f, -0.0631194f), - D3DXVECTOR3(-0.1492691f, 0.3870092f, -0.0631194f), - D3DXVECTOR3(-0.1461064f, 0.1179388f, -0.0593852f), - D3DXVECTOR3(-0.1461064f, 0.1179388f, -0.0593852f), - D3DXVECTOR3(-0.1461064f, 0.1179388f, -0.0593852f), - D3DXVECTOR3(-0.1492691f, 0.3840749f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.3840749f, 0.0321217f), - D3DXVECTOR3(-0.1492691f, 0.3840749f, 0.0321217f), - D3DXVECTOR3(-0.1461064f, 0.1153776f, 0.0237443f), - D3DXVECTOR3(-0.1461064f, 0.1153776f, 0.0237443f), - D3DXVECTOR3(-0.1461064f, 0.1153776f, 0.0237443f), - D3DXVECTOR3(-0.0571711f, 0.3870092f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.3870092f, -0.0631194f), - D3DXVECTOR3(-0.0571711f, 0.3870092f, -0.0631194f), - D3DXVECTOR3(-0.0603339f, 0.1179388f, -0.0593852f), - D3DXVECTOR3(-0.0603339f, 0.1179388f, -0.0593852f), - D3DXVECTOR3(-0.0603339f, 0.1179388f, -0.0593852f), - D3DXVECTOR3(-0.0571711f, 0.3840749f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.3840749f, 0.0321217f), - D3DXVECTOR3(-0.0571711f, 0.3840749f, 0.0321217f), - D3DXVECTOR3(-0.0603339f, 0.1153776f, 0.0237443f), - D3DXVECTOR3(-0.0603339f, 0.1153776f, 0.0237443f), - D3DXVECTOR3(-0.0603339f, 0.1153776f, 0.0237443f), - D3DXVECTOR3( 0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.1512716f, -0.0199962f, -0.0600954f), - D3DXVECTOR3( 0.1512716f, -0.0199962f, -0.0600954f), - D3DXVECTOR3( 0.1512716f, -0.0199962f, -0.0600954f), - D3DXVECTOR3( 0.1512716f, 0.0296853f, 0.1167542f), - D3DXVECTOR3( 0.1512716f, 0.0296853f, 0.1167542f), - D3DXVECTOR3( 0.1512716f, 0.0296853f, 0.1167542f), - D3DXVECTOR3( 0.1512716f, -0.0204037f, 0.1167542f), - D3DXVECTOR3( 0.1512716f, -0.0204037f, 0.1167542f), - D3DXVECTOR3( 0.1512716f, -0.0204037f, 0.1167542f), - D3DXVECTOR3( 0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3( 0.0591736f, -0.0199962f, -0.0600954f), - D3DXVECTOR3( 0.0591736f, -0.0199962f, -0.0600954f), - D3DXVECTOR3( 0.0591736f, -0.0199962f, -0.0600954f), - D3DXVECTOR3( 0.0591736f, 0.0296853f, 0.1167542f), - D3DXVECTOR3( 0.0591736f, 0.0296853f, 0.1167542f), - D3DXVECTOR3( 0.0591736f, 0.0296853f, 0.1167542f), - 
D3DXVECTOR3( 0.0591736f, -0.0204037f, 0.1167542f), - D3DXVECTOR3( 0.0591736f, -0.0204037f, 0.1167542f), - D3DXVECTOR3( 0.0591736f, -0.0204037f, 0.1167542f), - D3DXVECTOR3( 0.0591736f, -0.0201999f, 0.0283294f), - D3DXVECTOR3( 0.0591736f, -0.0201999f, 0.0283294f), - D3DXVECTOR3( 0.0591736f, -0.0201999f, 0.0283294f), - D3DXVECTOR3( 0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3( 0.1512716f, -0.0201999f, 0.0283294f), - D3DXVECTOR3( 0.1512716f, -0.0201999f, 0.0283294f), - D3DXVECTOR3( 0.1512716f, -0.0201999f, 0.0283294f), - D3DXVECTOR3( 0.1646186f, 0.1424436f, -0.0725503f), - D3DXVECTOR3( 0.1646186f, 0.1424436f, -0.0725503f), - D3DXVECTOR3( 0.1646186f, 0.1424436f, -0.0725503f), - D3DXVECTOR3( 0.0458267f, 0.1424436f, -0.0725503f), - D3DXVECTOR3( 0.0458267f, 0.1424436f, -0.0725503f), - D3DXVECTOR3( 0.0458267f, 0.1424436f, -0.0725503f), - D3DXVECTOR3( 0.1646186f, 0.1421875f, 0.0385886f), - D3DXVECTOR3( 0.1646186f, 0.1421875f, 0.0385886f), - D3DXVECTOR3( 0.1646186f, 0.1421875f, 0.0385886f), - D3DXVECTOR3( 0.0458267f, 0.1421875f, 0.0385886f), - D3DXVECTOR3( 0.0458267f, 0.1421875f, 0.0385886f), - D3DXVECTOR3( 0.0458267f, 0.1421875f, 0.0385886f), - D3DXVECTOR3(-0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.1549126f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.1512717f, -0.0199962f, -0.0600954f), - D3DXVECTOR3(-0.1512717f, -0.0199962f, -0.0600954f), - D3DXVECTOR3(-0.1512717f, -0.0199962f, -0.0600954f), - D3DXVECTOR3(-0.1512717f, 0.0296853f, 0.1167542f), - D3DXVECTOR3(-0.1512717f, 0.0296853f, 0.1167542f), - D3DXVECTOR3(-0.1512717f, 0.0296853f, 0.1167542f), - D3DXVECTOR3(-0.1512717f, -0.0204037f, 0.1167542f), - D3DXVECTOR3(-0.1512717f, -0.0204037f, 0.1167542f), - D3DXVECTOR3(-0.1512717f, -0.0204037f, 0.1167542f), - D3DXVECTOR3(-0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.0555327f, 0.0675215f, -0.0596355f), - D3DXVECTOR3(-0.0591736f, -0.0199962f, -0.0600954f), - D3DXVECTOR3(-0.0591736f, -0.0199962f, -0.0600954f), - D3DXVECTOR3(-0.0591736f, -0.0199962f, -0.0600954f), - D3DXVECTOR3(-0.0591736f, 0.0296853f, 0.1167542f), - D3DXVECTOR3(-0.0591736f, 0.0296853f, 0.1167542f), - D3DXVECTOR3(-0.0591736f, 0.0296853f, 0.1167542f), - D3DXVECTOR3(-0.0591736f, -0.0204037f, 0.1167542f), - D3DXVECTOR3(-0.0591736f, -0.0204037f, 0.1167542f), - D3DXVECTOR3(-0.0591736f, -0.0204037f, 0.1167542f), - D3DXVECTOR3(-0.0591736f, -0.0201999f, 0.0283294f), - D3DXVECTOR3(-0.0591736f, -0.0201999f, 0.0283294f), - D3DXVECTOR3(-0.0591736f, -0.0201999f, 0.0283294f), - D3DXVECTOR3(-0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.1549126f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.0555327f, 
0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.0555327f, 0.0673177f, 0.0287893f), - D3DXVECTOR3(-0.1512717f, -0.0201999f, 0.0283294f), - D3DXVECTOR3(-0.1512717f, -0.0201999f, 0.0283294f), - D3DXVECTOR3(-0.1512717f, -0.0201999f, 0.0283294f), - D3DXVECTOR3(-0.1646186f, 0.1424436f, -0.0725503f), - D3DXVECTOR3(-0.1646186f, 0.1424436f, -0.0725503f), - D3DXVECTOR3(-0.1646186f, 0.1424436f, -0.0725503f), - D3DXVECTOR3(-0.0458267f, 0.1424436f, -0.0725503f), - D3DXVECTOR3(-0.0458267f, 0.1424436f, -0.0725503f), - D3DXVECTOR3(-0.0458267f, 0.1424436f, -0.0725503f), - D3DXVECTOR3(-0.1646186f, 0.1421874f, 0.0385886f), - D3DXVECTOR3(-0.1646186f, 0.1421874f, 0.0385886f), - D3DXVECTOR3(-0.1646186f, 0.1421874f, 0.0385886f), - D3DXVECTOR3(-0.0458267f, 0.1421874f, 0.0385886f), - D3DXVECTOR3(-0.0458267f, 0.1421874f, 0.0385886f), - D3DXVECTOR3(-0.0458267f, 0.1421874f, 0.0385886f), - D3DXVECTOR3( 0.2091001f, 0.8484664f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 0.8484664f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 0.8484664f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 1.1539041f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 1.1539041f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 1.1539041f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 0.8484664f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 0.8484664f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 0.8484664f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 1.1539041f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 1.1539041f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 1.1539041f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 0.8484664f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 0.8484664f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 0.8484664f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 1.1539041f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 1.1539041f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 1.1539041f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 0.8484664f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 0.8484664f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 0.8484664f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 1.1539041f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 1.1539041f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 1.1539041f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 0.8484664f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 0.8484664f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 0.8484664f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 1.1539041f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 1.1539041f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 1.1539041f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 0.8484664f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 0.8484664f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 0.8484664f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 1.1539041f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 1.1539041f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 1.1539041f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 0.8484664f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 0.8484664f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 0.8484664f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 1.1539041f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 1.1539041f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 1.1539041f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 0.8484664f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 0.8484664f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 0.8484664f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 1.1539041f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 1.1539041f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 1.1539041f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.5538269f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.5538269f, 0.0316646f), - 
D3DXVECTOR3( 0.2107262f, 0.5538269f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.8537946f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.8537946f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.8537946f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.5538269f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.5538269f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.5538269f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.8537946f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.8537946f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.8537946f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.5538269f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.5538269f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.5538269f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.8537946f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.8537946f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.8537946f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.5538269f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.5538269f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.5538269f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.8537946f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.8537946f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.8537946f, -0.0316648f), - D3DXVECTOR3(-0.2107263f, 0.5538269f, 0.0316646f), - D3DXVECTOR3(-0.2107263f, 0.5538269f, 0.0316646f), - D3DXVECTOR3(-0.2107263f, 0.5538269f, 0.0316646f), - D3DXVECTOR3(-0.2107263f, 0.8537946f, 0.0316646f), - D3DXVECTOR3(-0.2107263f, 0.8537946f, 0.0316646f), - D3DXVECTOR3(-0.2107263f, 0.8537946f, 0.0316646f), - D3DXVECTOR3(-0.2107263f, 0.5538269f, -0.0316648f), - D3DXVECTOR3(-0.2107263f, 0.5538269f, -0.0316648f), - D3DXVECTOR3(-0.2107263f, 0.5538269f, -0.0316648f), - D3DXVECTOR3(-0.2107263f, 0.8537946f, -0.0316648f), - D3DXVECTOR3(-0.2107263f, 0.8537946f, -0.0316648f), - D3DXVECTOR3(-0.2107263f, 0.8537946f, -0.0316648f), - D3DXVECTOR3(-0.2740558f, 0.5538269f, 0.0316646f), - D3DXVECTOR3(-0.2740558f, 0.5538269f, 0.0316646f), - D3DXVECTOR3(-0.2740558f, 0.5538269f, 0.0316646f), - D3DXVECTOR3(-0.2740558f, 0.8537946f, 0.0316646f), - D3DXVECTOR3(-0.2740558f, 0.8537946f, 0.0316646f), - D3DXVECTOR3(-0.2740558f, 0.8537946f, 0.0316646f), - D3DXVECTOR3(-0.2740558f, 0.5538269f, -0.0316648f), - D3DXVECTOR3(-0.2740558f, 0.5538269f, -0.0316648f), - D3DXVECTOR3(-0.2740558f, 0.5538269f, -0.0316648f), - D3DXVECTOR3(-0.2740558f, 0.8537946f, -0.0316648f), - D3DXVECTOR3(-0.2740558f, 0.8537946f, -0.0316648f), - D3DXVECTOR3(-0.2740558f, 0.8537946f, -0.0316648f), - D3DXVECTOR3( 0.1706610f, 1.1755617f, -0.1459927f), - D3DXVECTOR3( 0.1706610f, 1.1755617f, -0.1459927f), - D3DXVECTOR3( 0.1706610f, 1.1755617f, -0.1459927f), - D3DXVECTOR3( 0.1706989f, 0.8830866f, -0.1470453f), - D3DXVECTOR3( 0.1706989f, 0.8830866f, -0.1470453f), - D3DXVECTOR3( 0.1706989f, 0.8830866f, -0.1470453f), - D3DXVECTOR3( 0.1706610f, 1.1753165f, 0.1454204f), - D3DXVECTOR3( 0.1706610f, 1.1753165f, 0.1454204f), - D3DXVECTOR3( 0.1706610f, 1.1753165f, 0.1454204f), - D3DXVECTOR3( 0.1706989f, 0.8832734f, 0.1454337f), - D3DXVECTOR3( 0.1706989f, 0.8832734f, 0.1454337f), - D3DXVECTOR3( 0.1706989f, 0.8832734f, 0.1454337f), - D3DXVECTOR3(-0.1697909f, 1.1754774f, -0.1459927f), - D3DXVECTOR3(-0.1697909f, 1.1754774f, -0.1459927f), - D3DXVECTOR3(-0.1697909f, 1.1754774f, -0.1459927f), - D3DXVECTOR3(-0.1698710f, 0.8832552f, -0.1470453f), - D3DXVECTOR3(-0.1698710f, 0.8832552f, -0.1470453f), - D3DXVECTOR3(-0.1698710f, 0.8832552f, -0.1470453f), - D3DXVECTOR3(-0.1697909f, 1.1752322f, 0.1454204f), - D3DXVECTOR3(-0.1697909f, 1.1752322f, 0.1454204f), - D3DXVECTOR3(-0.1697909f, 1.1752322f, 0.1454204f), - D3DXVECTOR3(-0.1698710f, 
0.8834420f, 0.1454337f), - D3DXVECTOR3(-0.1698710f, 0.8834420f, 0.1454337f), - D3DXVECTOR3(-0.1698710f, 0.8834420f, 0.1454337f), - D3DXVECTOR3( 0.0493873f, 0.9123309f, -0.0626242f), - D3DXVECTOR3( 0.0493873f, 0.9123309f, -0.0626242f), - D3DXVECTOR3( 0.0493873f, 0.9123309f, -0.0626242f), - D3DXVECTOR3( 0.0477787f, 0.7450795f, -0.0646072f), - D3DXVECTOR3( 0.0477787f, 0.7450795f, -0.0646072f), - D3DXVECTOR3( 0.0477787f, 0.7450795f, -0.0646072f), - D3DXVECTOR3( 0.0493873f, 0.9111561f, 0.0185991f), - D3DXVECTOR3( 0.0493873f, 0.9111561f, 0.0185991f), - D3DXVECTOR3( 0.0493873f, 0.9111561f, 0.0185991f), - D3DXVECTOR3( 0.0477786f, 0.7439046f, 0.0166160f), - D3DXVECTOR3( 0.0477786f, 0.7439046f, 0.0166160f), - D3DXVECTOR3( 0.0477786f, 0.7439046f, 0.0166160f), - D3DXVECTOR3(-0.0377652f, 0.9123309f, -0.0626242f), - D3DXVECTOR3(-0.0377652f, 0.9123309f, -0.0626242f), - D3DXVECTOR3(-0.0377652f, 0.9123309f, -0.0626242f), - D3DXVECTOR3(-0.0361565f, 0.7450795f, -0.0646072f), - D3DXVECTOR3(-0.0361565f, 0.7450795f, -0.0646072f), - D3DXVECTOR3(-0.0361565f, 0.7450795f, -0.0646072f), - D3DXVECTOR3(-0.0377652f, 0.9111561f, 0.0185991f), - D3DXVECTOR3(-0.0377652f, 0.9111561f, 0.0185991f), - D3DXVECTOR3(-0.0377652f, 0.9111561f, 0.0185991f), - D3DXVECTOR3(-0.0361565f, 0.7439046f, 0.0166160f), - D3DXVECTOR3(-0.0361565f, 0.7439046f, 0.0166160f), - D3DXVECTOR3(-0.0361565f, 0.7439046f, 0.0166160f), - D3DXVECTOR3(-0.2091001f, 0.9502789f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 0.9502789f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 1.0520915f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 1.0520915f, 0.0316646f), - D3DXVECTOR3(-0.2091001f, 1.0520915f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 1.0520915f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 0.9502789f, -0.0316648f), - D3DXVECTOR3(-0.2091001f, 0.9502789f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 1.0520915f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 1.0520915f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 0.9502789f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 0.9502789f, -0.0316648f), - D3DXVECTOR3(-0.2724295f, 1.0520915f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 1.0520915f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 0.9502789f, 0.0316646f), - D3DXVECTOR3(-0.2724295f, 0.9502789f, 0.0316646f), - D3DXVECTOR3(-0.2107262f, 0.6538162f, 0.0316646f), - D3DXVECTOR3(-0.2107262f, 0.6538162f, 0.0316646f), - D3DXVECTOR3(-0.2107262f, 0.7538054f, 0.0316646f), - D3DXVECTOR3(-0.2107262f, 0.7538054f, 0.0316646f), - D3DXVECTOR3(-0.2107262f, 0.7538053f, -0.0316648f), - D3DXVECTOR3(-0.2107262f, 0.7538053f, -0.0316648f), - D3DXVECTOR3(-0.2107262f, 0.6538161f, -0.0316648f), - D3DXVECTOR3(-0.2107262f, 0.6538161f, -0.0316648f), - D3DXVECTOR3(-0.2740557f, 0.7538053f, -0.0316648f), - D3DXVECTOR3(-0.2740557f, 0.7538053f, -0.0316648f), - D3DXVECTOR3(-0.2740557f, 0.6538161f, -0.0316648f), - D3DXVECTOR3(-0.2740557f, 0.6538161f, -0.0316648f), - D3DXVECTOR3(-0.2740557f, 0.7538053f, 0.0316646f), - D3DXVECTOR3(-0.2740557f, 0.7538053f, 0.0316646f), - D3DXVECTOR3(-0.2740557f, 0.6538161f, 0.0316646f), - D3DXVECTOR3(-0.2740557f, 0.6538161f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.6538162f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.6538162f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.7538054f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.7538054f, 0.0316646f), - D3DXVECTOR3( 0.2107262f, 0.7538053f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.7538053f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.6538161f, -0.0316648f), - D3DXVECTOR3( 0.2107262f, 0.6538161f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.7538053f, -0.0316648f), - 
D3DXVECTOR3( 0.2740557f, 0.7538053f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.6538161f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.6538161f, -0.0316648f), - D3DXVECTOR3( 0.2740557f, 0.7538053f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.7538053f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.6538161f, 0.0316646f), - D3DXVECTOR3( 0.2740557f, 0.6538161f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 0.9502789f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 0.9502789f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 1.0520915f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 1.0520915f, 0.0316646f), - D3DXVECTOR3( 0.2091001f, 1.0520915f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 1.0520915f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 0.9502789f, -0.0316648f), - D3DXVECTOR3( 0.2091001f, 0.9502789f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 1.0520915f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 1.0520915f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 0.9502789f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 0.9502789f, -0.0316648f), - D3DXVECTOR3( 0.2724295f, 1.0520915f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 1.0520915f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 0.9502789f, 0.0316646f), - D3DXVECTOR3( 0.2724295f, 0.9502789f, 0.0316646f), -}; - -const D3DXVECTOR3 accessor_1[] = { - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f), - D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f), - D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f), - D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f), - D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f), - D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f), - D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f), - D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f), - D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f), - D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.0024000f, -0.9999971f), - D3DXVECTOR3( 1.0000000f, 0.0001000f, -0.0002000f), - D3DXVECTOR3(-0.0015000f, 0.9999987f, 0.0006000f), - D3DXVECTOR3( 0.0000000f, 0.0024000f, -0.9999971f), - D3DXVECTOR3( 0.0015000f, -0.9999988f, 0.0005000f), - D3DXVECTOR3( 1.0000000f, 0.0001000f, -0.0002000f), - D3DXVECTOR3( 0.0000000f, 0.0030000f, 0.9999956f), - D3DXVECTOR3( 1.0000000f, 0.0001000f, -0.0002000f), - D3DXVECTOR3(-0.0015000f, 0.9999987f, 0.0006000f), - D3DXVECTOR3( 0.0000000f, 0.0030000f, 0.9999956f), - D3DXVECTOR3( 0.0015000f, -0.9999988f, 0.0005000f), - D3DXVECTOR3( 1.0000000f, 0.0001000f, -0.0002000f), - D3DXVECTOR3( 0.0000000f, 0.0024000f, -0.9999971f), - D3DXVECTOR3(-0.0015000f, 0.9999987f, 0.0006000f), - D3DXVECTOR3(-1.0000000f, 0.0001000f, -0.0002000f), - D3DXVECTOR3( 0.0000000f, 0.0024000f, -0.9999971f), 
- D3DXVECTOR3( 0.0015000f, -0.9999988f, 0.0005000f),
- D3DXVECTOR3(-1.0000000f, 0.0001000f, -0.0002000f),
- D3DXVECTOR3( 0.0000000f, 0.0030000f, 0.9999956f),
- D3DXVECTOR3(-0.0015000f, 0.9999987f, 0.0006000f),
- D3DXVECTOR3(-1.0000000f, 0.0001000f, -0.0002000f),
- D3DXVECTOR3( 0.0000000f, 0.0030000f, 0.9999956f),
- D3DXVECTOR3( 0.0015000f, -0.9999988f, 0.0005000f),
- D3DXVECTOR3(-1.0000000f, 0.0001000f, -0.0002000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9997536f, 0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9997536f, -0.0221990f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3(-0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0139000f, -0.9999034f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.9995255f, 0.0308008f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, -0.0312004f, 0.9995131f),
- D3DXVECTOR3( 0.0000000f, -0.9995255f, -0.0308008f),
- D3DXVECTOR3( 0.9999303f, -0.0118004f, -0.0004000f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0052999f, -0.9999860f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.9194086f, 0.3933037f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3( 0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, -0.9999974f, -0.0023000f),
- D3DXVECTOR3(-0.9991343f, -0.0416014f, -0.0001000f),
- D3DXVECTOR3(-0.9995378f, -0.0265010f, 0.0149006f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1698935f, -0.9854625f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3(-0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.9999974f, 0.0023000f),
- D3DXVECTOR3( 0.0000000f, -0.1298052f, 0.9915395f),
- D3DXVECTOR3( 0.9917093f, -0.1285012f, -0.0003000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, -1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 0.0000000f, 1.0000000f, 0.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0036000f, -0.9999936f),
- D3DXVECTOR3( 1.0000000f, 0.0001000f, 0.0000000f),
- D3DXVECTOR3(-0.0002000f, 0.9999997f, 0.0008000f),
- D3DXVECTOR3( 0.0000000f, 0.0036000f, -0.9999936f),
- D3DXVECTOR3( 1.0000000f, 0.0001000f, 0.0000000f),
- D3DXVECTOR3(-0.0005000f, -0.9999998f, 0.0006000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0001000f, 0.0000000f),
- D3DXVECTOR3(-0.0002000f, 0.9999997f, 0.0008000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0001000f, 0.0000000f),
- D3DXVECTOR3(-0.0005000f, -0.9999998f, 0.0006000f),
- D3DXVECTOR3( 0.0000000f, 0.0036000f, -0.9999936f),
- D3DXVECTOR3(-0.0002000f, 0.9999997f, 0.0008000f),
- D3DXVECTOR3(-1.0000000f, 0.0003000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0036000f, -0.9999936f),
- D3DXVECTOR3(-0.0005000f, -0.9999998f, 0.0006000f),
- D3DXVECTOR3(-1.0000000f, 0.0003000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-0.0002000f, 0.9999997f, 0.0008000f),
- D3DXVECTOR3(-1.0000000f, 0.0003000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-0.0005000f, -0.9999998f, 0.0006000f),
- D3DXVECTOR3(-1.0000000f, 0.0003000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0119003f, -0.9999292f),
- D3DXVECTOR3( 0.0000000f, 0.9998949f, 0.0144999f),
- D3DXVECTOR3( 0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0119003f, -0.9999292f),
- D3DXVECTOR3( 0.0000000f, -0.9998949f, -0.0144999f),
- D3DXVECTOR3( 0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.9998949f, 0.0144999f),
- D3DXVECTOR3( 0.0000000f, -0.0119003f, 0.9999292f),
- D3DXVECTOR3( 0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, -0.0119003f, 0.9999292f),
- D3DXVECTOR3( 0.0000000f, -0.9998949f, -0.0144999f),
- D3DXVECTOR3( 0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0119003f, -0.9999292f),
- D3DXVECTOR3( 0.0000000f, 0.9998949f, 0.0144999f),
- D3DXVECTOR3(-0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0119003f, -0.9999292f),
- D3DXVECTOR3( 0.0000000f, -0.9998949f, -0.0144999f),
- D3DXVECTOR3(-0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.9998949f, 0.0144999f),
- D3DXVECTOR3( 0.0000000f, -0.0119003f, 0.9999292f),
- D3DXVECTOR3(-0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, -0.0119003f, 0.9999292f),
- D3DXVECTOR3( 0.0000000f, -0.9998949f, -0.0144999f),
- D3DXVECTOR3(-0.9999540f, -0.0095996f, -0.0001000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3(-1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, -1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.0000000f, 0.0000000f),
-};
-
-const D3DXVECTOR2 accessor_2[] = {
- D3DXVECTOR2( 0.7500000f, 0.0000000f),
- D3DXVECTOR2( 0.7500000f, 0.2031250f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2031250f),
- D3DXVECTOR2( 0.7500000f, 0.0000000f),
- D3DXVECTOR2( 0.7500000f, 0.0468750f),
- D3DXVECTOR2( 0.7500000f, 0.0000000f),
- D3DXVECTOR2( 0.7500000f, 0.2500000f),
- D3DXVECTOR2( 0.5478383f, 0.2441072f),
- D3DXVECTOR2( 0.7500000f, 0.2500000f),
- D3DXVECTOR2( 0.5000000f, 0.0000000f),
- D3DXVECTOR2( 0.7031250f, 0.0468750f),
- D3DXVECTOR2( 0.5000000f, 0.0000000f),
- D3DXVECTOR2( 0.5000000f, 0.2500000f),
- D3DXVECTOR2( 0.5478383f, 0.1873633f),
- D3DXVECTOR2( 0.5000000f, 0.2500000f),
- D3DXVECTOR2( 0.5156250f, 0.0000000f),
- D3DXVECTOR2( 0.7500000f, 0.0000000f),
- D3DXVECTOR2( 0.5000000f, 0.0000000f),
- D3DXVECTOR2( 0.5156250f, 0.2500000f),
- D3DXVECTOR2( 0.4910944f, 0.2441072f),
- D3DXVECTOR2( 0.5000000f, 0.2500000f),
- D3DXVECTOR2( 0.2500000f, 0.0000000f),
- D3DXVECTOR2( 0.7031250f, 0.0000000f),
- D3DXVECTOR2( 0.7500000f, 0.0000000f),
- D3DXVECTOR2( 0.2500000f, 0.2500000f),
- D3DXVECTOR2( 0.4910944f, 0.1873633f),
- D3DXVECTOR2( 0.7500000f, 0.2500000f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.3125000f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.8124686f, 0.3124686f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.8125000f, 0.4062500f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.8125000f, 0.5000000f),
- D3DXVECTOR2( 0.8125314f, 0.4999686f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.2500000f),
- D3DXVECTOR2( 0.4375000f, 0.3125000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.6249686f, 0.3125314f),
- D3DXVECTOR2( 0.6250000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.4062500f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.6250314f, 0.5000314f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2(-0.0095541f, 0.5000000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2(-0.0095541f, -0.5191081f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2(-1.0286622f, 0.5000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2(-1.0286622f, -0.5191081f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7958288f, 0.2513793f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2(-0.0095541f, 0.5000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7958288f, 0.2620047f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2(-0.0095541f, -0.5191083f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7852035f, 0.2513793f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2(-1.0286624f, 0.5000000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7852035f, 0.2620047f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2(-1.0286624f, -0.5191083f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7738063f, 0.1937098f),
- D3DXVECTOR2( 0.7738063f, 0.1937098f),
- D3DXVECTOR2( 0.7738063f, 0.1937098f),
- D3DXVECTOR2( 0.7744191f, 0.1960735f),
- D3DXVECTOR2( 0.7744191f, 0.1960735f),
- D3DXVECTOR2( 0.7744191f, 0.1960735f),
- D3DXVECTOR2( 0.7752946f, 0.1945853f),
- D3DXVECTOR2( 0.7752946f, 0.1945853f),
- D3DXVECTOR2( 0.7752946f, 0.1945853f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7723181f, 0.1928343f),
- D3DXVECTOR2( 0.7746819f, 0.1922215f),
- D3DXVECTOR2( 0.7723181f, 0.1928343f),
- D3DXVECTOR2( 0.7759073f, 0.1969490f),
- D3DXVECTOR2( 0.7735436f, 0.1975617f),
- D3DXVECTOR2( 0.7699544f, 0.1934471f),
- D3DXVECTOR2( 0.7767828f, 0.1954608f),
- D3DXVECTOR2( 0.7761701f, 0.1930971f),
- D3DXVECTOR2( 0.7708299f, 0.1919588f),
- D3DXVECTOR2( 0.7754260f, 0.1926593f),
- D3DXVECTOR2( 0.7715740f, 0.1923965f),
- D3DXVECTOR2( 0.7715740f, 0.1923965f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7745505f, 0.1941475f),
- D3DXVECTOR2( 0.7745505f, 0.1941475f),
- D3DXVECTOR2( 0.7745505f, 0.1941475f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7720553f, 0.1966863f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7738063f, 0.1937098f),
- D3DXVECTOR2( 0.7738063f, 0.1937098f),
- D3DXVECTOR2( 0.7738063f, 0.1937098f),
- D3DXVECTOR2( 0.7744191f, 0.1960735f),
- D3DXVECTOR2( 0.7744191f, 0.1960735f),
- D3DXVECTOR2( 0.7744191f, 0.1960735f),
- D3DXVECTOR2( 0.7752946f, 0.1945853f),
- D3DXVECTOR2( 0.7752946f, 0.1945853f),
- D3DXVECTOR2( 0.7752946f, 0.1945853f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7723181f, 0.1928343f),
- D3DXVECTOR2( 0.7746819f, 0.1922215f),
- D3DXVECTOR2( 0.7723181f, 0.1928343f),
- D3DXVECTOR2( 0.7759073f, 0.1969490f),
- D3DXVECTOR2( 0.7735436f, 0.1975617f),
- D3DXVECTOR2( 0.7699544f, 0.1934471f),
- D3DXVECTOR2( 0.7767828f, 0.1954608f),
- D3DXVECTOR2( 0.7761701f, 0.1930971f),
- D3DXVECTOR2( 0.7708299f, 0.1919588f),
- D3DXVECTOR2( 0.7754260f, 0.1926593f),
- D3DXVECTOR2( 0.7715740f, 0.1923965f),
- D3DXVECTOR2( 0.7715740f, 0.1923965f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7745505f, 0.1941475f),
- D3DXVECTOR2( 0.7745505f, 0.1941475f),
- D3DXVECTOR2( 0.7745505f, 0.1941475f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7729309f, 0.1951980f),
- D3DXVECTOR2( 0.7720553f, 0.1966863f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7714426f, 0.1943225f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7736750f, 0.1956358f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7727995f, 0.1971240f),
- D3DXVECTOR2( 0.7706985f, 0.1938848f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.0000000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.0000000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2031250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2031250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7968750f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.8125000f, 0.2656250f),
- D3DXVECTOR2( 0.7968750f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.8125000f, 0.1250000f),
- D3DXVECTOR2( 0.7812500f, 0.2656250f),
- D3DXVECTOR2( 0.7812500f, 0.1250000f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.3125000f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.8125000f, 0.5000000f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.8125000f, 0.5000000f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.8125000f, 0.3125000f),
- D3DXVECTOR2( 0.6250000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.3125000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.2500000f),
- D3DXVECTOR2( 0.2500000f, 0.2500000f),
- D3DXVECTOR2( 0.6250000f, 0.5000000f),
- D3DXVECTOR2( 0.4375000f, 0.5000000f),
- D3DXVECTOR2( 0.2500000f, 0.5000000f),
- D3DXVECTOR2( 0.6250000f, 0.3125000f),
- D3DXVECTOR2( 0.4375000f, 0.2500000f),
- D3DXVECTOR2( 0.6951088f, 0.4779186f),
- D3DXVECTOR2( 0.6998297f, 0.4787053f),
- D3DXVECTOR2( 0.6974693f, 0.4779186f),
- D3DXVECTOR2( 0.6951088f, 0.4810658f),
- D3DXVECTOR2( 0.6998297f, 0.4810658f),
- D3DXVECTOR2( 0.6974693f, 0.4810658f),
- D3DXVECTOR2( 0.6998297f, 0.4810658f),
- D3DXVECTOR2( 0.6951088f, 0.4779186f),
- D3DXVECTOR2( 0.6951088f, 0.4779186f),
- D3DXVECTOR2( 0.6951088f, 0.4810658f),
- D3DXVECTOR2( 0.6998297f, 0.4787053f),
- D3DXVECTOR2( 0.6951088f, 0.4810658f),
- D3DXVECTOR2( 0.6974693f, 0.4779186f),
- D3DXVECTOR2( 0.6974693f, 0.4787053f),
- D3DXVECTOR2( 0.6974693f, 0.4810658f),
- D3DXVECTOR2( 0.6974693f, 0.4810658f),
- D3DXVECTOR2( 0.6974693f, 0.4810658f),
- D3DXVECTOR2( 0.6974693f, 0.4779186f),
- D3DXVECTOR2( 0.6974693f, 0.4810658f),
- D3DXVECTOR2( 0.6927484f, 0.4779186f),
- D3DXVECTOR2( 0.6951088f, 0.4810658f),
- D3DXVECTOR2( 0.6927484f, 0.4810658f),
- D3DXVECTOR2( 0.6974693f, 0.4787053f),
- D3DXVECTOR2( 0.6951088f, 0.4779186f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.8125000f, 0.1718750f),
- D3DXVECTOR2( 0.7812500f, 0.1718750f),
- D3DXVECTOR2( 0.8125000f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.2187500f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.8125000f, 0.0416667f),
- D3DXVECTOR2( 0.7812500f, 0.0416667f),
- D3DXVECTOR2( 0.8125000f, 0.0833334f),
- D3DXVECTOR2( 0.7812500f, 0.0833334f),
-};
-
-const D3DXVECTOR4 accessor_3[] = {
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
- D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f),
-
D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), - D3DXVECTOR4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), -}; - -const D3DXVECTOR4 accessor_4[] = { - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65347.0000000f, 65333.0000000f, 65385.0000000f, 65535.0000000f), - D3DXVECTOR4(65347.0000000f, 65333.0000000f, 65385.0000000f, 65535.0000000f), - D3DXVECTOR4(65347.0000000f, 65333.0000000f, 65385.0000000f, 65535.0000000f), - D3DXVECTOR4(48822.0000000f, 48754.0000000f, 53781.0000000f, 65535.0000000f), - D3DXVECTOR4(48822.0000000f, 48754.0000000f, 53781.0000000f, 65535.0000000f), - D3DXVECTOR4(48822.0000000f, 
48754.0000000f, 53781.0000000f, 65535.0000000f), - D3DXVECTOR4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), - D3DXVECTOR4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), - D3DXVECTOR4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), - D3DXVECTOR4(62584.0000000f, 62561.0000000f, 63372.0000000f, 65535.0000000f), - D3DXVECTOR4(62584.0000000f, 62561.0000000f, 63372.0000000f, 65535.0000000f), - D3DXVECTOR4(62584.0000000f, 62561.0000000f, 63372.0000000f, 65535.0000000f), - D3DXVECTOR4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), - D3DXVECTOR4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), - D3DXVECTOR4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), - D3DXVECTOR4( 4.0000000f, 65526.0000000f, 65526.0000000f, 65535.0000000f), - D3DXVECTOR4( 4.0000000f, 65526.0000000f, 65526.0000000f, 65535.0000000f), - D3DXVECTOR4( 4.0000000f, 65526.0000000f, 65526.0000000f, 65535.0000000f), - D3DXVECTOR4(65531.0000000f, 65517.0000000f, 65519.0000000f, 65535.0000000f), - D3DXVECTOR4(65531.0000000f, 65517.0000000f, 65519.0000000f, 65535.0000000f), - D3DXVECTOR4(65531.0000000f, 65517.0000000f, 65519.0000000f, 65535.0000000f), - D3DXVECTOR4(64996.0000000f, 64981.0000000f, 65128.0000000f, 65535.0000000f), - D3DXVECTOR4(64996.0000000f, 64981.0000000f, 65128.0000000f, 65535.0000000f), - D3DXVECTOR4(64996.0000000f, 64981.0000000f, 65128.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), -}; - -const D3DXVECTOR4 accessor_5[] = { - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - 
D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - 
D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), - 
D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
- D3DXVECTOR4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f),
-};
-
-const D3DXVECTOR4 accessor_6[] = {
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(22.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(13.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f),
- D3DXVECTOR4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f),
- D3DXVECTOR4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(11.0000000f, 12.0000000f, 5.0000000f, 10.0000000f),
- D3DXVECTOR4(11.0000000f, 12.0000000f, 5.0000000f, 10.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(11.0000000f, 12.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(11.0000000f, 12.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f),
- D3DXVECTOR4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f),
- D3DXVECTOR4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f),
- D3DXVECTOR4(14.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f),
- D3DXVECTOR4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f),
- D3DXVECTOR4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(20.0000000f, 21.0000000f, 16.0000000f, 19.0000000f),
- D3DXVECTOR4(20.0000000f, 21.0000000f, 16.0000000f, 19.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(20.0000000f, 21.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(20.0000000f, 21.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f),
- D3DXVECTOR4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f),
- D3DXVECTOR4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f),
-};
-
-const D3DXVECTOR4 accessor_7[] = {
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9696393f, 0.0303607f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9696393f, 0.0303607f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9696393f, 0.0303607f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9975708f, 0.0024292f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9975708f, 0.0024292f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9975708f, 0.0024292f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9827832f, 0.0172168f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9827832f, 0.0172168f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9827832f, 0.0172168f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9997147f, 0.0002853f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9997147f, 0.0002853f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9997147f, 0.0002853f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9986997f, 0.0013003f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9986997f, 0.0013003f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9986997f, 0.0013003f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9971964f, 0.0028036f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9971964f, 0.0028036f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9971964f, 0.0028036f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9648562f, 0.0351438f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9648562f, 0.0351438f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 0.9648562f, 0.0351438f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f),
- D3DXVECTOR4( 1.0000000f, 0.0000000f,
0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9995227f, 0.0004773f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9995227f, 0.0004773f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9995227f, 0.0004773f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - 
D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9994707f, 0.0005293f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9994707f, 0.0005293f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9994707f, 0.0005293f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), - D3DXVECTOR4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), - D3DXVECTOR4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), - D3DXVECTOR4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), - D3DXVECTOR4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), - D3DXVECTOR4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), - D3DXVECTOR4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9565650f, 
0.0434350f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), - D3DXVECTOR4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), - D3DXVECTOR4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 
0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), - D3DXVECTOR4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), - D3DXVECTOR4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), - D3DXVECTOR4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), - D3DXVECTOR4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), - D3DXVECTOR4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), - D3DXVECTOR4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), - D3DXVECTOR4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), - D3DXVECTOR4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), - D3DXVECTOR4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), - D3DXVECTOR4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.6358581f, 0.3572480f, 0.0068939f, 0.0000000f), - D3DXVECTOR4( 0.6358581f, 0.3572480f, 0.0068939f, 0.0000000f), - D3DXVECTOR4( 0.7581828f, 0.2399969f, 0.0012200f, 0.0006002f), - D3DXVECTOR4( 0.7581828f, 0.2399969f, 0.0012200f, 0.0006002f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), - D3DXVECTOR4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), - D3DXVECTOR4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), - D3DXVECTOR4( 
0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), - D3DXVECTOR4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), - D3DXVECTOR4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), - D3DXVECTOR4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), - D3DXVECTOR4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), - D3DXVECTOR4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), - D3DXVECTOR4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), - D3DXVECTOR4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), - D3DXVECTOR4( 0.6358581f, 0.3572339f, 0.0069080f, 0.0000000f), - D3DXVECTOR4( 0.6358581f, 0.3572339f, 0.0069080f, 0.0000000f), - D3DXVECTOR4( 0.7581828f, 0.2399969f, 0.0012203f, 0.0005999f), - D3DXVECTOR4( 0.7581828f, 0.2399969f, 0.0012203f, 0.0005999f), -}; - -const DWORD accessor_8[] = { - 0, - 1, - 3, - 0, - 3, - 2, - 5, - 17, - 23, - 5, - 23, - 11, - 13, - 10, - 22, - 13, - 22, - 25, - 27, - 24, - 18, - 27, - 18, - 21, - 20, - 8, - 14, - 20, - 14, - 26, - 9, - 6, - 12, - 9, - 12, - 15, - 19, - 16, - 4, - 19, - 4, - 7, - 30, - 41, - 47, - 30, - 47, - 36, - 37, - 34, - 46, - 37, - 46, - 49, - 51, - 48, - 42, - 51, - 42, - 45, - 44, - 32, - 38, - 44, - 38, - 50, - 33, - 29, - 35, - 33, - 35, - 39, - 43, - 40, - 28, - 43, - 28, - 31, - 53, - 65, - 71, - 53, - 71, - 59, - 61, - 58, - 70, - 61, - 70, - 73, - 75, - 72, - 66, - 75, - 66, - 69, - 68, - 56, - 62, - 68, - 62, - 74, - 57, - 54, - 60, - 57, - 60, - 63, - 67, - 64, - 52, - 67, - 52, - 55, - 77, - 83, - 95, - 77, - 95, - 89, - 85, - 97, - 94, - 85, - 94, - 82, - 99, - 93, - 90, - 99, - 90, - 96, - 92, - 98, - 86, - 92, - 86, - 80, - 81, - 87, - 84, - 81, - 84, - 78, - 91, - 79, - 76, - 91, - 76, - 88, - 100, - 112, - 118, - 100, - 118, - 106, - 109, - 107, - 119, - 109, - 119, - 121, - 123, - 120, - 114, - 123, - 114, - 117, - 116, - 104, - 110, - 116, - 110, - 122, - 105, - 102, - 108, - 105, - 108, - 111, - 115, - 113, - 101, - 115, - 101, - 103, - 124, - 130, - 142, - 124, - 142, - 136, - 133, - 145, - 143, - 133, - 143, - 131, - 147, - 141, - 138, - 147, - 138, - 144, - 140, - 146, - 134, - 140, - 134, - 128, - 129, - 135, - 132, - 129, - 132, - 126, - 139, - 127, - 125, - 139, - 125, - 137, - 177, - 182, - 169, - 177, - 169, - 156, - 158, - 155, - 168, - 158, - 168, - 171, - 175, - 185, - 164, - 175, - 164, - 167, - 174, - 187, - 159, - 174, - 159, - 172, - 189, - 181, - 157, - 189, - 157, - 160, - 165, - 161, - 148, - 165, - 148, - 152, - 154, - 151, - 180, - 154, - 180, - 188, - 166, - 153, - 187, - 166, - 187, - 174, - 173, - 170, - 186, - 173, - 186, - 176, - 149, - 162, - 194, - 149, - 194, - 191, - 190, - 193, - 199, - 190, - 199, - 196, - 183, - 178, - 197, - 183, - 197, - 200, - 179, - 150, - 192, - 179, - 192, - 198, - 163, - 184, - 201, - 163, - 201, - 195, - 231, - 210, - 223, - 231, - 223, - 236, - 212, - 225, - 222, - 212, - 222, - 209, - 229, - 221, - 218, - 229, - 218, - 239, - 228, - 226, - 213, - 228, - 213, - 241, - 243, - 214, - 211, - 243, - 211, - 235, - 219, - 206, - 202, - 219, - 202, - 215, - 208, - 242, - 234, - 208, - 234, - 205, - 220, - 228, - 241, - 220, - 241, - 207, - 227, - 230, - 240, - 227, - 240, - 224, - 203, - 245, - 248, - 203, - 248, - 216, - 244, - 250, - 253, - 244, - 253, - 247, - 237, - 254, - 251, - 237, - 251, - 232, - 233, - 252, - 246, - 233, - 246, - 204, - 217, - 249, - 255, - 217, - 255, - 238, - 451, - 261, - 267, - 451, - 267, - 453, - 452, - 265, - 277, - 452, - 277, - 456, - 457, - 279, - 273, - 457, - 273, - 461, - 460, - 
271, - 259, - 460, - 259, - 450, - 263, - 275, - 269, - 263, - 269, - 257, - 278, - 266, - 260, - 278, - 260, - 272, - 403, - 405, - 291, - 403, - 291, - 285, - 404, - 408, - 301, - 404, - 301, - 289, - 409, - 413, - 297, - 409, - 297, - 303, - 412, - 402, - 283, - 412, - 283, - 295, - 287, - 281, - 293, - 287, - 293, - 299, - 302, - 296, - 284, - 302, - 284, - 290, - 435, - 309, - 315, - 435, - 315, - 437, - 436, - 313, - 325, - 436, - 325, - 440, - 441, - 327, - 321, - 441, - 321, - 445, - 444, - 319, - 307, - 444, - 307, - 434, - 311, - 323, - 317, - 311, - 317, - 305, - 326, - 314, - 308, - 326, - 308, - 320, - 419, - 421, - 339, - 419, - 339, - 333, - 420, - 424, - 349, - 420, - 349, - 337, - 425, - 429, - 345, - 425, - 345, - 351, - 428, - 418, - 331, - 428, - 331, - 343, - 335, - 329, - 341, - 335, - 341, - 347, - 350, - 344, - 332, - 350, - 332, - 338, - 354, - 365, - 371, - 354, - 371, - 360, - 361, - 358, - 370, - 361, - 370, - 373, - 375, - 372, - 366, - 375, - 366, - 369, - 368, - 357, - 363, - 368, - 363, - 374, - 356, - 353, - 359, - 356, - 359, - 362, - 367, - 364, - 352, - 367, - 352, - 355, - 377, - 389, - 394, - 377, - 394, - 382, - 385, - 383, - 395, - 385, - 395, - 397, - 399, - 396, - 390, - 399, - 390, - 393, - 392, - 380, - 386, - 392, - 386, - 398, - 381, - 378, - 384, - 381, - 384, - 387, - 391, - 388, - 376, - 391, - 376, - 379, - 292, - 280, - 400, - 292, - 400, - 414, - 414, - 400, - 402, - 414, - 402, - 412, - 300, - 294, - 415, - 300, - 415, - 411, - 411, - 415, - 413, - 411, - 413, - 409, - 286, - 298, - 410, - 286, - 410, - 406, - 406, - 410, - 408, - 406, - 408, - 404, - 282, - 288, - 407, - 282, - 407, - 401, - 401, - 407, - 405, - 401, - 405, - 403, - 340, - 328, - 416, - 340, - 416, - 430, - 430, - 416, - 418, - 430, - 418, - 428, - 348, - 342, - 431, - 348, - 431, - 427, - 427, - 431, - 429, - 427, - 429, - 425, - 334, - 346, - 426, - 334, - 426, - 422, - 422, - 426, - 424, - 422, - 424, - 420, - 330, - 336, - 423, - 330, - 423, - 417, - 417, - 423, - 421, - 417, - 421, - 419, - 316, - 446, - 432, - 316, - 432, - 304, - 446, - 444, - 434, - 446, - 434, - 432, - 324, - 443, - 447, - 324, - 447, - 318, - 443, - 441, - 445, - 443, - 445, - 447, - 310, - 438, - 442, - 310, - 442, - 322, - 438, - 436, - 440, - 438, - 440, - 442, - 306, - 433, - 439, - 306, - 439, - 312, - 433, - 435, - 437, - 433, - 437, - 439, - 268, - 462, - 448, - 268, - 448, - 256, - 462, - 460, - 450, - 462, - 450, - 448, - 276, - 459, - 463, - 276, - 463, - 270, - 459, - 457, - 461, - 459, - 461, - 463, - 262, - 454, - 458, - 262, - 458, - 274, - 454, - 452, - 456, - 454, - 456, - 458, - 258, - 449, - 455, - 258, - 455, - 264, - 449, - 451, - 453, - 449, - 453, - 455, -}; - -const D3DXMATRIX accessor_9[] = { - D3DXMATRIX( 1.0000000f, -0.0000000f, -0.0000000f, -0.0000000f, - -0.0000000f, 1.0000000f, 0.0000001f, 0.0000000f, - 0.0000000f, -0.0000001f, 1.0000000f, -0.0000000f, - 0.0020865f, -0.6493472f, 0.0044682f, 1.0000000f), - D3DXMATRIX( 0.9999925f, 0.0038494f, 0.0002189f, -0.0000000f, - 0.0038556f, -0.9983662f, -0.0570068f, 0.0000000f, - -0.0000005f, 0.0570072f, -0.9983711f, -0.0000000f, - 0.1011884f, 0.6297937f, 0.0216967f, 1.0000000f), - D3DXMATRIX( 0.9999585f, -0.0091070f, 0.0003901f, -0.0000000f, - -0.0091151f, -0.9993415f, 0.0351207f, 0.0000000f, - 0.0000704f, -0.0351225f, -0.9993804f, -0.0000000f, - 0.1061165f, 0.3790198f, -0.0133131f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000000f, 0.0000000f, -0.0000000f, - -0.0000000f, 1.0000000f, -0.0000001f, 0.0000000f, - -0.0000000f, 
0.0000001f, 1.0000000f, -0.0000000f, - 0.0020865f, -0.7840567f, 0.0044684f, 1.0000000f), - D3DXMATRIX( 0.9999975f, 0.0022573f, 0.0000000f, -0.0000000f, - -0.0022573f, 0.9999975f, -0.0000001f, 0.0000000f, - -0.0000000f, 0.0000001f, 1.0000000f, -0.0000000f, - 0.0041084f, -0.8957252f, 0.0044684f, 1.0000000f), - D3DXMATRIX( 0.9999950f, 0.0031200f, -0.0002015f, -0.0000000f, - 0.0031265f, -0.9978876f, 0.0648890f, 0.0000000f, - 0.0000006f, -0.0648893f, -0.9978877f, -0.0000000f, - 0.2384893f, 1.1499825f, -0.0747788f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000001f, 0.0000014f, -0.0000000f, - 0.0000000f, -0.9978564f, -0.0654442f, 0.0000000f, - 0.0000007f, 0.0654436f, -0.9978516f, -0.0000000f, - 0.2411296f, 0.8435937f, 0.0352844f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000000f, 0.0000014f, -0.0000000f, - -0.0000000f, -1.0000001f, -0.0000000f, 0.0000000f, - 0.0000007f, -0.0000003f, -0.9999953f, -0.0000000f, - 0.2411296f, 0.5391477f, 0.0000003f, 1.0000000f), - D3DXMATRIX( 1.0000001f, 0.0000000f, -0.0000000f, -0.0000000f, - 0.0000000f, 1.0000001f, 0.0000000f, -0.0000000f, - 0.0000000f, -0.0000000f, 1.0000000f, -0.0000000f, - -0.0000000f, -1.2420585f, -0.0000000f, 1.0000000f), - D3DXMATRIX( 1.0000001f, 0.0000000f, 0.0000000f, -0.0000000f, - -0.0000000f, 0.9582973f, 0.2857734f, 0.0000000f, - 0.0000000f, -0.2857733f, 0.9582972f, -0.0000000f, - -0.2488541f, -1.3979810f, -0.3699030f, 1.0000000f), - D3DXMATRIX(-1.0000001f, 0.0000001f, 0.0000000f, -0.0000000f, - -0.0000001f, -0.9963848f, -0.0849537f, 0.0000000f, - 0.0000007f, -0.0849559f, 0.9963719f, -0.0000000f, - -0.2411295f, 1.1474965f, 0.0978378f, 1.0000000f), - D3DXMATRIX(-1.0000001f, -0.0000000f, -0.0000001f, -0.0000000f, - -0.0000000f, -0.9980612f, -0.0622333f, 0.0000000f, - 0.0000006f, -0.0622358f, 0.9980485f, -0.0000000f, - -0.2411295f, 1.0469925f, 0.0740057f, 1.0000000f), - D3DXMATRIX(-1.0000001f, -0.0000001f, -0.0000862f, -0.0000000f, - 0.0000029f, -0.9994135f, -0.0342350f, 0.0000000f, - -0.0000855f, -0.0342378f, 0.9994007f, 0.0000000f, - -0.2411336f, 0.9460898f, 0.0474864f, 1.0000000f), - D3DXMATRIX(-0.9999998f, 0.0000001f, -0.0007542f, 0.0000000f, - -0.0000017f, -0.9999976f, 0.0020114f, 0.0000000f, - -0.0007535f, 0.0020081f, 0.9999847f, -0.0000000f, - -0.2411421f, 0.8438913f, 0.0167495f, 1.0000000f), - D3DXMATRIX(-1.0000001f, -0.0000001f, 0.0000955f, -0.0000000f, - 0.0000050f, -0.9986384f, 0.0521609f, -0.0000000f, - 0.0000961f, 0.0521570f, 0.9986261f, -0.0000000f, - -0.2411315f, 0.7425256f, -0.0203104f, 1.0000000f), - D3DXMATRIX(-0.9999996f, -0.0000002f, -0.0008393f, 0.0000000f, - -0.0001079f, -0.9916946f, 0.1286122f, 0.0000000f, - -0.0008317f, 0.1286074f, 0.9916822f, -0.0000000f, - -0.2410713f, 0.6366479f, -0.0695772f, 1.0000000f), - D3DXMATRIX( 0.9999952f, -0.0031200f, 0.0002028f, -0.0000000f, - -0.0031266f, -0.9978875f, 0.0648890f, 0.0000000f, - 0.0000000f, -0.0648893f, -0.9978880f, -0.0000000f, - -0.2384892f, 1.1499823f, -0.0747791f, 1.0000000f), - D3DXMATRIX( 1.0000001f, 0.0000000f, -0.0000001f, -0.0000000f, - -0.0000000f, -0.9978563f, -0.0654443f, 0.0000000f, - -0.0000000f, 0.0654437f, -0.9978519f, -0.0000000f, - -0.2411296f, 0.8435934f, 0.0352841f, 1.0000000f), - D3DXMATRIX( 1.0000001f, 0.0000000f, -0.0000001f, -0.0000000f, - -0.0000000f, -1.0000000f, -0.0000000f, 0.0000000f, - -0.0000000f, -0.0000003f, -0.9999956f, 0.0000000f, - -0.2411296f, 0.5391475f, -0.0000000f, 1.0000000f), - D3DXMATRIX(-1.0000001f, 0.0000001f, -0.0000021f, -0.0000000f, - 0.0000000f, -0.9963848f, -0.0849539f, 0.0000000f, - -0.0000014f, -0.0849561f, 0.9963719f, 
0.0000000f, - 0.2411296f, 1.1474965f, 0.0978385f, 1.0000000f), - D3DXMATRIX(-1.0000001f, -0.0000000f, -0.0000018f, -0.0000000f, - 0.0000001f, -0.9980614f, -0.0622338f, 0.0000000f, - -0.0000011f, -0.0622363f, 0.9980485f, 0.0000000f, - 0.2411295f, 1.0469943f, 0.0740067f, 1.0000000f), - D3DXMATRIX(-1.0000001f, -0.0000001f, 0.0000870f, -0.0000000f, - -0.0000030f, -0.9994135f, -0.0342354f, 0.0000000f, - 0.0000876f, -0.0342383f, 0.9994008f, -0.0000000f, - 0.2411337f, 0.9460909f, 0.0474867f, 1.0000000f), - D3DXMATRIX(-0.9999999f, -0.0000002f, 0.0007523f, -0.0000000f, - 0.0000017f, -0.9999979f, 0.0020110f, 0.0000000f, - 0.0007530f, 0.0020077f, 0.9999850f, -0.0000000f, - 0.2411422f, 0.8438926f, 0.0167503f, 1.0000000f), - D3DXMATRIX(-1.0000001f, -0.0000001f, -0.0000976f, 0.0000000f, - -0.0000051f, -0.9986385f, 0.0521605f, 0.0000000f, - -0.0000968f, 0.0521566f, 0.9986261f, -0.0000000f, - 0.2411316f, 0.7425266f, -0.0203097f, 1.0000000f), - D3DXMATRIX(-0.9999998f, -0.0000000f, 0.0008371f, -0.0000000f, - 0.0001073f, -0.9917392f, 0.1282703f, -0.0000000f, - 0.0008309f, 0.1282655f, 0.9917267f, -0.0000000f, - 0.2410717f, 0.6366724f, -0.0693572f, 1.0000000f), - D3DXMATRIX( 0.9999633f, -0.0042382f, 0.0074449f, -0.0000000f, - -0.0038072f, -0.9983676f, -0.0569888f, 0.0000000f, - 0.0076739f, 0.0569584f, -0.9983445f, -0.0000000f, - -0.1011066f, 0.6298342f, 0.0208916f, 1.0000000f), - D3DXMATRIX( 0.9999292f, 0.0094264f, 0.0072769f, -0.0000000f, - 0.0091650f, -0.9993400f, 0.0351520f, 0.0000000f, - 0.0076031f, -0.0350826f, -0.9993530f, -0.0000000f, - -0.1060352f, 0.3789865f, -0.0141235f, 1.0000000f), - D3DXMATRIX(-0.0000005f, -0.0000000f, -1.0000001f, 0.0000000f, - 1.0000001f, 0.0000000f, -0.0000005f, 0.0000000f, - 0.0000000f, -1.0000002f, 0.0000000f, -0.0000000f, - -0.5391478f, -0.0000001f, -0.2411293f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000000f, 0.0000000f, -0.0000000f, - -0.0000000f, 0.0000000f, 1.0000000f, 0.0000000f, - 0.0000000f, -1.0000000f, 0.0000000f, -0.0000000f, - 0.2411295f, -0.0870393f, -0.8440942f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000000f, 0.0000010f, -0.0000000f, - 0.0000010f, -0.0039314f, -0.9999923f, 0.0000000f, - 0.0000000f, 0.9999923f, -0.0039314f, -0.0000000f, - 0.1054246f, 0.0106810f, -0.0000419f, 1.0000000f), - D3DXMATRIX(-0.0475822f, -0.0000003f, -0.9988677f, -0.0000000f, - 0.7671170f, 0.6404657f, -0.0365425f, -0.0000000f, - 0.6397402f, -0.7679867f, -0.0304748f, -0.0000000f, - -0.0567143f, 0.0626259f, -0.1028425f, 1.0000000f), - D3DXMATRIX(-0.0475822f, -0.0000002f, -0.9988677f, -0.0000000f, - 0.9988673f, -0.0000001f, -0.0475821f, 0.0000000f, - -0.0000000f, -0.9999999f, -0.0000000f, 0.0000000f, - -0.0812300f, -0.0106811f, -0.1016747f, 1.0000000f), - D3DXMATRIX( 0.9999926f, -0.0000000f, 0.0038554f, -0.0000000f, - 0.0038554f, 0.0000000f, -0.9999926f, 0.0000000f, - -0.0000000f, 1.0000000f, 0.0000000f, -0.0000000f, - 0.1011885f, -0.2318209f, 0.3805980f, 1.0000000f), - D3DXMATRIX(-0.0000005f, -0.0000000f, 1.0000001f, -0.0000000f, - -1.0000001f, -0.0000000f, -0.0000005f, 0.0000000f, - 0.0000000f, -1.0000002f, 0.0000000f, -0.0000000f, - 0.5391478f, -0.0000001f, -0.2411293f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000000f, 0.0000000f, -0.0000000f, - -0.0000000f, 0.0000000f, 1.0000000f, 0.0000000f, - 0.0000000f, -1.0000000f, 0.0000000f, -0.0000000f, - -0.2411295f, -0.0870393f, -0.8440942f, 1.0000000f), - D3DXMATRIX( 1.0000000f, -0.0000000f, -0.0000010f, -0.0000000f, - -0.0000010f, -0.0039314f, -0.9999923f, 0.0000000f, - -0.0000000f, 0.9999923f, -0.0039314f, -0.0000000f, - 
-0.1054246f, 0.0106810f, -0.0000419f, 1.0000000f), - D3DXMATRIX(-0.0475822f, 0.0000003f, 0.9988677f, -0.0000000f, - -0.7671170f, 0.6404657f, -0.0365425f, 0.0000000f, - -0.6397402f, -0.7679867f, -0.0304748f, 0.0000000f, - 0.0567143f, 0.0626259f, -0.1028425f, 1.0000000f), - D3DXMATRIX(-0.0475822f, 0.0000002f, 0.9988677f, -0.0000000f, - -0.9988673f, -0.0000001f, -0.0475821f, 0.0000000f, - 0.0000000f, -0.9999999f, -0.0000000f, -0.0000000f, - 0.0812300f, -0.0106811f, -0.1016747f, 1.0000000f), - D3DXMATRIX( 0.9999926f, -0.0000000f, -0.0038554f, -0.0000000f, - -0.0038554f, 0.0000000f, -0.9999926f, 0.0000000f, - 0.0000000f, 1.0000000f, 0.0000000f, -0.0000000f, - -0.1011885f, -0.2318209f, 0.3805980f, 1.0000000f), -}; - -const float accessor_10[] = { - 0.0, - 0.0416666679084301, - 0.0833333358168602, - 0.125, - 0.1666666716337204, - 0.2083333283662796, - 0.25, - 0.2916666567325592, - 0.3333333432674408, - 0.375, - 0.4166666567325592, - 0.4583333432674408, - 0.5, - 0.5416666865348816, - 0.5833333134651184, - 0.625, - 0.6666666865348816, - 0.7083333134651184, - 0.75, - 0.7916666865348816, - 0.8333333134651184, - 0.875, - 0.9166666865348816, - 0.9583333134651184, - 1.0, - 1.0416666269302368, - 1.0833333730697632, - 1.125, - 1.1666666269302368, - 1.2083333730697632, - 1.25, - 1.2916666269302368, - 1.3333333730697632, - 1.375, - 1.4166666269302368, - 1.4583333730697632, - 1.5, - 1.5416666269302368, - 1.5833333730697632, - 1.625, - 1.6666666269302368, - 1.7083333730697632, - 1.75, - 1.7916666269302368, - 1.8333333730697632, - 1.875, - 1.9166666269302368, - 1.9583333730697632, - 2.0, - 2.0416667461395264, - 2.0833332538604736, - 2.125, - 2.1666667461395264, - 2.2083332538604736, - 2.25, - 2.2916667461395264, - 2.3333332538604736, - 2.375, - 2.4166667461395264, - 2.4583332538604736, - 2.5, - 2.5416667461395264, - 2.5833332538604736, - 2.625, - 2.6666667461395264, - 2.7083332538604736, - 2.75, - 2.7916667461395264, - 2.8333332538604736, - 2.875, - 2.9166667461395264, - 2.9583332538604736, - 3.0, - 3.0416667461395264, - 3.0833332538604736, - 3.125, - 3.1666667461395264, - 3.2083332538604736, - 3.25, - 3.2916667461395264, - 3.3333332538604736, - 3.375, - 3.4166667461395264, - 3.4583332538604736, - 3.5, - 3.5416667461395264, - 3.5833332538604736, - 3.625, - 3.6666667461395264, - 3.7083332538604736, - 3.75, - 3.7916667461395264, - 3.8333332538604736, - 3.875, - 3.9166667461395264, - 3.9583332538604736, - 4.0, - 4.041666507720947, - 4.083333492279053, - 4.125, - 4.166666507720947, -}; - -const D3DXVECTOR3 accessor_11[] = { - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - 
D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 
0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0961419f), - D3DXVECTOR3(-0.0020865f, 0.0060252f, -0.0940875f), - D3DXVECTOR3(-0.0020865f, 0.1826500f, -0.0797067f), - D3DXVECTOR3(-0.0020865f, 0.2834958f, -0.0406729f), - D3DXVECTOR3(-0.0020865f, 0.3014458f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.2924922f, 0.1602180f), - D3DXVECTOR3(-0.0020865f, 0.2529734f, 0.2644956f), - D3DXVECTOR3(-0.0020865f, 0.1544827f, 0.3028520f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.3114135f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.1534726f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), -}; - -const D3DXVECTOR4 accessor_12[] = { - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - 
D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), - D3DXVECTOR4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), - D3DXVECTOR4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), - D3DXVECTOR4(-0.2573321f, 
0.0000000f, 0.0000000f, 0.9663230f), - D3DXVECTOR4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_13[] = { - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 
1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8206826f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937378f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9667931f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449012f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425745f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), -}; - -const float accessor_14[] = { - 0.0, - 4.166666507720947, -}; - -const D3DXVECTOR3 accessor_15[] = { - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), -}; - -const D3DXVECTOR4 accessor_16[] = { - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), -}; - -const D3DXVECTOR3 accessor_17[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_18[] = { - D3DXVECTOR3( 0.0000000f, 0.2498145f, -0.0000010f), - D3DXVECTOR3(-0.0000000f, 0.2498139f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_19[] = { - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), -}; - -const D3DXVECTOR3 accessor_20[] = { - D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_21[] = { - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 
0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f), - D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f), - D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f), - D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f), - D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f), - D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f), - D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f), - 
- D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f),
- D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f),
- D3DXVECTOR3( 0.0000000f, 0.1042613f, -0.0238797f),
- D3DXVECTOR3( 0.0000000f, 0.1135390f, -0.0116829f),
- D3DXVECTOR3( 0.0000000f, 0.1261039f, 0.0005138f),
- D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f),
- D3DXVECTOR3( 0.0000000f, 0.1518535f, 0.0059257f),
- D3DXVECTOR3(-0.0000000f, 0.1617508f, 0.0050005f),
- D3DXVECTOR3( 0.0000000f, 0.1683611f, 0.0024894f),
- D3DXVECTOR3( 0.0000000f, 0.1707681f, -0.0024006f),
- D3DXVECTOR3( 0.0000000f, 0.1357049f, -0.0174982f),
- D3DXVECTOR3( 0.0000000f, 0.1006418f, -0.0294237f),
-};
-
-const D3DXVECTOR4 accessor_22[] = {
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
- D3DXVECTOR4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f),
- D3DXVECTOR4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f),
- D3DXVECTOR4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f),
- D3DXVECTOR4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f),
- D3DXVECTOR4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f),
- D3DXVECTOR4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f),
- D3DXVECTOR4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f),
- D3DXVECTOR4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f),
- D3DXVECTOR4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f),
- D3DXVECTOR4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f),
-};
-
-const D3DXVECTOR3 accessor_23[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_24[] = {
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
- D3DXVECTOR3( 0.0000000f, 0.1075576f, 0.0008690f),
- D3DXVECTOR3(-0.0000000f, 0.1139177f, 0.0012693f),
- D3DXVECTOR3( 0.0000000f, 0.1219529f, 0.0018200f),
- D3DXVECTOR3(-0.0000000f, 0.1295645f, 0.0024259f),
- D3DXVECTOR3(-0.0000000f, 0.1350743f, 0.0029986f),
- D3DXVECTOR3( 0.0000000f, 0.1384857f, 0.0034760f),
- D3DXVECTOR3(-0.0000000f, 0.1402218f, 0.0038029f),
- D3DXVECTOR3( 0.0000000f, 0.1407065f, 0.0039242f),
- D3DXVECTOR3(-0.0000000f, 0.1228388f, 0.0023193f),
- D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f),
-};
-
-const D3DXVECTOR4 accessor_25[] = {
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
- D3DXVECTOR4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f),
- D3DXVECTOR4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f),
- D3DXVECTOR4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f),
- D3DXVECTOR4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f),
- D3DXVECTOR4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f),
- D3DXVECTOR4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f),
- D3DXVECTOR4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f),
- D3DXVECTOR4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f),
- D3DXVECTOR4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f),
- D3DXVECTOR4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f),
-};
-
-const D3DXVECTOR3 accessor_26[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_27[] = {
- D3DXVECTOR3(-0.0005524f, 0.0688296f, -0.0213631f),
- D3DXVECTOR3(-0.0005524f, 0.0688296f, -0.0213631f),
-};
-
-const D3DXVECTOR4 accessor_28[] = {
- D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f),
- D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f),
-};
-
-const D3DXVECTOR3 accessor_29[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_30[] = {
- D3DXVECTOR3( 0.0000000f, 0.3082166f, -0.0000008f),
- D3DXVECTOR3( 0.0000000f, 0.3082193f, 0.0000003f),
-};
-
-const D3DXVECTOR4 accessor_31[] = {
- D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f),
- D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f),
-};
-
-const D3DXVECTOR3 accessor_32[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_33[] = {
- D3DXVECTOR3(-0.0000000f, 0.3056026f, 0.0000008f),
- D3DXVECTOR3(-0.0000000f, 0.3056034f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_34[] = {
- D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f),
- D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f),
-};
-
-const D3DXVECTOR3 accessor_35[] = {
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_36[] = {
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
- D3DXVECTOR3( 0.0015116f, 0.2546579f, 0.0182690f),
- D3DXVECTOR3( 0.0014504f, 0.2816091f, 0.0045884f),
- D3DXVECTOR3( 0.0013892f, 0.3085600f, -0.0090922f),
- D3DXVECTOR3( 0.0013614f, 0.3208103f, -0.0153106f),
- D3DXVECTOR3( 0.0013621f, 0.3205253f, -0.0145414f),
- D3DXVECTOR3( 0.0013668f, 0.3185293f, -0.0116041f),
- D3DXVECTOR3( 0.0013792f, 0.3131109f, -0.0055542f),
- D3DXVECTOR3( 0.0014033f, 0.3025597f, 0.0045527f),
- D3DXVECTOR3( 0.0014791f, 0.2690616f, 0.0176327f),
- D3DXVECTOR3( 0.0015394f, 0.2424075f, 0.0244875f),
-};
-
-const D3DXVECTOR4 accessor_37[] = {
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
- D3DXVECTOR4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f),
- D3DXVECTOR4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f),
- D3DXVECTOR4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f),
- D3DXVECTOR4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f),
- D3DXVECTOR4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f),
- D3DXVECTOR4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f),
- D3DXVECTOR4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f),
- D3DXVECTOR4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f),
- D3DXVECTOR4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f),
- D3DXVECTOR4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f),
-};
-
-const D3DXVECTOR3 accessor_38[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_39[] = {
- D3DXVECTOR3( 0.2488541f, 0.2033313f, -0.0450287f),
- D3DXVECTOR3( 0.2488541f, 0.2033313f, -0.0450287f),
-};
-
-const D3DXVECTOR4 accessor_40[] = {
- D3DXVECTOR4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f),
- D3DXVECTOR4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f),
-};
-
-const D3DXVECTOR3 accessor_41[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_42[] = {
- D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f),
- D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_43[] = {
- D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f),
- D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f),
-};
-
-const D3DXVECTOR3 accessor_44[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
-};
-
-const D3DXVECTOR3 accessor_45[] = {
- D3DXVECTOR3(-0.0000000f, 0.1024612f, -0.0000002f),
- D3DXVECTOR3( 0.0000000f, 0.1024621f, -0.0000009f),
-};
-
-const D3DXVECTOR4 accessor_46[] = {
- D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f),
- D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f),
-};
-
-const D3DXVECTOR3 accessor_47[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_48[] = {
- D3DXVECTOR3(-0.0000000f, 0.1026015f, -0.0000024f),
- D3DXVECTOR3(-0.0000000f, 0.1026023f, -0.0000007f),
-};
-
-const D3DXVECTOR4 accessor_49[] = {
- D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f),
- D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f),
-};
-
-const D3DXVECTOR3 accessor_50[] = {
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_51[] = {
- D3DXVECTOR3( 0.0000000f, 0.1033747f, 0.0000013f),
- D3DXVECTOR3( 0.0000000f, 0.1033746f, 0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_52[] = {
- D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f),
- D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f),
-};
-
-const D3DXVECTOR3 accessor_53[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_54[] = {
- D3DXVECTOR3(-0.0000000f, 0.1012776f, -0.0000007f),
- D3DXVECTOR3(-0.0000000f, 0.1012825f, 0.0000005f),
-};
-
-const D3DXVECTOR4 accessor_55[] = {
- D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f),
- D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f),
-};
-
-const D3DXVECTOR3 accessor_56[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_57[] = {
- D3DXVECTOR3( 0.0000000f, 0.1024311f, -0.0000003f),
- D3DXVECTOR3( 0.0000000f, 0.1024376f, -0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_58[] = {
- D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f),
- D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f),
-};
-
-const D3DXVECTOR3 accessor_59[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_60[] = {
- D3DXVECTOR3(-0.0027646f, 0.0680361f, -0.0078378f),
- D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f),
-};
-
-const D3DXVECTOR4 accessor_61[] = {
- D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f),
- D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f),
-};
-
-const D3DXVECTOR3 accessor_62[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_63[] = {
- D3DXVECTOR3( 0.0000000f, 0.3082178f, -0.0000006f),
- D3DXVECTOR3(-0.0000000f, 0.3082178f, 0.0000004f),
-};
-
-const D3DXVECTOR4 accessor_64[] = {
- D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f),
- D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f),
-};
-
-const D3DXVECTOR3 accessor_65[] = {
- D3DXVECTOR3( 1.0000001f, 1.0000000f, 1.0000001f),
- D3DXVECTOR3( 1.0000001f, 1.0000000f, 1.0000001f),
-};
-
-const D3DXVECTOR3 accessor_66[] = {
- D3DXVECTOR3(-0.0000000f, 0.3056006f, -0.0000014f),
- D3DXVECTOR3( 0.0000000f, 0.3055994f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_67[] = {
- D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f),
- D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f),
-};
-
-const D3DXVECTOR3 accessor_68[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_69[] = {
- D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f),
- D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_70[] = {
- D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f),
- D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f),
-};
-
-const D3DXVECTOR3 accessor_71[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
-};
-
-const D3DXVECTOR3 accessor_72[] = {
- D3DXVECTOR3( 0.0000000f, 0.1024651f, -0.0000008f),
- D3DXVECTOR3( 0.0000000f, 0.1024579f, -0.0000007f),
-};
-
-const D3DXVECTOR4 accessor_73[] = {
- D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f),
- D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f),
-};
-
-const D3DXVECTOR3 accessor_74[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_75[] = {
- D3DXVECTOR3(-0.0000000f, 0.1026116f, -0.0000006f),
- D3DXVECTOR3(-0.0000000f, 0.1026118f, -0.0000004f),
-};
-
-const D3DXVECTOR4 accessor_76[] = {
- D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f),
- D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f),
-};
-
-const D3DXVECTOR3 accessor_77[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_78[] = {
- D3DXVECTOR3( 0.0000000f, 0.1033683f, 0.0000007f),
- D3DXVECTOR3( 0.0000000f, 0.1033682f, 0.0000003f),
-};
-
-const D3DXVECTOR4 accessor_79[] = {
- D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f),
- D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f),
-};
-
-const D3DXVECTOR3 accessor_80[] = {
- D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_81[] = {
- D3DXVECTOR3(-0.0000000f, 0.1012827f, -0.0000015f),
- D3DXVECTOR3( 0.0000000f, 0.1012839f, -0.0000012f),
-};
-
-const D3DXVECTOR4 accessor_82[] = {
- D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f),
- D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f),
-};
-
-const D3DXVECTOR3 accessor_83[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_84[] = {
- D3DXVECTOR3( 0.0000001f, 0.1024345f, -0.0000001f),
- D3DXVECTOR3( 0.0000001f, 0.1024316f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_85[] = {
- D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f),
- D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f),
-};
-
-const D3DXVECTOR3 accessor_86[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_87[] = {
- D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f),
- D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f),
-};
-
-const D3DXVECTOR4 accessor_88[] = {
- D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f),
- D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f),
-};
-
-const D3DXVECTOR3 accessor_89[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_90[] = {
- D3DXVECTOR3( 0.0000000f, 0.2498153f, -0.0000002f),
- D3DXVECTOR3(-0.0000000f, 0.2498145f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_91[] = {
- D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f),
- D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f),
-};
-
-const D3DXVECTOR3 accessor_92[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_93[] = {
- D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f),
- D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_94[] = {
- D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f),
- D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f),
-};
-
-const D3DXVECTOR3 accessor_95[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_96[] = {
- D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f),
- D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f),
-};
-
-const D3DXVECTOR4 accessor_97[] = {
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
-};
-
-const D3DXVECTOR3 accessor_98[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_99[] = {
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_100[] = {
- D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f),
- D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f),
-};
-
-const D3DXVECTOR3 accessor_101[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_102[] = {
- D3DXVECTOR3(-0.0000000f, 0.0919263f, 0.0000001f),
- D3DXVECTOR3( 0.0000000f, 0.0919260f, 0.0000004f),
-};
-
-const D3DXVECTOR4 accessor_103[] = {
- D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f),
- D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f),
-};
-
-const D3DXVECTOR3 accessor_104[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
-};
-
-const D3DXVECTOR3 accessor_105[] = {
- D3DXVECTOR3( 0.0000011f, 0.1196970f, 0.0000000f),
- D3DXVECTOR3( 0.0000003f, 0.1196963f, -0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_106[] = {
- D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f),
- D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f),
-};
-
-const D3DXVECTOR3 accessor_107[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_108[] = {
- D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f),
- D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f),
-};
-
-const D3DXVECTOR4 accessor_109[] = {
- D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f),
- D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f),
-};
-
-const D3DXVECTOR3 accessor_110[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_111[] = {
- D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f),
- D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_112[] = {
- D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f),
- D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f),
-};
-
-const D3DXVECTOR3 accessor_113[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_114[] = {
- D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f),
- D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f),
-};
-
-const D3DXVECTOR4 accessor_115[] = {
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
-};
-
-const D3DXVECTOR3 accessor_116[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_117[] = {
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0961420f),
- D3DXVECTOR3(-0.0020865f, 0.0613021f, -0.0940875f),
- D3DXVECTOR3(-0.0020865f, 0.2418380f, -0.0687290f),
- D3DXVECTOR3(-0.0020865f, 0.3463625f, -0.0236981f),
- D3DXVECTOR3(-0.0020865f, 0.3659465f, 0.0547394f),
- D3DXVECTOR3(-0.0020865f, 0.3578844f, 0.1711183f),
- D3DXVECTOR3(-0.0020865f, 0.3162026f, 0.2572443f),
- D3DXVECTOR3(-0.0020865f, 0.2100785f, 0.2797687f),
- D3DXVECTOR3(-0.0020865f, 0.0552606f, 0.2831892f),
- D3DXVECTOR3(-0.0020865f, 0.0583039f, 0.1395508f),
- D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_118[] = {
- D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f),
- D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f),
-};
-
-const D3DXVECTOR3 accessor_119[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_120[] = {
- D3DXVECTOR3( 0.0000000f, 0.0919262f, 0.0000003f),
- D3DXVECTOR3(-0.0000000f, 0.0919257f, 0.0000010f),
-};
-
-const D3DXVECTOR4 accessor_121[] = {
- D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f),
- D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f),
-};
-
-const D3DXVECTOR3 accessor_122[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
-};
-
-const D3DXVECTOR3 accessor_123[] = {
- D3DXVECTOR3(-0.0000009f, 0.1196955f, 0.0000000f),
- D3DXVECTOR3(-0.0000006f, 0.1196964f, -0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_124[] = {
- D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f),
- D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f),
-};
-
-const D3DXVECTOR3 accessor_125[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_126[] = {
- D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f),
- D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f),
-};
-
-const D3DXVECTOR4 accessor_127[] = {
- D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f),
- D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f),
-};
-
-const D3DXVECTOR3 accessor_128[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const float accessor_129[] = {
- 0.0,
- 0.0416666679084301,
- 0.0833333358168602,
- 0.125,
- 0.1666666716337204,
- 0.2083333283662796,
- 0.25,
- 0.2916666567325592,
- 0.3333333432674408,
- 0.375,
- 0.4166666567325592,
- 0.4583333432674408,
- 0.5,
- 0.5416666865348816,
- 0.5833333134651184,
- 0.625,
- 0.6666666865348816,
- 0.7083333134651184,
- 0.75,
- 0.7916666865348816,
- 0.8333333134651184,
- 0.875,
- 0.9166666865348816,
- 0.9583333134651184,
- 1.0,
- 1.0416666269302368,
- 1.0833333730697632,
- 1.125,
- 1.1666666269302368,
- 1.2083333730697632,
- 1.25,
- 1.2916666269302368,
- 1.3333333730697632,
- 1.375,
- 1.4166666269302368,
- 1.4583333730697632,
- 1.5,
- 1.5416666269302368,
- 1.5833333730697632,
- 1.625,
- 1.6666666269302368,
- 1.7083333730697632,
- 1.75,
- 1.7916666269302368,
- 1.8333333730697632,
- 1.875,
- 1.9166666269302368,
- 1.9583333730697632,
- 2.0,
- 2.0416667461395264,
- 2.0833332538604736,
- 2.125,
- 2.1666667461395264,
- 2.2083332538604736,
- 2.25,
- 2.2916667461395264,
- 2.3333332538604736,
- 2.375,
- 2.4166667461395264,
- 2.4583332538604736,
- 2.5,
- 2.5416667461395264,
- 2.5833332538604736,
- 2.625,
- 2.6666667461395264,
- 2.7083332538604736,
- 2.75,
- 2.7916667461395264,
- 2.8333332538604736,
- 2.875,
- 2.9166667461395264,
- 2.9583332538604736,
- 3.0,
- 3.0416667461395264,
- 3.0833332538604736,
- 3.125,
- 3.1666667461395264,
- 3.2083332538604736,
- 3.25,
- 3.2916667461395264,
- 3.3333332538604736,
- 3.375,
- 3.4166667461395264,
- 3.4583332538604736,
- 3.5,
- 3.5416667461395264,
- 3.5833332538604736,
- 3.625,
- 3.6666667461395264,
- 3.7083332538604736,
- 3.75,
-};
-
-const D3DXVECTOR3 accessor_130[] = {
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0178199f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0176253f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0170803f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0162434f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0151729f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0139273f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0125649f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0111441f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0097233f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0083608f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0071152f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0060447f),
-0.0052078f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0046629f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0046182f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0050419f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0057004f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0065544f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0075649f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0086928f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0098989f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0111441f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0123893f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0135953f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0147232f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0157337f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0165877f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0172462f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0176699f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0178199f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0176253f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0170803f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0162434f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0151729f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0139273f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0125649f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0111441f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0097233f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0083608f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0071152f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0060447f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0052078f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0046629f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0046182f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0050419f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0057004f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0065544f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0075649f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0086928f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0098989f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0111441f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0123893f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0135953f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0147232f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0157337f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0165877f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0172462f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0176699f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0178199f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0176253f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0170803f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0162434f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0151729f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0139273f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0125649f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0111441f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0097233f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0083608f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0071152f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0060447f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0052078f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0046629f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0046182f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0050419f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0057004f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0065544f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 
-0.0075649f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0086928f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0098989f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0111441f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0123893f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0135953f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0147232f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0157337f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0165877f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0172462f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0176699f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0178199f), -}; - -const D3DXVECTOR4 accessor_131[] = { - D3DXVECTOR4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), - D3DXVECTOR4(-0.0572518f, -0.0034832f, -0.0008201f, 0.9983534f), - D3DXVECTOR4(-0.0562628f, -0.0034771f, -0.0008388f, 0.9984096f), - D3DXVECTOR4(-0.0542909f, -0.0034680f, -0.0008658f, 0.9985188f), - D3DXVECTOR4(-0.0508358f, -0.0034575f, -0.0008972f, 0.9987006f), - D3DXVECTOR4(-0.0448832f, -0.0034473f, -0.0009273f, 0.9989859f), - D3DXVECTOR4(-0.0340697f, -0.0034409f, -0.0009455f, 0.9994131f), - D3DXVECTOR4(-0.0152769f, -0.0034421f, -0.0009389f, 0.9998769f), - D3DXVECTOR4( 0.0013275f, -0.0034423f, -0.0009401f, 0.9999927f), - D3DXVECTOR4( 0.0104516f, -0.0034370f, -0.0009648f, 0.9999390f), - D3DXVECTOR4( 0.0156699f, -0.0034296f, -0.0009984f, 0.9998708f), - D3DXVECTOR4( 0.0187803f, -0.0034223f, -0.0010320f, 0.9998173f), - D3DXVECTOR4( 0.0205858f, -0.0034160f, -0.0010604f, 0.9997817f), - D3DXVECTOR4( 0.0215017f, -0.0034117f, -0.0010798f, 0.9997625f), - D3DXVECTOR4( 0.0217697f, -0.0034101f, -0.0010870f, 0.9997566f), - D3DXVECTOR4( 0.0215163f, -0.0034113f, -0.0010816f, 0.9997621f), - D3DXVECTOR4( 0.0206586f, -0.0034145f, -0.0010670f, 0.9997802f), - D3DXVECTOR4( 0.0189944f, -0.0034191f, -0.0010455f, 0.9998133f), - D3DXVECTOR4( 0.0161988f, -0.0034246f, -0.0010197f, 0.9998624f), - D3DXVECTOR4( 0.0117109f, -0.0034302f, -0.0009934f, 0.9999250f), - D3DXVECTOR4( 0.0044984f, -0.0034346f, -0.0009716f, 0.9999835f), - D3DXVECTOR4(-0.0071126f, -0.0034362f, -0.0009619f, 0.9999683f), - D3DXVECTOR4(-0.0226609f, -0.0034351f, -0.0009642f, 0.9997368f), - D3DXVECTOR4(-0.0356789f, -0.0034372f, -0.0009581f, 0.9993569f), - D3DXVECTOR4(-0.0440886f, -0.0034440f, -0.0009380f, 0.9990212f), - D3DXVECTOR4(-0.0494646f, -0.0034530f, -0.0009107f, 0.9987695f), - D3DXVECTOR4(-0.0529636f, -0.0034627f, -0.0008819f, 0.9985901f), - D3DXVECTOR4(-0.0552203f, -0.0034716f, -0.0008551f, 0.9984678f), - D3DXVECTOR4(-0.0565983f, -0.0034789f, -0.0008332f, 0.9983907f), - D3DXVECTOR4(-0.0573221f, -0.0034838f, -0.0008186f, 0.9983494f), - D3DXVECTOR4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), - D3DXVECTOR4(-0.0572518f, -0.0034832f, -0.0008201f, 0.9983534f), - D3DXVECTOR4(-0.0562628f, -0.0034771f, -0.0008388f, 0.9984096f), - D3DXVECTOR4(-0.0542909f, -0.0034680f, -0.0008658f, 0.9985188f), - D3DXVECTOR4(-0.0508358f, -0.0034575f, -0.0008972f, 0.9987006f), - D3DXVECTOR4(-0.0448832f, -0.0034473f, -0.0009273f, 0.9989859f), - D3DXVECTOR4(-0.0340697f, -0.0034409f, -0.0009455f, 0.9994131f), - D3DXVECTOR4(-0.0152769f, -0.0034421f, -0.0009389f, 0.9998769f), - D3DXVECTOR4( 0.0013275f, -0.0034423f, -0.0009401f, 0.9999927f), - D3DXVECTOR4( 0.0104516f, -0.0034370f, -0.0009648f, 0.9999390f), - D3DXVECTOR4( 0.0156699f, -0.0034296f, -0.0009984f, 0.9998708f), - D3DXVECTOR4( 0.0187803f, -0.0034223f, -0.0010320f, 0.9998173f), - D3DXVECTOR4( 0.0205858f, -0.0034160f, -0.0010604f, 0.9997817f), - D3DXVECTOR4( 0.0215017f, -0.0034117f, -0.0010798f, 0.9997625f), - 
D3DXVECTOR4( 0.0217697f, -0.0034101f, -0.0010870f, 0.9997566f), - D3DXVECTOR4( 0.0215163f, -0.0034113f, -0.0010816f, 0.9997621f), - D3DXVECTOR4( 0.0206586f, -0.0034145f, -0.0010670f, 0.9997802f), - D3DXVECTOR4( 0.0189944f, -0.0034191f, -0.0010455f, 0.9998133f), - D3DXVECTOR4( 0.0161988f, -0.0034246f, -0.0010197f, 0.9998624f), - D3DXVECTOR4( 0.0117109f, -0.0034302f, -0.0009934f, 0.9999250f), - D3DXVECTOR4( 0.0044984f, -0.0034346f, -0.0009716f, 0.9999835f), - D3DXVECTOR4(-0.0071126f, -0.0034362f, -0.0009619f, 0.9999683f), - D3DXVECTOR4(-0.0226609f, -0.0034351f, -0.0009642f, 0.9997368f), - D3DXVECTOR4(-0.0356789f, -0.0034372f, -0.0009581f, 0.9993569f), - D3DXVECTOR4(-0.0440886f, -0.0034440f, -0.0009380f, 0.9990212f), - D3DXVECTOR4(-0.0494646f, -0.0034530f, -0.0009107f, 0.9987695f), - D3DXVECTOR4(-0.0529636f, -0.0034627f, -0.0008819f, 0.9985901f), - D3DXVECTOR4(-0.0552203f, -0.0034716f, -0.0008551f, 0.9984678f), - D3DXVECTOR4(-0.0565983f, -0.0034789f, -0.0008332f, 0.9983907f), - D3DXVECTOR4(-0.0573221f, -0.0034838f, -0.0008186f, 0.9983494f), - D3DXVECTOR4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), - D3DXVECTOR4(-0.0572518f, -0.0034832f, -0.0008201f, 0.9983534f), - D3DXVECTOR4(-0.0562628f, -0.0034771f, -0.0008388f, 0.9984096f), - D3DXVECTOR4(-0.0542909f, -0.0034680f, -0.0008658f, 0.9985188f), - D3DXVECTOR4(-0.0508358f, -0.0034575f, -0.0008972f, 0.9987006f), - D3DXVECTOR4(-0.0448832f, -0.0034473f, -0.0009273f, 0.9989859f), - D3DXVECTOR4(-0.0340697f, -0.0034409f, -0.0009455f, 0.9994131f), - D3DXVECTOR4(-0.0152769f, -0.0034421f, -0.0009389f, 0.9998769f), - D3DXVECTOR4( 0.0013275f, -0.0034423f, -0.0009401f, 0.9999927f), - D3DXVECTOR4( 0.0104516f, -0.0034370f, -0.0009648f, 0.9999390f), - D3DXVECTOR4( 0.0156699f, -0.0034296f, -0.0009984f, 0.9998708f), - D3DXVECTOR4( 0.0187803f, -0.0034223f, -0.0010320f, 0.9998173f), - D3DXVECTOR4( 0.0205858f, -0.0034160f, -0.0010604f, 0.9997817f), - D3DXVECTOR4( 0.0215017f, -0.0034117f, -0.0010798f, 0.9997625f), - D3DXVECTOR4( 0.0217697f, -0.0034101f, -0.0010870f, 0.9997566f), - D3DXVECTOR4( 0.0215163f, -0.0034113f, -0.0010816f, 0.9997621f), - D3DXVECTOR4( 0.0206586f, -0.0034145f, -0.0010670f, 0.9997802f), - D3DXVECTOR4( 0.0189944f, -0.0034191f, -0.0010455f, 0.9998133f), - D3DXVECTOR4( 0.0161988f, -0.0034246f, -0.0010197f, 0.9998624f), - D3DXVECTOR4( 0.0117109f, -0.0034302f, -0.0009934f, 0.9999250f), - D3DXVECTOR4( 0.0044984f, -0.0034346f, -0.0009716f, 0.9999835f), - D3DXVECTOR4(-0.0071126f, -0.0034362f, -0.0009619f, 0.9999683f), - D3DXVECTOR4(-0.0226609f, -0.0034351f, -0.0009642f, 0.9997368f), - D3DXVECTOR4(-0.0356789f, -0.0034372f, -0.0009581f, 0.9993569f), - D3DXVECTOR4(-0.0440886f, -0.0034440f, -0.0009380f, 0.9990212f), - D3DXVECTOR4(-0.0494646f, -0.0034530f, -0.0009107f, 0.9987695f), - D3DXVECTOR4(-0.0529636f, -0.0034627f, -0.0008819f, 0.9985901f), - D3DXVECTOR4(-0.0552203f, -0.0034716f, -0.0008551f, 0.9984678f), - D3DXVECTOR4(-0.0565983f, -0.0034789f, -0.0008332f, 0.9983907f), - D3DXVECTOR4(-0.0573221f, -0.0034838f, -0.0008186f, 0.9983494f), - D3DXVECTOR4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), -}; - -const float accessor_132[] = { - 0.0, - 3.75, -}; - -const D3DXVECTOR3 accessor_133[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_134[] = { - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), -}; - -const D3DXVECTOR4 accessor_135[] = { - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 
0.0285151f), - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), -}; - -const D3DXVECTOR3 accessor_136[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_137[] = { - D3DXVECTOR3(-0.0000000f, 0.2498149f, -0.0000000f), - D3DXVECTOR3(-0.0000000f, 0.2498149f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_138[] = { - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), -}; - -const D3DXVECTOR3 accessor_139[] = { - D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999998f), - D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999998f), -}; - -const D3DXVECTOR3 accessor_140[] = { - D3DXVECTOR3( 0.0000000f, 0.1347095f, -0.0000000f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000161f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000823f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002668f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006553f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008652f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009378f), - D3DXVECTOR3( 0.0000375f, 0.1128822f, -0.0009551f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009390f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008728f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006883f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002997f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000899f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000173f), - D3DXVECTOR3(-0.0000000f, 0.1347095f, -0.0000000f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000161f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000823f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002668f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006553f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008652f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009378f), - D3DXVECTOR3( 0.0000375f, 0.1128822f, -0.0009551f), - D3DXVECTOR3( 0.0000362f, 0.1136308f, -0.0009433f), - D3DXVECTOR3( 0.0000328f, 0.1156368f, -0.0008998f), - D3DXVECTOR3( 0.0000278f, 0.1185411f, -0.0008043f), - D3DXVECTOR3( 0.0000219f, 0.1219844f, -0.0006213f), - D3DXVECTOR3( 0.0000156f, 0.1256073f, -0.0003603f), - D3DXVECTOR3( 0.0000097f, 0.1290506f, -0.0001627f), - D3DXVECTOR3( 0.0000047f, 0.1319548f, -0.0000591f), - D3DXVECTOR3( 0.0000013f, 0.1339610f, -0.0000125f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, -0.0000000f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000161f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000823f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002668f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006553f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008652f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009378f), - D3DXVECTOR3( 0.0000375f, 0.1128822f, -0.0009551f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009390f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008728f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006883f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002997f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000899f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000173f), - D3DXVECTOR3(-0.0000000f, 0.1347095f, -0.0000000f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000161f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000823f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002668f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006553f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008652f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009378f), - D3DXVECTOR3( 0.0000375f, 0.1128822f, -0.0009551f), - D3DXVECTOR3( 0.0000362f, 0.1136308f, -0.0009433f), - 
D3DXVECTOR3( 0.0000328f, 0.1156368f, -0.0008998f), - D3DXVECTOR3( 0.0000278f, 0.1185411f, -0.0008043f), - D3DXVECTOR3( 0.0000219f, 0.1219844f, -0.0006213f), - D3DXVECTOR3( 0.0000156f, 0.1256073f, -0.0003603f), - D3DXVECTOR3( 0.0000097f, 0.1290506f, -0.0001627f), - D3DXVECTOR3( 0.0000047f, 0.1319548f, -0.0000591f), - D3DXVECTOR3( 0.0000013f, 0.1339610f, -0.0000125f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, -0.0000000f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000161f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000823f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002668f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006553f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008652f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009378f), - D3DXVECTOR3( 0.0000375f, 0.1128822f, -0.0009551f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009390f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008728f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006883f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002997f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000899f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000173f), - D3DXVECTOR3(-0.0000000f, 0.1347095f, -0.0000000f), - D3DXVECTOR3( 0.0000021f, 0.1335004f, -0.0000161f), - D3DXVECTOR3( 0.0000074f, 0.1303822f, -0.0000823f), - D3DXVECTOR3( 0.0000148f, 0.1261186f, -0.0002668f), - D3DXVECTOR3( 0.0000228f, 0.1214731f, -0.0006553f), - D3DXVECTOR3( 0.0000301f, 0.1172095f, -0.0008652f), - D3DXVECTOR3( 0.0000354f, 0.1140913f, -0.0009378f), - D3DXVECTOR3( 0.0000375f, 0.1128822f, -0.0009551f), - D3DXVECTOR3( 0.0000362f, 0.1136308f, -0.0009433f), - D3DXVECTOR3( 0.0000328f, 0.1156368f, -0.0008998f), - D3DXVECTOR3( 0.0000278f, 0.1185411f, -0.0008043f), - D3DXVECTOR3( 0.0000219f, 0.1219844f, -0.0006213f), - D3DXVECTOR3( 0.0000156f, 0.1256073f, -0.0003603f), - D3DXVECTOR3( 0.0000097f, 0.1290506f, -0.0001627f), - D3DXVECTOR3( 0.0000047f, 0.1319548f, -0.0000591f), - D3DXVECTOR3( 0.0000013f, 0.1339610f, -0.0000125f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_141[] = { - D3DXVECTOR4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), - D3DXVECTOR4(-0.0463134f, -0.0008756f, 0.0013147f, 0.9989257f), - D3DXVECTOR4(-0.0432515f, -0.0008707f, 0.0012913f, 0.9990631f), - D3DXVECTOR4(-0.0385485f, -0.0008633f, 0.0012554f, 0.9992556f), - D3DXVECTOR4(-0.0325317f, -0.0008544f, 0.0012093f, 0.9994696f), - D3DXVECTOR4(-0.0255289f, -0.0008449f, 0.0011556f, 0.9996731f), - D3DXVECTOR4(-0.0178682f, -0.0008353f, 0.0010968f, 0.9998394f), - D3DXVECTOR4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), - D3DXVECTOR4(-0.0018872f, -0.0008184f, 0.0009740f, 0.9999974f), - D3DXVECTOR4( 0.0057753f, -0.0008118f, 0.0009150f, 0.9999826f), - D3DXVECTOR4( 0.0127808f, -0.0008065f, 0.0008610f, 0.9999176f), - D3DXVECTOR4( 0.0188006f, -0.0008027f, 0.0008146f, 0.9998226f), - D3DXVECTOR4( 0.0235065f, -0.0008001f, 0.0007783f, 0.9997230f), - D3DXVECTOR4( 0.0265706f, -0.0007986f, 0.0007547f, 0.9996463f), - D3DXVECTOR4( 0.0276649f, -0.0007981f, 0.0007462f, 0.9996167f), - D3DXVECTOR4( 0.0268218f, -0.0007984f, 0.0007527f, 0.9996396f), - D3DXVECTOR4( 0.0244393f, -0.0007996f, 0.0007711f, 0.9997007f), - D3DXVECTOR4( 0.0207369f, -0.0008016f, 0.0007997f, 0.9997844f), - D3DXVECTOR4( 0.0159344f, -0.0008044f, 0.0008367f, 0.9998724f), - D3DXVECTOR4( 0.0102516f, -0.0008083f, 0.0008805f, 0.9999467f), - D3DXVECTOR4( 0.0039085f, -0.0008133f, 0.0009293f, 0.9999916f), - D3DXVECTOR4(-0.0028748f, -0.0008193f, 0.0009816f, 0.9999951f), - D3DXVECTOR4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), - 
D3DXVECTOR4(-0.0168807f, -0.0008341f, 0.0010892f, 0.9998566f), - D3DXVECTOR4(-0.0236626f, -0.0008424f, 0.0011413f, 0.9997190f), - D3DXVECTOR4(-0.0300036f, -0.0008509f, 0.0011899f, 0.9995487f), - D3DXVECTOR4(-0.0356838f, -0.0008590f, 0.0012334f, 0.9993620f), - D3DXVECTOR4(-0.0404836f, -0.0008663f, 0.0012702f, 0.9991790f), - D3DXVECTOR4(-0.0441837f, -0.0008721f, 0.0012985f, 0.9990222f), - D3DXVECTOR4(-0.0465645f, -0.0008760f, 0.0013167f, 0.9989141f), - D3DXVECTOR4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), - D3DXVECTOR4(-0.0463134f, -0.0008756f, 0.0013147f, 0.9989257f), - D3DXVECTOR4(-0.0432515f, -0.0008707f, 0.0012913f, 0.9990631f), - D3DXVECTOR4(-0.0385485f, -0.0008633f, 0.0012554f, 0.9992556f), - D3DXVECTOR4(-0.0325317f, -0.0008544f, 0.0012093f, 0.9994696f), - D3DXVECTOR4(-0.0255289f, -0.0008449f, 0.0011556f, 0.9996731f), - D3DXVECTOR4(-0.0178682f, -0.0008353f, 0.0010968f, 0.9998394f), - D3DXVECTOR4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), - D3DXVECTOR4(-0.0018872f, -0.0008184f, 0.0009740f, 0.9999974f), - D3DXVECTOR4( 0.0057753f, -0.0008118f, 0.0009150f, 0.9999826f), - D3DXVECTOR4( 0.0127808f, -0.0008065f, 0.0008610f, 0.9999176f), - D3DXVECTOR4( 0.0188006f, -0.0008027f, 0.0008146f, 0.9998226f), - D3DXVECTOR4( 0.0235065f, -0.0008001f, 0.0007783f, 0.9997230f), - D3DXVECTOR4( 0.0265706f, -0.0007986f, 0.0007547f, 0.9996463f), - D3DXVECTOR4( 0.0276649f, -0.0007981f, 0.0007462f, 0.9996167f), - D3DXVECTOR4( 0.0268218f, -0.0007984f, 0.0007527f, 0.9996396f), - D3DXVECTOR4( 0.0244393f, -0.0007996f, 0.0007711f, 0.9997007f), - D3DXVECTOR4( 0.0207369f, -0.0008016f, 0.0007997f, 0.9997844f), - D3DXVECTOR4( 0.0159344f, -0.0008044f, 0.0008367f, 0.9998724f), - D3DXVECTOR4( 0.0102516f, -0.0008083f, 0.0008805f, 0.9999467f), - D3DXVECTOR4( 0.0039085f, -0.0008133f, 0.0009293f, 0.9999916f), - D3DXVECTOR4(-0.0028748f, -0.0008193f, 0.0009816f, 0.9999951f), - D3DXVECTOR4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), - D3DXVECTOR4(-0.0168807f, -0.0008341f, 0.0010892f, 0.9998566f), - D3DXVECTOR4(-0.0236626f, -0.0008424f, 0.0011413f, 0.9997190f), - D3DXVECTOR4(-0.0300036f, -0.0008509f, 0.0011899f, 0.9995487f), - D3DXVECTOR4(-0.0356838f, -0.0008590f, 0.0012334f, 0.9993620f), - D3DXVECTOR4(-0.0404836f, -0.0008663f, 0.0012702f, 0.9991790f), - D3DXVECTOR4(-0.0441837f, -0.0008721f, 0.0012985f, 0.9990222f), - D3DXVECTOR4(-0.0465645f, -0.0008760f, 0.0013167f, 0.9989141f), - D3DXVECTOR4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), - D3DXVECTOR4(-0.0463134f, -0.0008756f, 0.0013147f, 0.9989257f), - D3DXVECTOR4(-0.0432515f, -0.0008707f, 0.0012913f, 0.9990631f), - D3DXVECTOR4(-0.0385485f, -0.0008633f, 0.0012554f, 0.9992556f), - D3DXVECTOR4(-0.0325317f, -0.0008544f, 0.0012093f, 0.9994696f), - D3DXVECTOR4(-0.0255289f, -0.0008449f, 0.0011556f, 0.9996731f), - D3DXVECTOR4(-0.0178682f, -0.0008353f, 0.0010968f, 0.9998394f), - D3DXVECTOR4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), - D3DXVECTOR4(-0.0018872f, -0.0008184f, 0.0009740f, 0.9999974f), - D3DXVECTOR4( 0.0057753f, -0.0008118f, 0.0009150f, 0.9999826f), - D3DXVECTOR4( 0.0127808f, -0.0008065f, 0.0008610f, 0.9999176f), - D3DXVECTOR4( 0.0188006f, -0.0008027f, 0.0008146f, 0.9998226f), - D3DXVECTOR4( 0.0235065f, -0.0008001f, 0.0007783f, 0.9997230f), - D3DXVECTOR4( 0.0265706f, -0.0007986f, 0.0007547f, 0.9996463f), - D3DXVECTOR4( 0.0276649f, -0.0007981f, 0.0007462f, 0.9996167f), - D3DXVECTOR4( 0.0268218f, -0.0007984f, 0.0007527f, 0.9996396f), - D3DXVECTOR4( 0.0244393f, -0.0007996f, 0.0007711f, 0.9997007f), - D3DXVECTOR4( 0.0207369f, -0.0008016f, 
0.0007997f, 0.9997844f), - D3DXVECTOR4( 0.0159344f, -0.0008044f, 0.0008367f, 0.9998724f), - D3DXVECTOR4( 0.0102516f, -0.0008083f, 0.0008805f, 0.9999467f), - D3DXVECTOR4( 0.0039085f, -0.0008133f, 0.0009293f, 0.9999916f), - D3DXVECTOR4(-0.0028748f, -0.0008193f, 0.0009816f, 0.9999951f), - D3DXVECTOR4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), - D3DXVECTOR4(-0.0168807f, -0.0008341f, 0.0010892f, 0.9998566f), - D3DXVECTOR4(-0.0236626f, -0.0008424f, 0.0011413f, 0.9997190f), - D3DXVECTOR4(-0.0300036f, -0.0008509f, 0.0011899f, 0.9995487f), - D3DXVECTOR4(-0.0356838f, -0.0008590f, 0.0012334f, 0.9993620f), - D3DXVECTOR4(-0.0404836f, -0.0008663f, 0.0012702f, 0.9991790f), - D3DXVECTOR4(-0.0441837f, -0.0008721f, 0.0012985f, 0.9990222f), - D3DXVECTOR4(-0.0465645f, -0.0008760f, 0.0013167f, 0.9989141f), - D3DXVECTOR4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), -}; - -const D3DXVECTOR3 accessor_142[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_143[] = { - D3DXVECTOR3(-0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3(-0.0000009f, 0.1099318f, -0.0000377f), - D3DXVECTOR3(-0.0000031f, 0.1054348f, -0.0001928f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0006252f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0015358f), - D3DXVECTOR3(-0.0000127f, 0.0864372f, -0.0020276f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0021978f), - D3DXVECTOR3(-0.0000159f, 0.0801965f, -0.0022383f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0022006f), - D3DXVECTOR3(-0.0000127f, 0.0864373f, -0.0020454f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0016131f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0007024f), - D3DXVECTOR3(-0.0000031f, 0.1054347f, -0.0002107f), - D3DXVECTOR3(-0.0000009f, 0.1099317f, -0.0000405f), - D3DXVECTOR3(-0.0000000f, 0.1116755f, -0.0000000f), - D3DXVECTOR3(-0.0000009f, 0.1099318f, -0.0000377f), - D3DXVECTOR3(-0.0000031f, 0.1054347f, -0.0001928f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0006252f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0015358f), - D3DXVECTOR3(-0.0000127f, 0.0864372f, -0.0020276f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0021978f), - D3DXVECTOR3(-0.0000159f, 0.0801965f, -0.0022382f), - D3DXVECTOR3(-0.0000153f, 0.0812760f, -0.0022106f), - D3DXVECTOR3(-0.0000139f, 0.0841692f, -0.0021086f), - D3DXVECTOR3(-0.0000118f, 0.0883577f, -0.0018848f), - D3DXVECTOR3(-0.0000092f, 0.0933236f, -0.0014561f), - D3DXVECTOR3(-0.0000066f, 0.0985484f, -0.0008443f), - D3DXVECTOR3(-0.0000041f, 0.1035143f, -0.0003812f), - D3DXVECTOR3(-0.0000020f, 0.1077028f, -0.0001386f), - D3DXVECTOR3(-0.0000005f, 0.1105960f, -0.0000294f), - D3DXVECTOR3(-0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3(-0.0000009f, 0.1099318f, -0.0000377f), - D3DXVECTOR3(-0.0000031f, 0.1054348f, -0.0001928f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0006252f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0015358f), - D3DXVECTOR3(-0.0000127f, 0.0864372f, -0.0020276f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0021978f), - D3DXVECTOR3(-0.0000159f, 0.0801965f, -0.0022383f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0022006f), - D3DXVECTOR3(-0.0000127f, 0.0864373f, -0.0020454f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0016131f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0007024f), - D3DXVECTOR3(-0.0000031f, 0.1054347f, -0.0002107f), - D3DXVECTOR3(-0.0000009f, 0.1099317f, -0.0000405f), - D3DXVECTOR3(-0.0000000f, 0.1116755f, -0.0000000f), - D3DXVECTOR3(-0.0000009f, 0.1099318f, -0.0000377f), - D3DXVECTOR3(-0.0000031f, 0.1054347f, -0.0001928f), - 
D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0006252f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0015358f), - D3DXVECTOR3(-0.0000127f, 0.0864372f, -0.0020276f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0021978f), - D3DXVECTOR3(-0.0000159f, 0.0801965f, -0.0022382f), - D3DXVECTOR3(-0.0000153f, 0.0812760f, -0.0022106f), - D3DXVECTOR3(-0.0000139f, 0.0841692f, -0.0021086f), - D3DXVECTOR3(-0.0000118f, 0.0883577f, -0.0018848f), - D3DXVECTOR3(-0.0000092f, 0.0933236f, -0.0014561f), - D3DXVECTOR3(-0.0000066f, 0.0985484f, -0.0008443f), - D3DXVECTOR3(-0.0000041f, 0.1035143f, -0.0003812f), - D3DXVECTOR3(-0.0000020f, 0.1077028f, -0.0001386f), - D3DXVECTOR3(-0.0000005f, 0.1105960f, -0.0000294f), - D3DXVECTOR3(-0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3(-0.0000009f, 0.1099318f, -0.0000377f), - D3DXVECTOR3(-0.0000031f, 0.1054348f, -0.0001928f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0006252f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0015358f), - D3DXVECTOR3(-0.0000127f, 0.0864372f, -0.0020276f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0021978f), - D3DXVECTOR3(-0.0000159f, 0.0801965f, -0.0022383f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0022006f), - D3DXVECTOR3(-0.0000127f, 0.0864373f, -0.0020454f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0016131f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0007024f), - D3DXVECTOR3(-0.0000031f, 0.1054347f, -0.0002107f), - D3DXVECTOR3(-0.0000009f, 0.1099317f, -0.0000405f), - D3DXVECTOR3(-0.0000000f, 0.1116755f, -0.0000000f), - D3DXVECTOR3(-0.0000009f, 0.1099318f, -0.0000377f), - D3DXVECTOR3(-0.0000031f, 0.1054347f, -0.0001928f), - D3DXVECTOR3(-0.0000062f, 0.0992858f, -0.0006252f), - D3DXVECTOR3(-0.0000096f, 0.0925862f, -0.0015358f), - D3DXVECTOR3(-0.0000127f, 0.0864372f, -0.0020276f), - D3DXVECTOR3(-0.0000150f, 0.0819402f, -0.0021978f), - D3DXVECTOR3(-0.0000159f, 0.0801965f, -0.0022382f), - D3DXVECTOR3(-0.0000153f, 0.0812760f, -0.0022106f), - D3DXVECTOR3(-0.0000139f, 0.0841692f, -0.0021086f), - D3DXVECTOR3(-0.0000118f, 0.0883577f, -0.0018848f), - D3DXVECTOR3(-0.0000092f, 0.0933236f, -0.0014561f), - D3DXVECTOR3(-0.0000066f, 0.0985484f, -0.0008443f), - D3DXVECTOR3(-0.0000041f, 0.1035143f, -0.0003812f), - D3DXVECTOR3(-0.0000020f, 0.1077028f, -0.0001386f), - D3DXVECTOR3(-0.0000005f, 0.1105960f, -0.0000294f), - D3DXVECTOR3(-0.0000000f, 0.1116755f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_144[] = { - D3DXVECTOR4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), - D3DXVECTOR4( 0.0814701f, 0.0021492f, 0.0019763f, 0.9966716f), - D3DXVECTOR4( 0.0773537f, 0.0021613f, 0.0020006f, 0.9969994f), - D3DXVECTOR4( 0.0710295f, 0.0021796f, 0.0020383f, 0.9974698f), - D3DXVECTOR4( 0.0629364f, 0.0022024f, 0.0020869f, 0.9980130f), - D3DXVECTOR4( 0.0535137f, 0.0022280f, 0.0021441f, 0.9985623f), - D3DXVECTOR4( 0.0432023f, 0.0022551f, 0.0022075f, 0.9990614f), - D3DXVECTOR4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), - D3DXVECTOR4( 0.0216820f, 0.0023081f, 0.0023421f, 0.9997596f), - D3DXVECTOR4( 0.0113599f, 0.0023318f, 0.0024078f, 0.9999298f), - D3DXVECTOR4( 0.0019214f, 0.0023525f, 0.0024685f, 0.9999924f), - D3DXVECTOR4(-0.0061899f, 0.0023695f, 0.0025212f, 0.9999748f), - D3DXVECTOR4(-0.0125312f, 0.0023824f, 0.0025627f, 0.9999154f), - D3DXVECTOR4(-0.0166602f, 0.0023905f, 0.0025898f, 0.9998550f), - D3DXVECTOR4(-0.0181348f, 0.0023933f, 0.0025995f, 0.9998293f), - D3DXVECTOR4(-0.0169988f, 0.0023912f, 0.0025920f, 0.9998493f), - D3DXVECTOR4(-0.0137882f, 0.0023849f, 0.0025709f, 0.9998988f), - D3DXVECTOR4(-0.0087991f, 0.0023749f, 0.0025382f, 0.9999552f), - D3DXVECTOR4(-0.0023278f, 
0.0023615f, 0.0024961f, 0.9999914f), - D3DXVECTOR4( 0.0053291f, 0.0023451f, 0.0024465f, 0.9999801f), - D3DXVECTOR4( 0.0138749f, 0.0023261f, 0.0023917f, 0.9998982f), - D3DXVECTOR4( 0.0230123f, 0.0023050f, 0.0023337f, 0.9997298f), - D3DXVECTOR4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), - D3DXVECTOR4( 0.0418729f, 0.0022585f, 0.0022157f, 0.9991179f), - D3DXVECTOR4( 0.0510020f, 0.0022347f, 0.0021595f, 0.9986938f), - D3DXVECTOR4( 0.0595351f, 0.0022117f, 0.0021075f, 0.9982215f), - D3DXVECTOR4( 0.0671767f, 0.0021905f, 0.0020614f, 0.9977366f), - D3DXVECTOR4( 0.0736320f, 0.0021721f, 0.0020228f, 0.9972811f), - D3DXVECTOR4( 0.0786069f, 0.0021577f, 0.0019932f, 0.9969013f), - D3DXVECTOR4( 0.0818075f, 0.0021482f, 0.0019743f, 0.9966439f), - D3DXVECTOR4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), - D3DXVECTOR4( 0.0814701f, 0.0021492f, 0.0019763f, 0.9966716f), - D3DXVECTOR4( 0.0773537f, 0.0021613f, 0.0020006f, 0.9969994f), - D3DXVECTOR4( 0.0710295f, 0.0021796f, 0.0020383f, 0.9974698f), - D3DXVECTOR4( 0.0629364f, 0.0022024f, 0.0020869f, 0.9980130f), - D3DXVECTOR4( 0.0535137f, 0.0022280f, 0.0021441f, 0.9985623f), - D3DXVECTOR4( 0.0432023f, 0.0022551f, 0.0022075f, 0.9990614f), - D3DXVECTOR4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), - D3DXVECTOR4( 0.0216820f, 0.0023081f, 0.0023421f, 0.9997596f), - D3DXVECTOR4( 0.0113599f, 0.0023318f, 0.0024078f, 0.9999298f), - D3DXVECTOR4( 0.0019214f, 0.0023525f, 0.0024685f, 0.9999924f), - D3DXVECTOR4(-0.0061899f, 0.0023695f, 0.0025212f, 0.9999748f), - D3DXVECTOR4(-0.0125312f, 0.0023824f, 0.0025627f, 0.9999154f), - D3DXVECTOR4(-0.0166602f, 0.0023905f, 0.0025898f, 0.9998550f), - D3DXVECTOR4(-0.0181348f, 0.0023933f, 0.0025995f, 0.9998293f), - D3DXVECTOR4(-0.0169988f, 0.0023912f, 0.0025920f, 0.9998493f), - D3DXVECTOR4(-0.0137882f, 0.0023849f, 0.0025709f, 0.9998988f), - D3DXVECTOR4(-0.0087991f, 0.0023749f, 0.0025382f, 0.9999552f), - D3DXVECTOR4(-0.0023278f, 0.0023615f, 0.0024961f, 0.9999914f), - D3DXVECTOR4( 0.0053291f, 0.0023451f, 0.0024465f, 0.9999801f), - D3DXVECTOR4( 0.0138749f, 0.0023261f, 0.0023917f, 0.9998982f), - D3DXVECTOR4( 0.0230123f, 0.0023050f, 0.0023337f, 0.9997298f), - D3DXVECTOR4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), - D3DXVECTOR4( 0.0418729f, 0.0022585f, 0.0022157f, 0.9991179f), - D3DXVECTOR4( 0.0510020f, 0.0022347f, 0.0021595f, 0.9986938f), - D3DXVECTOR4( 0.0595351f, 0.0022117f, 0.0021075f, 0.9982215f), - D3DXVECTOR4( 0.0671767f, 0.0021905f, 0.0020614f, 0.9977366f), - D3DXVECTOR4( 0.0736320f, 0.0021721f, 0.0020228f, 0.9972811f), - D3DXVECTOR4( 0.0786069f, 0.0021577f, 0.0019932f, 0.9969013f), - D3DXVECTOR4( 0.0818075f, 0.0021482f, 0.0019743f, 0.9966439f), - D3DXVECTOR4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), - D3DXVECTOR4( 0.0814701f, 0.0021492f, 0.0019763f, 0.9966716f), - D3DXVECTOR4( 0.0773537f, 0.0021613f, 0.0020006f, 0.9969994f), - D3DXVECTOR4( 0.0710295f, 0.0021796f, 0.0020383f, 0.9974698f), - D3DXVECTOR4( 0.0629364f, 0.0022024f, 0.0020869f, 0.9980130f), - D3DXVECTOR4( 0.0535137f, 0.0022280f, 0.0021441f, 0.9985623f), - D3DXVECTOR4( 0.0432023f, 0.0022551f, 0.0022075f, 0.9990614f), - D3DXVECTOR4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), - D3DXVECTOR4( 0.0216820f, 0.0023081f, 0.0023421f, 0.9997596f), - D3DXVECTOR4( 0.0113599f, 0.0023318f, 0.0024078f, 0.9999298f), - D3DXVECTOR4( 0.0019214f, 0.0023525f, 0.0024685f, 0.9999924f), - D3DXVECTOR4(-0.0061899f, 0.0023695f, 0.0025212f, 0.9999748f), - D3DXVECTOR4(-0.0125312f, 0.0023824f, 0.0025627f, 0.9999154f), - D3DXVECTOR4(-0.0166602f, 0.0023905f, 0.0025898f, 
0.9998550f), - D3DXVECTOR4(-0.0181348f, 0.0023933f, 0.0025995f, 0.9998293f), - D3DXVECTOR4(-0.0169988f, 0.0023912f, 0.0025920f, 0.9998493f), - D3DXVECTOR4(-0.0137882f, 0.0023849f, 0.0025709f, 0.9998988f), - D3DXVECTOR4(-0.0087991f, 0.0023749f, 0.0025382f, 0.9999552f), - D3DXVECTOR4(-0.0023278f, 0.0023615f, 0.0024961f, 0.9999914f), - D3DXVECTOR4( 0.0053291f, 0.0023451f, 0.0024465f, 0.9999801f), - D3DXVECTOR4( 0.0138749f, 0.0023261f, 0.0023917f, 0.9998982f), - D3DXVECTOR4( 0.0230123f, 0.0023050f, 0.0023337f, 0.9997298f), - D3DXVECTOR4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), - D3DXVECTOR4( 0.0418729f, 0.0022585f, 0.0022157f, 0.9991179f), - D3DXVECTOR4( 0.0510020f, 0.0022347f, 0.0021595f, 0.9986938f), - D3DXVECTOR4( 0.0595351f, 0.0022117f, 0.0021075f, 0.9982215f), - D3DXVECTOR4( 0.0671767f, 0.0021905f, 0.0020614f, 0.9977366f), - D3DXVECTOR4( 0.0736320f, 0.0021721f, 0.0020228f, 0.9972811f), - D3DXVECTOR4( 0.0786069f, 0.0021577f, 0.0019932f, 0.9969013f), - D3DXVECTOR4( 0.0818075f, 0.0021482f, 0.0019743f, 0.9966439f), - D3DXVECTOR4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), -}; - -const D3DXVECTOR3 accessor_145[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 0.9985101f, 1.0018822f), - D3DXVECTOR3( 0.9999998f, 0.9946679f, 1.0067360f), - D3DXVECTOR3( 0.9999999f, 0.9894144f, 1.0133730f), - D3DXVECTOR3( 1.0000001f, 0.9836906f, 1.0206046f), - D3DXVECTOR3( 0.9999999f, 0.9784368f, 1.0272412f), - D3DXVECTOR3( 0.9999999f, 0.9745947f, 1.0320952f), - D3DXVECTOR3( 1.0000001f, 0.9731048f, 1.0339775f), - D3DXVECTOR3( 0.9999999f, 0.9745945f, 1.0320953f), - D3DXVECTOR3( 0.9999999f, 0.9784369f, 1.0272415f), - D3DXVECTOR3( 0.9999999f, 0.9836905f, 1.0206045f), - D3DXVECTOR3( 0.9999999f, 0.9894144f, 1.0133733f), - D3DXVECTOR3( 0.9999999f, 0.9946678f, 1.0067360f), - D3DXVECTOR3( 0.9999998f, 0.9985100f, 1.0018821f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000001f), - D3DXVECTOR3( 0.9999999f, 0.9985101f, 1.0018821f), - D3DXVECTOR3( 0.9999999f, 0.9946678f, 1.0067360f), - D3DXVECTOR3( 0.9999998f, 0.9894144f, 1.0133730f), - D3DXVECTOR3( 0.9999999f, 0.9836904f, 1.0206043f), - D3DXVECTOR3( 0.9999999f, 0.9784369f, 1.0272416f), - D3DXVECTOR3( 0.9999999f, 0.9745947f, 1.0320953f), - D3DXVECTOR3( 0.9999999f, 0.9731049f, 1.0339775f), - D3DXVECTOR3( 0.9999999f, 0.9740270f, 1.0328122f), - D3DXVECTOR3( 1.0000001f, 0.9764991f, 1.0296897f), - D3DXVECTOR3( 0.9999999f, 0.9800777f, 1.0251684f), - D3DXVECTOR3( 1.0000001f, 0.9843205f, 1.0198087f), - D3DXVECTOR3( 0.9999999f, 0.9887845f, 1.0141689f), - D3DXVECTOR3( 0.9999999f, 0.9930271f, 1.0088089f), - D3DXVECTOR3( 0.9999999f, 0.9966060f, 1.0042881f), - D3DXVECTOR3( 0.9999999f, 0.9990774f, 1.0011650f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 0.9985101f, 1.0018822f), - D3DXVECTOR3( 0.9999998f, 0.9946679f, 1.0067360f), - D3DXVECTOR3( 0.9999999f, 0.9894144f, 1.0133730f), - D3DXVECTOR3( 1.0000001f, 0.9836906f, 1.0206046f), - D3DXVECTOR3( 0.9999999f, 0.9784368f, 1.0272412f), - D3DXVECTOR3( 0.9999999f, 0.9745947f, 1.0320952f), - D3DXVECTOR3( 1.0000001f, 0.9731048f, 1.0339775f), - D3DXVECTOR3( 0.9999999f, 0.9745945f, 1.0320953f), - D3DXVECTOR3( 0.9999999f, 0.9784369f, 1.0272415f), - D3DXVECTOR3( 0.9999999f, 0.9836905f, 1.0206045f), - D3DXVECTOR3( 0.9999999f, 0.9894144f, 1.0133733f), - D3DXVECTOR3( 0.9999999f, 0.9946678f, 1.0067360f), - D3DXVECTOR3( 0.9999998f, 0.9985100f, 1.0018821f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000001f), - D3DXVECTOR3( 0.9999999f, 0.9985101f, 1.0018821f), - D3DXVECTOR3( 
0.9999999f, 0.9946678f, 1.0067360f), - D3DXVECTOR3( 0.9999998f, 0.9894144f, 1.0133730f), - D3DXVECTOR3( 0.9999999f, 0.9836904f, 1.0206043f), - D3DXVECTOR3( 0.9999999f, 0.9784369f, 1.0272416f), - D3DXVECTOR3( 0.9999999f, 0.9745947f, 1.0320953f), - D3DXVECTOR3( 0.9999999f, 0.9731049f, 1.0339775f), - D3DXVECTOR3( 0.9999999f, 0.9740270f, 1.0328122f), - D3DXVECTOR3( 1.0000001f, 0.9764991f, 1.0296897f), - D3DXVECTOR3( 0.9999999f, 0.9800777f, 1.0251684f), - D3DXVECTOR3( 1.0000001f, 0.9843205f, 1.0198087f), - D3DXVECTOR3( 0.9999999f, 0.9887845f, 1.0141689f), - D3DXVECTOR3( 0.9999999f, 0.9930271f, 1.0088089f), - D3DXVECTOR3( 0.9999999f, 0.9966060f, 1.0042881f), - D3DXVECTOR3( 0.9999999f, 0.9990774f, 1.0011650f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 0.9985101f, 1.0018822f), - D3DXVECTOR3( 0.9999998f, 0.9946679f, 1.0067360f), - D3DXVECTOR3( 0.9999999f, 0.9894144f, 1.0133730f), - D3DXVECTOR3( 1.0000001f, 0.9836906f, 1.0206046f), - D3DXVECTOR3( 0.9999999f, 0.9784368f, 1.0272412f), - D3DXVECTOR3( 0.9999999f, 0.9745947f, 1.0320952f), - D3DXVECTOR3( 1.0000001f, 0.9731048f, 1.0339775f), - D3DXVECTOR3( 0.9999999f, 0.9745945f, 1.0320953f), - D3DXVECTOR3( 0.9999999f, 0.9784369f, 1.0272415f), - D3DXVECTOR3( 0.9999999f, 0.9836905f, 1.0206045f), - D3DXVECTOR3( 0.9999999f, 0.9894144f, 1.0133733f), - D3DXVECTOR3( 0.9999999f, 0.9946678f, 1.0067360f), - D3DXVECTOR3( 0.9999998f, 0.9985100f, 1.0018821f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000001f), - D3DXVECTOR3( 0.9999999f, 0.9985101f, 1.0018821f), - D3DXVECTOR3( 0.9999999f, 0.9946678f, 1.0067360f), - D3DXVECTOR3( 0.9999998f, 0.9894144f, 1.0133730f), - D3DXVECTOR3( 0.9999999f, 0.9836904f, 1.0206043f), - D3DXVECTOR3( 0.9999999f, 0.9784369f, 1.0272416f), - D3DXVECTOR3( 0.9999999f, 0.9745947f, 1.0320953f), - D3DXVECTOR3( 0.9999999f, 0.9731049f, 1.0339775f), - D3DXVECTOR3( 0.9999999f, 0.9740270f, 1.0328122f), - D3DXVECTOR3( 1.0000001f, 0.9764991f, 1.0296897f), - D3DXVECTOR3( 0.9999999f, 0.9800777f, 1.0251684f), - D3DXVECTOR3( 1.0000001f, 0.9843205f, 1.0198087f), - D3DXVECTOR3( 0.9999999f, 0.9887845f, 1.0141689f), - D3DXVECTOR3( 0.9999999f, 0.9930271f, 1.0088089f), - D3DXVECTOR3( 0.9999999f, 0.9966060f, 1.0042881f), - D3DXVECTOR3( 0.9999999f, 0.9990774f, 1.0011650f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_146[] = { - D3DXVECTOR3(-0.0005524f, 0.0688296f, -0.0213631f), - D3DXVECTOR3(-0.0005524f, 0.0688296f, -0.0213631f), -}; - -const D3DXVECTOR4 accessor_147[] = { - D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), - D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), -}; - -const D3DXVECTOR3 accessor_148[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_149[] = { - D3DXVECTOR3(-0.0000000f, 0.3082172f, 0.0000012f), - D3DXVECTOR3(-0.0000000f, 0.3082172f, 0.0000012f), -}; - -const D3DXVECTOR4 accessor_150[] = { - D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), - D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), -}; - -const D3DXVECTOR3 accessor_151[] = { - D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000001f), - D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_152[] = { - D3DXVECTOR3(-0.0000000f, 0.3056023f, -0.0000003f), - D3DXVECTOR3(-0.0000000f, 0.3056023f, -0.0000003f), -}; - -const D3DXVECTOR4 accessor_153[] = { - D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), - D3DXVECTOR4( 0.0327397f, 
0.0000000f, 0.0000000f, 0.9994640f), -}; - -const D3DXVECTOR3 accessor_154[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_155[] = { - D3DXVECTOR3( 0.0013047f, 0.3463301f, 0.0044682f), - D3DXVECTOR3( 0.0012900f, 0.3433006f, 0.0044711f), - D3DXVECTOR3( 0.0012551f, 0.3360764f, 0.0044868f), - D3DXVECTOR3( 0.0012134f, 0.3274537f, 0.0045631f), - D3DXVECTOR3( 0.0011785f, 0.3202294f, 0.0045817f), - D3DXVECTOR3( 0.0011639f, 0.3171998f, 0.0045849f), - D3DXVECTOR3( 0.0011687f, 0.3181989f, 0.0045834f), - D3DXVECTOR3( 0.0011816f, 0.3208762f, 0.0045781f), - D3DXVECTOR3( 0.0012004f, 0.3247521f, 0.0045665f), - D3DXVECTOR3( 0.0012226f, 0.3293475f, 0.0045441f), - D3DXVECTOR3( 0.0012460f, 0.3341826f, 0.0045122f), - D3DXVECTOR3( 0.0012682f, 0.3387780f, 0.0044881f), - D3DXVECTOR3( 0.0012869f, 0.3426540f, 0.0044754f), - D3DXVECTOR3( 0.0012998f, 0.3453312f, 0.0044697f), - D3DXVECTOR3( 0.0013047f, 0.3463303f, 0.0044682f), - D3DXVECTOR3( 0.0012969f, 0.3447166f, 0.0044701f), - D3DXVECTOR3( 0.0012768f, 0.3405551f, 0.0044782f), - D3DXVECTOR3( 0.0012493f, 0.3348649f, 0.0045008f), - D3DXVECTOR3( 0.0012193f, 0.3286653f, 0.0045483f), - D3DXVECTOR3( 0.0011918f, 0.3229749f, 0.0045739f), - D3DXVECTOR3( 0.0011717f, 0.3188135f, 0.0045828f), - D3DXVECTOR3( 0.0011639f, 0.3171999f, 0.0045849f), - D3DXVECTOR3( 0.0011687f, 0.3181988f, 0.0045834f), - D3DXVECTOR3( 0.0011816f, 0.3208760f, 0.0045781f), - D3DXVECTOR3( 0.0012004f, 0.3247522f, 0.0045665f), - D3DXVECTOR3( 0.0012226f, 0.3293476f, 0.0045441f), - D3DXVECTOR3( 0.0012460f, 0.3341825f, 0.0045122f), - D3DXVECTOR3( 0.0012682f, 0.3387780f, 0.0044881f), - D3DXVECTOR3( 0.0012869f, 0.3426539f, 0.0044754f), - D3DXVECTOR3( 0.0012998f, 0.3453312f, 0.0044697f), - D3DXVECTOR3( 0.0013047f, 0.3463301f, 0.0044682f), - D3DXVECTOR3( 0.0012900f, 0.3433006f, 0.0044711f), - D3DXVECTOR3( 0.0012551f, 0.3360764f, 0.0044868f), - D3DXVECTOR3( 0.0012134f, 0.3274537f, 0.0045631f), - D3DXVECTOR3( 0.0011785f, 0.3202294f, 0.0045817f), - D3DXVECTOR3( 0.0011639f, 0.3171998f, 0.0045849f), - D3DXVECTOR3( 0.0011687f, 0.3181989f, 0.0045834f), - D3DXVECTOR3( 0.0011816f, 0.3208762f, 0.0045781f), - D3DXVECTOR3( 0.0012004f, 0.3247521f, 0.0045665f), - D3DXVECTOR3( 0.0012226f, 0.3293475f, 0.0045441f), - D3DXVECTOR3( 0.0012460f, 0.3341826f, 0.0045122f), - D3DXVECTOR3( 0.0012682f, 0.3387780f, 0.0044881f), - D3DXVECTOR3( 0.0012869f, 0.3426540f, 0.0044754f), - D3DXVECTOR3( 0.0012998f, 0.3453312f, 0.0044697f), - D3DXVECTOR3( 0.0013047f, 0.3463303f, 0.0044682f), - D3DXVECTOR3( 0.0012969f, 0.3447166f, 0.0044701f), - D3DXVECTOR3( 0.0012768f, 0.3405551f, 0.0044782f), - D3DXVECTOR3( 0.0012493f, 0.3348649f, 0.0045008f), - D3DXVECTOR3( 0.0012193f, 0.3286653f, 0.0045483f), - D3DXVECTOR3( 0.0011918f, 0.3229749f, 0.0045739f), - D3DXVECTOR3( 0.0011717f, 0.3188135f, 0.0045828f), - D3DXVECTOR3( 0.0011639f, 0.3171999f, 0.0045849f), - D3DXVECTOR3( 0.0011687f, 0.3181988f, 0.0045834f), - D3DXVECTOR3( 0.0011816f, 0.3208760f, 0.0045781f), - D3DXVECTOR3( 0.0012004f, 0.3247522f, 0.0045665f), - D3DXVECTOR3( 0.0012226f, 0.3293476f, 0.0045441f), - D3DXVECTOR3( 0.0012460f, 0.3341825f, 0.0045122f), - D3DXVECTOR3( 0.0012682f, 0.3387780f, 0.0044881f), - D3DXVECTOR3( 0.0012869f, 0.3426539f, 0.0044754f), - D3DXVECTOR3( 0.0012998f, 0.3453312f, 0.0044697f), - D3DXVECTOR3( 0.0013047f, 0.3463301f, 0.0044682f), - D3DXVECTOR3( 0.0012900f, 0.3433006f, 0.0044711f), - D3DXVECTOR3( 0.0012551f, 0.3360764f, 0.0044868f), - D3DXVECTOR3( 0.0012134f, 0.3274537f, 
0.0045631f), - D3DXVECTOR3( 0.0011785f, 0.3202294f, 0.0045817f), - D3DXVECTOR3( 0.0011639f, 0.3171998f, 0.0045849f), - D3DXVECTOR3( 0.0011687f, 0.3181989f, 0.0045834f), - D3DXVECTOR3( 0.0011816f, 0.3208762f, 0.0045781f), - D3DXVECTOR3( 0.0012004f, 0.3247521f, 0.0045665f), - D3DXVECTOR3( 0.0012226f, 0.3293475f, 0.0045441f), - D3DXVECTOR3( 0.0012460f, 0.3341826f, 0.0045122f), - D3DXVECTOR3( 0.0012682f, 0.3387780f, 0.0044881f), - D3DXVECTOR3( 0.0012869f, 0.3426540f, 0.0044754f), - D3DXVECTOR3( 0.0012998f, 0.3453312f, 0.0044697f), - D3DXVECTOR3( 0.0013047f, 0.3463303f, 0.0044682f), - D3DXVECTOR3( 0.0012969f, 0.3447166f, 0.0044701f), - D3DXVECTOR3( 0.0012768f, 0.3405551f, 0.0044782f), - D3DXVECTOR3( 0.0012493f, 0.3348649f, 0.0045008f), - D3DXVECTOR3( 0.0012193f, 0.3286653f, 0.0045483f), - D3DXVECTOR3( 0.0011918f, 0.3229749f, 0.0045739f), - D3DXVECTOR3( 0.0011717f, 0.3188135f, 0.0045828f), - D3DXVECTOR3( 0.0011639f, 0.3171999f, 0.0045849f), - D3DXVECTOR3( 0.0011687f, 0.3181988f, 0.0045834f), - D3DXVECTOR3( 0.0011816f, 0.3208760f, 0.0045781f), - D3DXVECTOR3( 0.0012004f, 0.3247522f, 0.0045665f), - D3DXVECTOR3( 0.0012226f, 0.3293476f, 0.0045441f), - D3DXVECTOR3( 0.0012460f, 0.3341825f, 0.0045122f), - D3DXVECTOR3( 0.0012682f, 0.3387780f, 0.0044881f), - D3DXVECTOR3( 0.0012869f, 0.3426539f, 0.0044754f), - D3DXVECTOR3( 0.0012998f, 0.3453312f, 0.0044697f), - D3DXVECTOR3( 0.0013047f, 0.3463301f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_156[] = { - D3DXVECTOR4( 0.0387171f, 0.0002800f, 0.0013465f, 0.9992493f), - D3DXVECTOR4( 0.0493966f, -0.0177392f, 0.0031159f, 0.9986169f), - D3DXVECTOR4( 0.0606996f, -0.0367296f, 0.0057199f, 0.9974637f), - D3DXVECTOR4( 0.0707855f, -0.0537815f, 0.0088475f, 0.9960014f), - D3DXVECTOR4( 0.0784067f, -0.0683515f, 0.0121915f, 0.9945008f), - D3DXVECTOR4( 0.0817480f, -0.0807198f, 0.0154127f, 0.9932593f), - D3DXVECTOR4( 0.0747506f, -0.0913959f, 0.0177616f, 0.9928463f), - D3DXVECTOR4( 0.0556845f, -0.1008034f, 0.0187529f, 0.9931698f), - D3DXVECTOR4( 0.0280073f, -0.1091611f, 0.0184893f, 0.9934574f), - D3DXVECTOR4(-0.0048100f, -0.1165879f, 0.0172380f, 0.9930192f), - D3DXVECTOR4(-0.0392765f, -0.1231512f, 0.0153883f, 0.9914910f), - D3DXVECTOR4(-0.0719051f, -0.1289043f, 0.0134206f, 0.9889557f), - D3DXVECTOR4(-0.0992459f, -0.1339051f, 0.0118810f, 0.9859404f), - D3DXVECTOR4(-0.1179052f, -0.1382139f, 0.0113604f, 0.9832936f), - D3DXVECTOR4(-0.1245264f, -0.1418656f, 0.0124765f, 0.9819428f), - D3DXVECTOR4(-0.1224307f, -0.1449235f, 0.0148707f, 0.9817264f), - D3DXVECTOR4(-0.1177773f, -0.1474829f, 0.0176792f, 0.9818680f), - D3DXVECTOR4(-0.1111950f, -0.1495734f, 0.0208023f, 0.9822580f), - D3DXVECTOR4(-0.1030942f, -0.1512196f, 0.0241640f, 0.9828125f), - D3DXVECTOR4(-0.0937609f, -0.1524428f, 0.0277018f, 0.9834647f), - D3DXVECTOR4(-0.0834059f, -0.1532622f, 0.0313610f, 0.9841599f), - D3DXVECTOR4(-0.0721912f, -0.1536956f, 0.0350924f, 0.9848526f), - D3DXVECTOR4(-0.0602471f, -0.1537601f, 0.0388498f, 0.9855043f), - D3DXVECTOR4(-0.0476825f, -0.1534726f, 0.0425891f, 0.9860825f), - D3DXVECTOR4(-0.0346149f, -0.1524973f, 0.0462851f, 0.9866124f), - D3DXVECTOR4(-0.0211576f, -0.1504736f, 0.0498945f, 0.9871275f), - D3DXVECTOR4(-0.0074050f, -0.1473921f, 0.0533421f, 0.9876110f), - D3DXVECTOR4( 0.0065302f, -0.1432618f, 0.0565457f, 0.9880466f), - D3DXVECTOR4( 0.0204811f, -0.1381156f, 0.0594127f, 0.9884204f), - D3DXVECTOR4( 0.0340977f, -0.1320207f, 0.0618273f, 0.9887291f), - D3DXVECTOR4( 0.0458801f, -0.1251444f, 0.0635367f, 0.9890384f), - D3DXVECTOR4( 0.0560597f, -0.1175647f, 0.0646364f, 0.9893726f), - D3DXVECTOR4( 
0.0667953f, -0.1092420f, 0.0654558f, 0.9896061f), - D3DXVECTOR4( 0.0762679f, -0.1004125f, 0.0657694f, 0.9898358f), - D3DXVECTOR4( 0.0832366f, -0.0912865f, 0.0654775f, 0.9901773f), - D3DXVECTOR4( 0.0858855f, -0.0821102f, 0.0644889f, 0.9908193f), - D3DXVECTOR4( 0.0781465f, -0.0733800f, 0.0624743f, 0.9922729f), - D3DXVECTOR4( 0.0582459f, -0.0652543f, 0.0595997f, 0.9943828f), - D3DXVECTOR4( 0.0296094f, -0.0575410f, 0.0563712f, 0.9963105f), - D3DXVECTOR4(-0.0042901f, -0.0500729f, 0.0531686f, 0.9973201f), - D3DXVECTOR4(-0.0399325f, -0.0427180f, 0.0502538f, 0.9970232f), - D3DXVECTOR4(-0.0737814f, -0.0353748f, 0.0477780f, 0.9955010f), - D3DXVECTOR4(-0.1023270f, -0.0279613f, 0.0457860f, 0.9933031f), - D3DXVECTOR4(-0.1221112f, -0.0204020f, 0.0442204f, 0.9913210f), - D3DXVECTOR4(-0.1297143f, -0.0126208f, 0.0429242f, 0.9905415f), - D3DXVECTOR4(-0.1284588f, -0.0048277f, 0.0416081f, 0.9908299f), - D3DXVECTOR4(-0.1245467f, 0.0026717f, 0.0401076f, 0.9913992f), - D3DXVECTOR4(-0.1186091f, 0.0098275f, 0.0384018f, 0.9921495f), - D3DXVECTOR4(-0.1110591f, 0.0166051f, 0.0364919f, 0.9930048f), - D3DXVECTOR4(-0.1021864f, 0.0229805f, 0.0343923f, 0.9939049f), - D3DXVECTOR4(-0.0922060f, 0.0289375f, 0.0321254f, 0.9948008f), - D3DXVECTOR4(-0.0812846f, 0.0344663f, 0.0297177f, 0.9956514f), - D3DXVECTOR4(-0.0695569f, 0.0395620f, 0.0271980f, 0.9964221f), - D3DXVECTOR4(-0.0571369f, 0.0442244f, 0.0245948f, 0.9970831f), - D3DXVECTOR4(-0.0441252f, 0.0484563f, 0.0219360f, 0.9976090f), - D3DXVECTOR4(-0.0306171f, 0.0522637f, 0.0192480f, 0.9979783f), - D3DXVECTOR4(-0.0167114f, 0.0556548f, 0.0165563f, 0.9981729f), - D3DXVECTOR4(-0.0025264f, 0.0586396f, 0.0138860f, 0.9981794f), - D3DXVECTOR4( 0.0117657f, 0.0612287f, 0.0112215f, 0.9979913f), - D3DXVECTOR4( 0.0258064f, 0.0634317f, 0.0085759f, 0.9976156f), - D3DXVECTOR4( 0.0380740f, 0.0652552f, 0.0060878f, 0.9971234f), - D3DXVECTOR4( 0.0487839f, 0.0667278f, 0.0037808f, 0.9965708f), - D3DXVECTOR4( 0.0600906f, 0.0678784f, 0.0015353f, 0.9958812f), - D3DXVECTOR4( 0.0701530f, 0.0687186f, -0.0005088f, 0.9951664f), - D3DXVECTOR4( 0.0777177f, 0.0692727f, -0.0022518f, 0.9945633f), - D3DXVECTOR4( 0.0809597f, 0.0695728f, -0.0035576f, 0.9942799f), - D3DXVECTOR4( 0.0737964f, 0.0696717f, -0.0039981f, 0.9948286f), - D3DXVECTOR4( 0.0544659f, 0.0696648f, -0.0034622f, 0.9960765f), - D3DXVECTOR4( 0.0264120f, 0.0695867f, -0.0022097f, 0.9972237f), - D3DXVECTOR4(-0.0068905f, 0.0693881f, -0.0004990f, 0.9975659f), - D3DXVECTOR4(-0.0419323f, 0.0690313f, 0.0014082f, 0.9967319f), - D3DXVECTOR4(-0.0751965f, 0.0685022f, 0.0032511f, 0.9948077f), - D3DXVECTOR4(-0.1031954f, 0.0678127f, 0.0047754f, 0.9923353f), - D3DXVECTOR4(-0.1224918f, 0.0669932f, 0.0057394f, 0.9901893f), - D3DXVECTOR4(-0.1296819f, 0.0660770f, 0.0059186f, 0.9893339f), - D3DXVECTOR4(-0.1280892f, 0.0650392f, 0.0055533f, 0.9896122f), - D3DXVECTOR4(-0.1239076f, 0.0638259f, 0.0050655f, 0.9902260f), - D3DXVECTOR4(-0.1177644f, 0.0624143f, 0.0045012f, 0.9910680f), - D3DXVECTOR4(-0.1100672f, 0.0607806f, 0.0038919f, 0.9920564f), - D3DXVECTOR4(-0.1010998f, 0.0588983f, 0.0032619f, 0.9931260f), - D3DXVECTOR4(-0.0910701f, 0.0567373f, 0.0026315f, 0.9942234f), - D3DXVECTOR4(-0.0801377f, 0.0542614f, 0.0020192f, 0.9953038f), - D3DXVECTOR4(-0.0684298f, 0.0514265f, 0.0014430f, 0.9963285f), - D3DXVECTOR4(-0.0560529f, 0.0481771f, 0.0009217f, 0.9972644f), - D3DXVECTOR4(-0.0431007f, 0.0444411f, 0.0004759f, 0.9980817f), - D3DXVECTOR4(-0.0296616f, 0.0401208f, 0.0001293f, 0.9987545f), - D3DXVECTOR4(-0.0158282f, 0.0350788f, -0.0000894f, 0.9992592f), - D3DXVECTOR4(-0.0017130f, 
0.0291084f, -0.0001432f, 0.9995748f), - D3DXVECTOR4( 0.0125142f, 0.0218732f, 0.0000184f, 0.9996824f), - D3DXVECTOR4( 0.0264973f, 0.0127488f, 0.0004716f, 0.9995675f), - D3DXVECTOR4( 0.0387171f, 0.0002800f, 0.0013465f, 0.9992493f), -}; - -const D3DXVECTOR3 accessor_157[] = { - D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_158[] = { - D3DXVECTOR3( 0.2488541f, 0.2033312f, -0.0450287f), - D3DXVECTOR3( 0.2488541f, 0.2033312f, -0.0450287f), -}; - -const D3DXVECTOR4 accessor_159[] = { - D3DXVECTOR4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), - D3DXVECTOR4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), -}; - -const D3DXVECTOR3 accessor_160[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_161[] = { - D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f), - D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_162[] = { - D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), - D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), -}; - -const D3DXVECTOR3 accessor_163[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), -}; - -const D3DXVECTOR3 accessor_164[] = { - D3DXVECTOR3( 0.0000000f, 0.1024574f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1024574f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_165[] = { - D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), - D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), -}; - -const D3DXVECTOR3 accessor_166[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000001f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_167[] = { - D3DXVECTOR3( 0.0000000f, 0.1026066f, -0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1026066f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_168[] = { - D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), - D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), -}; - -const D3DXVECTOR3 accessor_169[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_170[] = { - D3DXVECTOR3( 0.0000000f, 0.1033655f, -0.0000004f), - D3DXVECTOR3( 0.0000000f, 0.1033655f, -0.0000004f), -}; - -const D3DXVECTOR4 accessor_171[] = { - D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), - D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), -}; - -const D3DXVECTOR3 accessor_172[] = { - D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000001f), - D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_173[] = { - D3DXVECTOR3(-0.0000000f, 0.1012878f, -0.0000007f), - D3DXVECTOR3(-0.0000000f, 0.1012878f, -0.0000007f), -}; - -const D3DXVECTOR4 accessor_174[] = { - D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), - D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), -}; - -const D3DXVECTOR3 accessor_175[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_176[] = { - D3DXVECTOR3( 0.0000000f, 0.1024326f, 0.0000002f), - D3DXVECTOR3( 0.0000000f, 0.1024326f, 0.0000002f), -}; - -const D3DXVECTOR4 accessor_177[] = { - D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), - D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), -}; - -const D3DXVECTOR3 
accessor_178[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f),
-};
-
-const D3DXVECTOR3 accessor_179[] = {
- D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f),
- D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f),
-};
-
-const D3DXVECTOR4 accessor_180[] = {
- D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f),
- D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f),
-};
-
-const D3DXVECTOR3 accessor_181[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_182[] = {
- D3DXVECTOR3(-0.0000000f, 0.3082159f, -0.0000004f),
- D3DXVECTOR3(-0.0000000f, 0.3082159f, -0.0000004f),
-};
-
-const D3DXVECTOR4 accessor_183[] = {
- D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f),
- D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f),
-};
-
-const D3DXVECTOR3 accessor_184[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_185[] = {
- D3DXVECTOR3( 0.0000000f, 0.3056036f, -0.0000006f),
- D3DXVECTOR3( 0.0000000f, 0.3056036f, -0.0000006f),
-};
-
-const D3DXVECTOR4 accessor_186[] = {
- D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f),
- D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f),
-};
-
-const D3DXVECTOR3 accessor_187[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_188[] = {
- D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f),
- D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_189[] = {
- D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f),
- D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f),
-};
-
-const D3DXVECTOR3 accessor_190[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
-};
-
-const D3DXVECTOR3 accessor_191[] = {
- D3DXVECTOR3(-0.0000000f, 0.1024561f, 0.0000011f),
- D3DXVECTOR3(-0.0000000f, 0.1024561f, 0.0000011f),
-};
-
-const D3DXVECTOR4 accessor_192[] = {
- D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f),
- D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f),
-};
-
-const D3DXVECTOR3 accessor_193[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_194[] = {
- D3DXVECTOR3(-0.0000000f, 0.1025997f, 0.0000007f),
- D3DXVECTOR3(-0.0000000f, 0.1025997f, 0.0000007f),
-};
-
-const D3DXVECTOR4 accessor_195[] = {
- D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f),
- D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f),
-};
-
-const D3DXVECTOR3 accessor_196[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_197[] = {
- D3DXVECTOR3( 0.0000000f, 0.1033699f, 0.0000007f),
- D3DXVECTOR3( 0.0000000f, 0.1033699f, 0.0000007f),
-};
-
-const D3DXVECTOR4 accessor_198[] = {
- D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f),
- D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f),
-};
-
-const D3DXVECTOR3 accessor_199[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999998f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999998f),
-};
-
-const D3DXVECTOR3 accessor_200[] = {
- D3DXVECTOR3( 0.0000001f, 0.1012830f, -0.0000006f),
- D3DXVECTOR3( 0.0000001f, 0.1012830f, -0.0000006f),
-};
-
-const D3DXVECTOR4 accessor_201[] = {
- D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f),
- D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f),
-};
-
-const D3DXVECTOR3 accessor_202[] = {
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_203[] = {
- D3DXVECTOR3(-0.0000000f, 0.1024322f, 0.0000006f),
- D3DXVECTOR3(-0.0000000f, 0.1024322f, 0.0000006f),
-};
-
-const D3DXVECTOR4 accessor_204[] = {
- D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f),
- D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f),
-};
-
-const D3DXVECTOR3 accessor_205[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_206[] = {
- D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f),
- D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f),
-};
-
-const D3DXVECTOR4 accessor_207[] = {
- D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f),
- D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f),
-};
-
-const D3DXVECTOR3 accessor_208[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_209[] = {
- D3DXVECTOR3(-0.0000000f, 0.2498153f, -0.0000003f),
- D3DXVECTOR3(-0.0000000f, 0.2498153f, -0.0000003f),
-};
-
-const D3DXVECTOR4 accessor_210[] = {
- D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f),
- D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f),
-};
-
-const D3DXVECTOR3 accessor_211[] = {
- D3DXVECTOR3( 0.9999998f, 0.9999997f, 0.9999998f),
- D3DXVECTOR3( 0.9999998f, 0.9999997f, 0.9999998f),
-};
-
-const D3DXVECTOR3 accessor_212[] = {
- D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f),
- D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_213[] = {
- D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f),
- D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f),
-};
-
-const D3DXVECTOR3 accessor_214[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_215[] = {
- D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f),
- D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f),
-};
-
-const D3DXVECTOR4 accessor_216[] = {
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
-};
-
-const D3DXVECTOR3 accessor_217[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_218[] = {
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
- D3DXVECTOR3(-0.0019493f, 0.0729384f, -0.0253245f),
- D3DXVECTOR3(-0.0019473f, 0.0729535f, -0.0246470f),
- D3DXVECTOR3(-0.0019446f, 0.0729828f, -0.0235456f),
- D3DXVECTOR3(-0.0019421f, 0.0730318f, -0.0220117f),
- D3DXVECTOR3(-0.0019408f, 0.0731085f, -0.0199669f),
- D3DXVECTOR3(-0.0019434f, 0.0732235f, -0.0171513f),
- D3DXVECTOR3(-0.0019529f, 0.0733484f, -0.0132013f),
- D3DXVECTOR3(-0.0019604f, 0.0733796f, -0.0095440f),
- D3DXVECTOR3(-0.0019613f, 0.0733652f, -0.0069526f),
- D3DXVECTOR3(-0.0019592f, 0.0733468f, -0.0050041f),
- D3DXVECTOR3(-0.0019561f, 0.0733323f, -0.0035149f),
- D3DXVECTOR3(-0.0019531f, 0.0733228f, -0.0024349f),
- D3DXVECTOR3(-0.0019509f, 0.0733176f, -0.0017666f),
- D3DXVECTOR3(-0.0019501f, 0.0733160f, -0.0015359f),
- D3DXVECTOR3(-0.0019507f, 0.0733175f, -0.0017199f),
- D3DXVECTOR3(-0.0019522f, 0.0733223f, -0.0022591f),
- D3DXVECTOR3(-0.0019544f, 0.0733312f, -0.0031417f),
- D3DXVECTOR3(-0.0019566f, 0.0733445f, -0.0043721f),
- D3DXVECTOR3(-0.0019581f, 0.0733614f, -0.0059870f),
- D3DXVECTOR3(-0.0019577f, 0.0733771f, -0.0080864f),
- D3DXVECTOR3(-0.0019536f, 0.0733730f, -0.0108565f),
- D3DXVECTOR3(-0.0019461f, 0.0733107f, -0.0141954f),
- D3DXVECTOR3(-0.0019409f, 0.0732084f, -0.0171920f),
- D3DXVECTOR3(-0.0019398f, 0.0731180f, -0.0195282f),
- D3DXVECTOR3(-0.0019409f, 0.0730503f, -0.0213779f),
- D3DXVECTOR3(-0.0019431f, 0.0730020f, -0.0228579f),
- D3DXVECTOR3(-0.0019456f, 0.0729691f, -0.0240146f),
- D3DXVECTOR3(-0.0019479f, 0.0729484f, -0.0248579f),
- D3DXVECTOR3(-0.0019495f, 0.0729373f, -0.0253787f),
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
- D3DXVECTOR3(-0.0019493f, 0.0729384f, -0.0253245f),
- D3DXVECTOR3(-0.0019473f, 0.0729535f, -0.0246470f),
- D3DXVECTOR3(-0.0019446f, 0.0729828f, -0.0235456f),
- D3DXVECTOR3(-0.0019421f, 0.0730318f, -0.0220117f),
- D3DXVECTOR3(-0.0019408f, 0.0731085f, -0.0199669f),
- D3DXVECTOR3(-0.0019434f, 0.0732235f, -0.0171513f),
- D3DXVECTOR3(-0.0019529f, 0.0733484f, -0.0132013f),
- D3DXVECTOR3(-0.0019604f, 0.0733796f, -0.0095440f),
- D3DXVECTOR3(-0.0019613f, 0.0733652f, -0.0069526f),
- D3DXVECTOR3(-0.0019592f, 0.0733468f, -0.0050041f),
- D3DXVECTOR3(-0.0019561f, 0.0733323f, -0.0035149f),
- D3DXVECTOR3(-0.0019531f, 0.0733228f, -0.0024349f),
- D3DXVECTOR3(-0.0019509f, 0.0733176f, -0.0017666f),
- D3DXVECTOR3(-0.0019501f, 0.0733160f, -0.0015359f),
- D3DXVECTOR3(-0.0019507f, 0.0733175f, -0.0017199f),
- D3DXVECTOR3(-0.0019522f, 0.0733223f, -0.0022591f),
- D3DXVECTOR3(-0.0019544f, 0.0733312f, -0.0031417f),
- D3DXVECTOR3(-0.0019566f, 0.0733445f, -0.0043721f),
- D3DXVECTOR3(-0.0019581f, 0.0733614f, -0.0059870f),
- D3DXVECTOR3(-0.0019577f, 0.0733771f, -0.0080864f),
- D3DXVECTOR3(-0.0019536f, 0.0733730f, -0.0108565f),
- D3DXVECTOR3(-0.0019461f, 0.0733107f, -0.0141954f),
- D3DXVECTOR3(-0.0019409f, 0.0732084f, -0.0171920f),
- D3DXVECTOR3(-0.0019398f, 0.0731180f, -0.0195282f),
- D3DXVECTOR3(-0.0019409f, 0.0730503f, -0.0213779f),
- D3DXVECTOR3(-0.0019431f, 0.0730020f, -0.0228579f),
- D3DXVECTOR3(-0.0019456f, 0.0729691f, -0.0240146f),
- D3DXVECTOR3(-0.0019479f, 0.0729484f, -0.0248579f),
- D3DXVECTOR3(-0.0019495f, 0.0729373f, -0.0253787f),
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
- D3DXVECTOR3(-0.0019493f, 0.0729384f, -0.0253245f),
- D3DXVECTOR3(-0.0019473f, 0.0729535f, -0.0246470f),
- D3DXVECTOR3(-0.0019446f, 0.0729828f, -0.0235456f),
- D3DXVECTOR3(-0.0019421f, 0.0730318f, -0.0220117f),
- D3DXVECTOR3(-0.0019408f, 0.0731085f, -0.0199669f),
- D3DXVECTOR3(-0.0019434f, 0.0732235f, -0.0171513f),
- D3DXVECTOR3(-0.0019529f, 0.0733484f, -0.0132013f),
- D3DXVECTOR3(-0.0019604f, 0.0733796f, -0.0095440f),
- D3DXVECTOR3(-0.0019613f, 0.0733652f, -0.0069526f),
- D3DXVECTOR3(-0.0019592f, 0.0733468f, -0.0050041f),
- D3DXVECTOR3(-0.0019561f, 0.0733323f, -0.0035149f),
- D3DXVECTOR3(-0.0019531f, 0.0733228f, -0.0024349f),
- D3DXVECTOR3(-0.0019509f, 0.0733176f, -0.0017666f),
- D3DXVECTOR3(-0.0019501f, 0.0733160f, -0.0015359f),
- D3DXVECTOR3(-0.0019507f, 0.0733175f, -0.0017199f),
- D3DXVECTOR3(-0.0019522f, 0.0733223f, -0.0022591f),
- D3DXVECTOR3(-0.0019544f, 0.0733312f, -0.0031417f),
- D3DXVECTOR3(-0.0019566f, 0.0733445f, -0.0043721f),
- D3DXVECTOR3(-0.0019581f, 0.0733614f, -0.0059870f),
- D3DXVECTOR3(-0.0019577f, 0.0733771f, -0.0080864f),
- D3DXVECTOR3(-0.0019536f, 0.0733730f, -0.0108565f),
- D3DXVECTOR3(-0.0019461f, 0.0733107f, -0.0141954f),
- D3DXVECTOR3(-0.0019409f, 0.0732084f, -0.0171920f),
- D3DXVECTOR3(-0.0019398f, 0.0731180f, -0.0195282f),
- D3DXVECTOR3(-0.0019409f, 0.0730503f, -0.0213779f),
- D3DXVECTOR3(-0.0019431f, 0.0730020f, -0.0228579f),
- D3DXVECTOR3(-0.0019456f, 0.0729691f, -0.0240146f),
- D3DXVECTOR3(-0.0019479f, 0.0729484f, -0.0248579f),
- D3DXVECTOR3(-0.0019495f, 0.0729373f, -0.0253787f),
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
-};
-
-const D3DXVECTOR4 accessor_219[] = {
- D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f),
- D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f),
-};
-
-const D3DXVECTOR3 accessor_220[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_221[] = {
- D3DXVECTOR3(-0.0000000f, 0.0919257f, -0.0000006f),
- D3DXVECTOR3(-0.0000000f, 0.0919257f, -0.0000006f),
-};
-
-const D3DXVECTOR4 accessor_222[] = {
- D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f),
- D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f),
-};
-
-const D3DXVECTOR3 accessor_223[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
-};
-
-const D3DXVECTOR3 accessor_224[] = {
- D3DXVECTOR3( 0.0000006f, 0.1196968f, 0.0000000f),
- D3DXVECTOR3( 0.0000006f, 0.1196968f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_225[] = {
- D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f),
- D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f),
-};
-
-const D3DXVECTOR3 accessor_226[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_227[] = {
- D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f),
- D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f),
-};
-
-const D3DXVECTOR4 accessor_228[] = {
- D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f),
- D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f),
-};
-
-const D3DXVECTOR3 accessor_229[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_230[] = {
- D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f),
- D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_231[] = {
- D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f),
- D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f),
-};
-
-const D3DXVECTOR3 accessor_232[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_233[] = {
- D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f),
- D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f),
-};
-
-const D3DXVECTOR4 accessor_234[] = {
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
-};
-
-const D3DXVECTOR3 accessor_235[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_236[] = {
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
- D3DXVECTOR3(-0.0019493f, 0.0729384f, -0.0253245f),
- D3DXVECTOR3(-0.0019473f, 0.0729535f, -0.0246470f),
- D3DXVECTOR3(-0.0019446f, 0.0729828f, -0.0235456f),
- D3DXVECTOR3(-0.0019421f, 0.0730318f, -0.0220117f),
- D3DXVECTOR3(-0.0019408f, 0.0731085f, -0.0199669f),
- D3DXVECTOR3(-0.0019434f, 0.0732235f, -0.0171513f),
- D3DXVECTOR3(-0.0019529f, 0.0733484f, -0.0132013f),
- D3DXVECTOR3(-0.0019604f, 0.0733796f, -0.0095440f),
- D3DXVECTOR3(-0.0019613f, 0.0733652f, -0.0069526f),
- D3DXVECTOR3(-0.0019592f, 0.0733468f, -0.0050041f),
- D3DXVECTOR3(-0.0019561f, 0.0733323f, -0.0035149f),
- D3DXVECTOR3(-0.0019531f, 0.0733228f, -0.0024349f),
- D3DXVECTOR3(-0.0019509f, 0.0733176f, -0.0017666f),
- D3DXVECTOR3(-0.0019501f, 0.0733160f, -0.0015359f),
- D3DXVECTOR3(-0.0019507f, 0.0733175f, -0.0017199f),
- D3DXVECTOR3(-0.0019523f, 0.0733223f, -0.0022591f),
- D3DXVECTOR3(-0.0019544f, 0.0733313f, -0.0031417f),
- D3DXVECTOR3(-0.0019566f, 0.0733445f, -0.0043721f),
- D3DXVECTOR3(-0.0019581f, 0.0733614f, -0.0059870f),
- D3DXVECTOR3(-0.0019577f, 0.0733771f, -0.0080864f),
- D3DXVECTOR3(-0.0019536f, 0.0733731f, -0.0108565f),
- D3DXVECTOR3(-0.0019461f, 0.0733107f, -0.0141954f),
- D3DXVECTOR3(-0.0019409f, 0.0732084f, -0.0171920f),
- D3DXVECTOR3(-0.0019398f, 0.0731180f, -0.0195282f),
- D3DXVECTOR3(-0.0019409f, 0.0730503f, -0.0213779f),
- D3DXVECTOR3(-0.0019431f, 0.0730020f, -0.0228579f),
- D3DXVECTOR3(-0.0019456f, 0.0729691f, -0.0240146f),
- D3DXVECTOR3(-0.0019479f, 0.0729484f, -0.0248579f),
- D3DXVECTOR3(-0.0019495f, 0.0729373f, -0.0253787f),
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
- D3DXVECTOR3(-0.0019493f, 0.0729384f, -0.0253245f),
- D3DXVECTOR3(-0.0019473f, 0.0729535f, -0.0246470f),
- D3DXVECTOR3(-0.0019446f, 0.0729828f, -0.0235456f),
- D3DXVECTOR3(-0.0019421f, 0.0730318f, -0.0220117f),
- D3DXVECTOR3(-0.0019408f, 0.0731085f, -0.0199669f),
- D3DXVECTOR3(-0.0019434f, 0.0732235f, -0.0171513f),
- D3DXVECTOR3(-0.0019529f, 0.0733484f, -0.0132013f),
- D3DXVECTOR3(-0.0019604f, 0.0733796f, -0.0095440f),
- D3DXVECTOR3(-0.0019613f, 0.0733652f, -0.0069526f),
- D3DXVECTOR3(-0.0019592f, 0.0733468f, -0.0050041f),
- D3DXVECTOR3(-0.0019561f, 0.0733323f, -0.0035149f),
- D3DXVECTOR3(-0.0019531f, 0.0733228f, -0.0024349f),
- D3DXVECTOR3(-0.0019509f, 0.0733176f, -0.0017666f),
- D3DXVECTOR3(-0.0019501f, 0.0733160f, -0.0015359f),
- D3DXVECTOR3(-0.0019507f, 0.0733175f, -0.0017199f),
- D3DXVECTOR3(-0.0019523f, 0.0733223f, -0.0022591f),
- D3DXVECTOR3(-0.0019544f, 0.0733313f, -0.0031417f),
- D3DXVECTOR3(-0.0019566f, 0.0733445f, -0.0043721f),
- D3DXVECTOR3(-0.0019581f, 0.0733614f, -0.0059870f),
- D3DXVECTOR3(-0.0019577f, 0.0733771f, -0.0080864f),
- D3DXVECTOR3(-0.0019536f, 0.0733731f, -0.0108565f),
- D3DXVECTOR3(-0.0019461f, 0.0733107f, -0.0141954f),
- D3DXVECTOR3(-0.0019409f, 0.0732084f, -0.0171920f),
- D3DXVECTOR3(-0.0019398f, 0.0731180f, -0.0195282f),
- D3DXVECTOR3(-0.0019409f, 0.0730503f, -0.0213779f),
- D3DXVECTOR3(-0.0019431f, 0.0730020f, -0.0228579f),
- D3DXVECTOR3(-0.0019456f, 0.0729691f, -0.0240146f),
- D3DXVECTOR3(-0.0019479f, 0.0729484f, -0.0248579f),
- D3DXVECTOR3(-0.0019495f, 0.0729373f, -0.0253787f),
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
- D3DXVECTOR3(-0.0019493f, 0.0729384f, -0.0253245f),
- D3DXVECTOR3(-0.0019473f, 0.0729535f, -0.0246470f),
- D3DXVECTOR3(-0.0019446f, 0.0729828f, -0.0235456f),
- D3DXVECTOR3(-0.0019421f, 0.0730318f, -0.0220117f),
- D3DXVECTOR3(-0.0019408f, 0.0731085f, -0.0199669f),
- D3DXVECTOR3(-0.0019434f, 0.0732235f, -0.0171513f),
- D3DXVECTOR3(-0.0019529f, 0.0733484f, -0.0132013f),
- D3DXVECTOR3(-0.0019604f, 0.0733796f, -0.0095440f),
- D3DXVECTOR3(-0.0019613f, 0.0733652f, -0.0069526f),
- D3DXVECTOR3(-0.0019592f, 0.0733468f, -0.0050041f),
- D3DXVECTOR3(-0.0019561f, 0.0733323f, -0.0035149f),
- D3DXVECTOR3(-0.0019531f, 0.0733228f, -0.0024349f),
- D3DXVECTOR3(-0.0019509f, 0.0733176f, -0.0017666f),
- D3DXVECTOR3(-0.0019501f, 0.0733160f, -0.0015359f),
- D3DXVECTOR3(-0.0019507f, 0.0733175f, -0.0017199f),
- D3DXVECTOR3(-0.0019523f, 0.0733223f, -0.0022591f),
- D3DXVECTOR3(-0.0019544f, 0.0733313f, -0.0031417f),
- D3DXVECTOR3(-0.0019566f, 0.0733445f, -0.0043721f),
- D3DXVECTOR3(-0.0019581f, 0.0733614f, -0.0059870f),
- D3DXVECTOR3(-0.0019577f, 0.0733771f, -0.0080864f),
- D3DXVECTOR3(-0.0019536f, 0.0733731f, -0.0108565f),
- D3DXVECTOR3(-0.0019461f, 0.0733107f, -0.0141954f),
- D3DXVECTOR3(-0.0019409f, 0.0732084f, -0.0171920f),
- D3DXVECTOR3(-0.0019398f, 0.0731180f, -0.0195282f),
- D3DXVECTOR3(-0.0019409f, 0.0730503f, -0.0213779f),
- D3DXVECTOR3(-0.0019431f, 0.0730020f, -0.0228579f),
- D3DXVECTOR3(-0.0019456f, 0.0729691f, -0.0240146f),
- D3DXVECTOR3(-0.0019479f, 0.0729484f, -0.0248579f),
- D3DXVECTOR3(-0.0019495f, 0.0729373f, -0.0253787f),
- D3DXVECTOR3(-0.0019501f, 0.0729339f, -0.0255577f),
-};
-
-const D3DXVECTOR4 accessor_237[] = {
- D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f),
- D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f),
-};
-
-const D3DXVECTOR3 accessor_238[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_239[] = {
- D3DXVECTOR3(-0.0000000f, 0.0919261f, -0.0000001f),
- D3DXVECTOR3(-0.0000000f, 0.0919261f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_240[] = {
- D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f),
- D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f),
-};
-
-const D3DXVECTOR3 accessor_241[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
-};
-
-const D3DXVECTOR3 accessor_242[] = {
- D3DXVECTOR3(-0.0000000f, 0.1196964f, 0.0000000f),
- D3DXVECTOR3(-0.0000000f, 0.1196964f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_243[] = {
- D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f),
- D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f),
-};
-
-const D3DXVECTOR3 accessor_244[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_245[] = {
- D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f),
- D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f),
-};
-
-const D3DXVECTOR4 accessor_246[] = {
- D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f),
- D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f),
-};
-
-const D3DXVECTOR3 accessor_247[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const float accessor_248[] = {
- 0.0,
- 0.0416666679084301,
- 0.0833333358168602,
- 0.125,
- 0.1666666716337204,
- 0.2083333283662796,
- 0.25,
- 0.2916666567325592,
- 0.3333333432674408,
- 0.375,
- 0.4166666567325592,
- 0.4583333432674408,
- 0.5,
- 0.5416666865348816,
- 0.5833333134651184,
- 0.625,
- 0.6666666865348816,
- 0.7083333134651184,
- 0.75,
- 0.7916666865348816,
- 0.8333333134651184,
- 0.875,
- 0.9166666865348816,
- 0.9583333134651184,
- 1.0,
- 1.0416666269302368,
- 1.0833333730697632,
- 1.125,
- 1.1666666269302368,
- 1.2083333730697632,
- 1.25,
- 1.2916666269302368,
- 1.3333333730697632,
- 1.375,
- 1.4166666269302368,
- 1.4583333730697632,
- 1.5,
- 1.5416666269302368,
- 1.5833333730697632,
- 1.625,
- 1.6666666269302368,
- 1.7083333730697632,
- 1.75,
- 1.7916666269302368,
- 1.8333333730697632,
- 1.875,
- 1.9166666269302368,
- 1.9583333730697632,
- 2.0,
- 2.0416667461395264,
- 2.0833332538604736,
- 2.125,
- 2.1666667461395264,
- 2.2083332538604736,
- 2.25,
- 2.2916667461395264,
- 2.3333332538604736,
- 2.375,
- 2.4166667461395264,
- 2.4583332538604736,
- 2.5,
- 2.5416667461395264,
- 2.5833332538604736,
-};
-
-const D3DXVECTOR3 accessor_249[] = {
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0062762f, -0.0043773f),
- D3DXVECTOR3(-0.0020865f, 0.0070791f, -0.0040864f),
- D3DXVECTOR3(-0.0020865f, 0.0085242f, -0.0035628f),
- D3DXVECTOR3(-0.0020865f, 0.0107303f, -0.0027635f),
- D3DXVECTOR3(-0.0020865f, 0.0138577f, -0.0016304f),
- D3DXVECTOR3(-0.0020865f, 0.0181335f, -0.0000812f),
- D3DXVECTOR3(-0.0020865f, 0.0238962f, 0.0020067f),
- D3DXVECTOR3(-0.0020865f, 0.0316917f, 0.0048312f),
- D3DXVECTOR3(-0.0020865f, 0.0425045f, 0.0087489f),
- D3DXVECTOR3(-0.0020865f, 0.0584514f, 0.0145268f),
- D3DXVECTOR3(-0.0020865f, 0.0853066f, 0.0242569f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1092089f, 0.0329172f),
- D3DXVECTOR3(-0.0020865f, 0.1077885f, 0.0324025f),
- D3DXVECTOR3(-0.0020865f, 0.1034038f, 0.0308139f),
- D3DXVECTOR3(-0.0020865f, 0.0960671f, 0.0281556f),
- D3DXVECTOR3(-0.0020865f, 0.0861663f, 0.0245684f),
- D3DXVECTOR3(-0.0020865f, 0.0745076f, 0.0203442f),
- D3DXVECTOR3(-0.0020865f, 0.0621546f, 0.0158685f),
- D3DXVECTOR3(-0.0020865f, 0.0501323f, 0.0115126f),
- D3DXVECTOR3(-0.0020865f, 0.0391940f, 0.0075494f),
- D3DXVECTOR3(-0.0020865f, 0.0297636f, 0.0041326f),
- D3DXVECTOR3(-0.0020865f, 0.0220025f, 0.0013206f),
- D3DXVECTOR3(-0.0020865f, 0.0159076f, -0.0008877f),
- D3DXVECTOR3(-0.0020865f, 0.0113903f, -0.0025244f),
- D3DXVECTOR3(-0.0020865f, 0.0083256f, -0.0036348f),
- D3DXVECTOR3(-0.0020865f, 0.0065801f, -0.0042672f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_250[] = {
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0021890f, 0.0000000f, 0.0000000f, 0.9999976f),
- D3DXVECTOR4(-0.0087159f, 0.0000000f, 0.0000000f, 0.9999620f),
- D3DXVECTOR4(-0.0190081f, 0.0000000f, 0.0000000f, 0.9998193f),
- D3DXVECTOR4(-0.0317616f, 0.0000000f, 0.0000000f, 0.9994955f),
- D3DXVECTOR4(-0.0452434f, 0.0000000f, 0.0000000f, 0.9989760f),
- D3DXVECTOR4(-0.0578583f, 0.0000000f, 0.0000000f, 0.9983248f),
- D3DXVECTOR4(-0.0685415f, 0.0000000f, 0.0000000f, 0.9976483f),
- D3DXVECTOR4(-0.0767916f, 0.0000000f, 0.0000000f, 0.9970472f),
- D3DXVECTOR4(-0.0825017f, 0.0000000f, 0.0000000f, 0.9965910f),
- D3DXVECTOR4(-0.0857791f, 0.0000000f, 0.0000000f, 0.9963142f),
- D3DXVECTOR4(-0.0868221f, 0.0000000f, -0.0000000f, 0.9962238f),
- D3DXVECTOR4(-0.0850719f, 0.0000000f, -0.0000000f, 0.9963748f),
- D3DXVECTOR4(-0.0794759f, 0.0000000f, -0.0000000f, 0.9968368f),
- D3DXVECTOR4(-0.0694075f, -0.0000000f, -0.0000000f, 0.9975884f),
- D3DXVECTOR4(-0.0540456f, -0.0000000f, -0.0000000f, 0.9985385f),
- D3DXVECTOR4(-0.0322847f, -0.0000000f, -0.0000000f, 0.9994787f),
- D3DXVECTOR4(-0.0025831f, -0.0000000f, -0.0000000f, 0.9999967f),
- D3DXVECTOR4( 0.0373075f, -0.0000000f, -0.0000000f, 0.9993039f),
- D3DXVECTOR4( 0.0908692f, -0.0000000f, -0.0000000f, 0.9958628f),
- D3DXVECTOR4( 0.1639615f, -0.0000000f, -0.0000000f, 0.9864668f),
- D3DXVECTOR4( 0.2675723f, -0.0000000f, -0.0000000f, 0.9635378f),
- D3DXVECTOR4( 0.4205562f, -0.0000000f, -0.0000000f, 0.9072665f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f),
- D3DXVECTOR4( 0.5225267f, -0.0000000f, -0.0000000f, 0.8526229f),
- D3DXVECTOR4( 0.5021838f, -0.0000000f, -0.0000000f, 0.8647609f),
- D3DXVECTOR4( 0.4675197f, -0.0000000f, -0.0000000f, 0.8839827f),
- D3DXVECTOR4( 0.4195871f, -0.0000000f, -0.0000000f, 0.9077151f),
- D3DXVECTOR4( 0.3616153f, -0.0000000f, -0.0000000f, 0.9323274f),
- D3DXVECTOR4( 0.2986318f, -0.0000000f, -0.0000000f, 0.9543684f),
- D3DXVECTOR4( 0.2360551f, -0.0000000f, -0.0000000f, 0.9717396f),
- D3DXVECTOR4( 0.1782483f, -0.0000000f, -0.0000000f, 0.9839855f),
- D3DXVECTOR4( 0.1279036f, -0.0000000f, -0.0000000f, 0.9917866f),
- D3DXVECTOR4( 0.0862154f, -0.0000000f, -0.0000000f, 0.9962766f),
- D3DXVECTOR4( 0.0533678f, -0.0000000f, -0.0000000f, 0.9985749f),
- D3DXVECTOR4( 0.0289823f, -0.0000000f, -0.0000000f, 0.9995800f),
- D3DXVECTOR4( 0.0124282f, -0.0000000f, -0.0000000f, 0.9999228f),
- D3DXVECTOR4( 0.0029981f, -0.0000000f, -0.0000000f, 0.9999955f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
-};
-
-const float accessor_251[] = {
- 0.0,
- 2.5833332538604736,
-};
-
-const D3DXVECTOR3 accessor_252[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_253[] = {
- D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f),
- D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f),
-};
-
-const D3DXVECTOR4 accessor_254[] = {
- D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f),
- D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f),
-};
-
-const D3DXVECTOR3 accessor_255[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_256[] = {
- D3DXVECTOR3( 0.0000000f, 0.2498153f, 0.0000001f),
- D3DXVECTOR3( 0.0000000f, 0.2498153f, 0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_257[] = {
- D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f),
- D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f),
-};
-
-const D3DXVECTOR3 accessor_258[] = {
- D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_259[] = {
- D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_260[] = {
- D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0009257f, 0.0000000f, 0.0000000f, 0.9999996f),
- D3DXVECTOR4(-0.0036345f, 0.0000000f, 0.0000000f, 0.9999934f),
- D3DXVECTOR4(-0.0078942f, 0.0000000f, 0.0000000f, 0.9999688f),
- D3DXVECTOR4(-0.0132994f, 0.0000000f, 0.0000000f, 0.9999115f),
- D3DXVECTOR4(-0.0193194f, 0.0000000f, 0.0000000f, 0.9998134f),
- D3DXVECTOR4(-0.0253953f, 0.0000000f, 0.0000000f, 0.9996775f),
- D3DXVECTOR4(-0.0310393f, 0.0000000f, 0.0000000f, 0.9995182f),
- D3DXVECTOR4(-0.0358950f, 0.0000000f, 0.0000000f, 0.9993556f),
- D3DXVECTOR4(-0.0397460f, 0.0000000f, 0.0000000f, 0.9992098f),
- D3DXVECTOR4(-0.0424918f, 0.0000000f, 0.0000000f, 0.9990968f),
- D3DXVECTOR4(-0.0441135f, -0.0000000f, -0.0000000f, 0.9990265f),
- D3DXVECTOR4(-0.0446421f, 0.0000000f, -0.0000000f, 0.9990031f),
- D3DXVECTOR4(-0.0440583f, 0.0000000f, 0.0000000f, 0.9990289f),
- D3DXVECTOR4(-0.0421923f, -0.0000000f, 0.0000000f, 0.9991095f),
- D3DXVECTOR4(-0.0388366f, 0.0000000f, 0.0000000f, 0.9992456f),
- D3DXVECTOR4(-0.0337202f, 0.0000000f, 0.0000000f, 0.9994313f),
- D3DXVECTOR4(-0.0264780f, -0.0000000f, 0.0000000f, 0.9996494f),
- D3DXVECTOR4(-0.0165986f, 0.0000000f, 0.0000000f, 0.9998623f),
- D3DXVECTOR4(-0.0033273f, -0.0000000f, -0.0000000f, 0.9999945f),
- D3DXVECTOR4( 0.0145278f, -0.0000000f, 0.0000000f, 0.9998945f),
- D3DXVECTOR4( 0.0390374f, 0.0000000f, 0.0000000f, 0.9992377f),
- D3DXVECTOR4( 0.0742943f, 0.0000000f, 0.0000000f, 0.9972364f),
- D3DXVECTOR4( 0.1282902f, 0.0000000f, 0.0000000f, 0.9917367f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f),
- D3DXVECTOR4( 0.1752275f, -0.0000000f, -0.0000000f, 0.9845279f),
- D3DXVECTOR4( 0.1891056f, 0.0000000f, -0.0000000f, 0.9819568f),
- D3DXVECTOR4( 0.2033245f, 0.0000000f, 0.0000000f, 0.9791114f),
- D3DXVECTOR4( 0.2105833f, -0.0000000f, 0.0000000f, 0.9775760f),
- D3DXVECTOR4( 0.2022800f, 0.0000000f, 0.0000000f, 0.9793277f),
- D3DXVECTOR4( 0.1771124f, -0.0000000f, 0.0000000f, 0.9841906f),
- D3DXVECTOR4( 0.1446740f, 0.0000000f, 0.0000000f, 0.9894794f),
- D3DXVECTOR4( 0.1100498f, -0.0000000f, 0.0000000f, 0.9939261f),
- D3DXVECTOR4( 0.0769736f, 0.0000000f, 0.0000000f, 0.9970331f),
- D3DXVECTOR4( 0.0477586f, 0.0000000f, 0.0000000f, 0.9988589f),
- D3DXVECTOR4( 0.0236126f, -0.0000000f, 0.0000000f, 0.9997212f),
- D3DXVECTOR4( 0.0050150f, 0.0000000f, 0.0000000f, 0.9999874f),
- D3DXVECTOR4(-0.0079844f, 0.0000000f, -0.0000000f, 0.9999681f),
- D3DXVECTOR4(-0.0155661f, 0.0000000f, 0.0000000f, 0.9998789f),
- D3DXVECTOR4(-0.0180204f, 0.0000000f, -0.0000000f, 0.9998376f),
- D3DXVECTOR4(-0.0171851f, -0.0000000f, -0.0000000f, 0.9998524f),
- D3DXVECTOR4(-0.0074814f, -0.0000000f, -0.0000000f, 0.9999720f),
- D3DXVECTOR4(-0.0016779f, -0.0000000f, -0.0000000f, 0.9999986f),
- D3DXVECTOR4(-0.0003088f, -0.0000000f, -0.0000000f, 0.9999999f),
- D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_261[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_262[] = {
- D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f),
- D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_263[] = {
- D3DXVECTOR4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f),
- D3DXVECTOR4(-0.0007234f, -0.0000008f, -0.0011286f, 0.9999991f),
- D3DXVECTOR4(-0.0028400f, -0.0000032f, -0.0011285f, 0.9999954f),
- D3DXVECTOR4(-0.0061685f, -0.0000069f, -0.0011282f, 0.9999803f),
- D3DXVECTOR4(-0.0103921f, -0.0000117f, -0.0011280f, 0.9999454f),
- D3DXVECTOR4(-0.0150963f, -0.0000170f, -0.0011279f, 0.9998854f),
- D3DXVECTOR4(-0.0198443f, -0.0000224f, -0.0011278f, 0.9998025f),
- D3DXVECTOR4(-0.0242551f, -0.0000274f, -0.0011278f, 0.9997052f),
- D3DXVECTOR4(-0.0280501f, -0.0000316f, -0.0011278f, 0.9996060f),
- D3DXVECTOR4(-0.0310600f, -0.0000350f, -0.0011279f, 0.9995170f),
- D3DXVECTOR4(-0.0332062f, -0.0000375f, -0.0011279f, 0.9994479f),
- D3DXVECTOR4(-0.0344739f, -0.0000389f, -0.0011280f, 0.9994050f),
- D3DXVECTOR4(-0.0348870f, -0.0000394f, -0.0011280f, 0.9993907f),
- D3DXVECTOR4(-0.0343703f, -0.0000388f, -0.0011278f, 0.9994086f),
- D3DXVECTOR4(-0.0327188f, -0.0000370f, -0.0011272f, 0.9994640f),
- D3DXVECTOR4(-0.0297488f, -0.0000338f, -0.0011260f, 0.9995568f),
- D3DXVECTOR4(-0.0252208f, -0.0000288f, -0.0011244f, 0.9996813f),
- D3DXVECTOR4(-0.0188120f, -0.0000218f, -0.0011222f, 0.9998224f),
- D3DXVECTOR4(-0.0100701f, -0.0000121f, -0.0011194f, 0.9999486f),
- D3DXVECTOR4( 0.0016721f, 0.0000009f, -0.0011161f, 0.9999980f),
- D3DXVECTOR4( 0.0174689f, 0.0000185f, -0.0011124f, 0.9998468f),
- D3DXVECTOR4( 0.0391528f, 0.0000429f, -0.0011086f, 0.9992326f),
- D3DXVECTOR4( 0.0703498f, 0.0000783f, -0.0011059f, 0.9975218f),
- D3DXVECTOR4( 0.1181570f, 0.0001329f, -0.0011082f, 0.9929943f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f),
- D3DXVECTOR4( 0.1722621f, 0.0001928f, -0.0011071f, 0.9850505f),
- D3DXVECTOR4( 0.2138882f, 0.0002375f, -0.0010919f, 0.9768575f),
- D3DXVECTOR4( 0.2613858f, 0.0002906f, -0.0010801f, 0.9652337f),
- D3DXVECTOR4( 0.2975744f, 0.0003326f, -0.0010731f, 0.9546980f),
- D3DXVECTOR4( 0.3032275f, 0.0003390f, -0.0010639f, 0.9529175f),
- D3DXVECTOR4( 0.2764442f, 0.0003066f, -0.0010510f, 0.9610294f),
- D3DXVECTOR4( 0.2377807f, 0.0002601f, -0.0010413f, 0.9713183f),
- D3DXVECTOR4( 0.1941246f, 0.0002082f, -0.0010385f, 0.9809763f),
- D3DXVECTOR4( 0.1497597f, 0.0001566f, -0.0010432f, 0.9887218f),
- D3DXVECTOR4( 0.1075356f, 0.0001089f, -0.0010544f, 0.9942007f),
- D3DXVECTOR4( 0.0694092f, 0.0000673f, -0.0010698f, 0.9975877f),
- D3DXVECTOR4( 0.0367376f, 0.0000330f, -0.0010869f, 0.9993244f),
- D3DXVECTOR4( 0.0104604f, 0.0000063f, -0.0011032f, 0.9999447f),
- D3DXVECTOR4(-0.0087762f, -0.0000126f, -0.0011165f, 0.9999608f),
- D3DXVECTOR4(-0.0205302f, -0.0000239f, -0.0011253f, 0.9997886f),
- D3DXVECTOR4(-0.0244987f, -0.0000276f, -0.0011283f, 0.9996992f),
- D3DXVECTOR4(-0.0233632f, -0.0000264f, -0.0011283f, 0.9997264f),
- D3DXVECTOR4(-0.0101714f, -0.0000115f, -0.0011283f, 0.9999477f),
- D3DXVECTOR4(-0.0022814f, -0.0000026f, -0.0011285f, 0.9999968f),
- D3DXVECTOR4(-0.0004200f, -0.0000005f, -0.0011286f, 0.9999993f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f),
- D3DXVECTOR4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f),
-};
-
-const D3DXVECTOR3 accessor_264[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_265[] = {
- D3DXVECTOR3(-0.0005524f, 0.0688295f, -0.0213631f),
- D3DXVECTOR3(-0.0005524f, 0.0688295f, -0.0213631f),
-};
-
-const D3DXVECTOR4 accessor_266[] = {
- D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f),
- D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f),
-};
-
-const D3DXVECTOR3 accessor_267[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_268[] = {
- D3DXVECTOR3( 0.0000000f, 0.3082207f, -0.0000010f),
- D3DXVECTOR3( 0.0000000f, 0.3082207f, -0.0000010f),
-};
-
-const D3DXVECTOR4 accessor_269[] = {
- D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f),
- D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f),
-};
-
-const D3DXVECTOR3 accessor_270[] = {
- D3DXVECTOR3( 1.0000001f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 1.0000001f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_271[] = {
- D3DXVECTOR3( 0.0000000f, 0.3056044f, 0.0000002f),
- D3DXVECTOR3( 0.0000000f, 0.3056044f, 0.0000002f),
-};
-
-const D3DXVECTOR4 accessor_272[] = {
- D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f),
- D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f),
-};
-
-const D3DXVECTOR3 accessor_273[] = {
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_274[] = {
- D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f),
- D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_275[] = {
- D3DXVECTOR4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f),
- D3DXVECTOR4( 0.0052462f, 0.0000059f, 0.0011287f, 0.9999856f),
- D3DXVECTOR4( 0.0174043f, 0.0000196f, 0.0011285f, 0.9998479f),
- D3DXVECTOR4( 0.0328632f, 0.0000370f, 0.0011281f, 0.9994593f),
- D3DXVECTOR4( 0.0487975f, 0.0000550f, 0.0011273f, 0.9988081f),
- D3DXVECTOR4( 0.0616544f, 0.0041983f, 0.0008714f, 0.9980884f),
- D3DXVECTOR4( 0.0631043f, 0.0148462f, 0.0001917f, 0.9978965f),
- D3DXVECTOR4( 0.0437547f, 0.0294095f, -0.0001603f, 0.9986094f),
- D3DXVECTOR4( 0.0153181f, 0.0452828f, 0.0004309f, 0.9988567f),
- D3DXVECTOR4(-0.0144889f, 0.0598154f, 0.0019910f, 0.9981024f),
- D3DXVECTOR4(-0.0405501f, 0.0703799f, 0.0039846f, 0.9966878f),
- D3DXVECTOR4(-0.0601135f, 0.0744065f, 0.0056148f, 0.9953987f),
- D3DXVECTOR4(-0.0720231f, 0.0704410f, 0.0062225f, 0.9948930f),
- D3DXVECTOR4(-0.0760304f, 0.0509407f, 0.0050169f, 0.9957908f),
- D3DXVECTOR4(-0.0759624f, -0.0523691f, -0.0028679f, 0.9957303f),
- D3DXVECTOR4(-0.0760197f, -0.0717574f, -0.0043619f, 0.9945114f),
- D3DXVECTOR4(-0.0763799f, -0.0635159f, -0.0037504f, 0.9950466f),
- D3DXVECTOR4(-0.0770289f, 0.0526805f, 0.0052030f, 0.9956225f),
- D3DXVECTOR4(-0.0776180f, 0.0727178f, 0.0067996f, 0.9943045f),
- D3DXVECTOR4(-0.0785292f, 0.0715604f, 0.0067751f, 0.9943171f),
- D3DXVECTOR4(-0.0797666f, 0.0669097f, 0.0064910f, 0.9945443f),
- D3DXVECTOR4(-0.0814541f, 0.0538083f, 0.0055315f, 0.9952081f),
- D3DXVECTOR4(-0.0837723f, 0.0091012f, 0.0018977f, 0.9964416f),
- D3DXVECTOR4(-0.0870097f, -0.0002338f, 0.0011125f, 0.9962068f),
- D3DXVECTOR4(-0.0919945f, -0.0002393f, 0.0011114f, 0.9957589f),
- D3DXVECTOR4(-0.0957634f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f),
- D3DXVECTOR4(-0.0940729f, -0.0002394f, 0.0011110f, 0.9955647f),
- D3DXVECTOR4(-0.0896200f, -0.0002281f, 0.0011126f, 0.9959754f),
- D3DXVECTOR4(-0.0833320f, -0.0002114f, 0.0011147f, 0.9965212f),
- D3DXVECTOR4(-0.0761369f, -0.0001909f, 0.0011170f, 0.9970967f),
- D3DXVECTOR4(-0.0689652f, -0.0001680f, 0.0011194f, 0.9976184f),
- D3DXVECTOR4(-0.0625721f, -0.0001459f, 0.0011214f, 0.9980398f),
- D3DXVECTOR4(-0.0570058f, -0.0001265f, 0.0011229f, 0.9983733f),
- D3DXVECTOR4(-0.0521370f, -0.0001094f, 0.0011242f, 0.9986393f),
- D3DXVECTOR4(-0.0478361f, -0.0000942f, 0.0011252f, 0.9988546f),
- D3DXVECTOR4(-0.0439732f, -0.0000807f, 0.0011260f, 0.9990321f),
- D3DXVECTOR4(-0.0404184f, -0.0000687f, 0.0011266f, 0.9991822f),
- D3DXVECTOR4(-0.0370413f, -0.0000580f, 0.0011272f, 0.9993132f),
- D3DXVECTOR4(-0.0337119f, -0.0000486f, 0.0011276f, 0.9994310f),
- D3DXVECTOR4(-0.0302997f, -0.0000402f, 0.0011279f, 0.9995403f),
- D3DXVECTOR4(-0.0266745f, -0.0000328f, 0.0011282f, 0.9996436f),
- D3DXVECTOR4(-0.0227057f, -0.0000263f, 0.0011284f, 0.9997416f),
- D3DXVECTOR4(-0.0182630f, -0.0000206f, 0.0011285f, 0.9998326f),
- D3DXVECTOR4(-0.0133408f, -0.0000151f, 0.0011286f, 0.9999104f),
- D3DXVECTOR4(-0.0084337f, -0.0000095f, 0.0011286f, 0.9999638f),
- D3DXVECTOR4(-0.0041614f, -0.0000047f, 0.0011287f, 0.9999907f),
- D3DXVECTOR4(-0.0011437f, -0.0000013f, 0.0011287f, 0.9999987f),
- D3DXVECTOR4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f),
-};
-
-const D3DXVECTOR3 accessor_276[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000001f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999998f),
- D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 1.0624999f, 1.0624999f, 0.9687501f),
- D3DXVECTOR3( 1.1999999f, 1.1999999f, 0.9000000f),
- D3DXVECTOR3( 1.3375000f, 1.3375002f, 0.8312501f),
- D3DXVECTOR3( 1.4000000f, 1.4000000f, 0.8000001f),
- D3DXVECTOR3( 1.2962962f, 1.2962964f, 0.8518518f),
- D3DXVECTOR3( 1.1037036f, 1.1037037f, 0.9481483f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000001f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000001f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_277[] = {
- D3DXVECTOR3( 0.2488541f, 0.2033313f, -0.0450287f),
- D3DXVECTOR3( 0.2488541f, 0.2033313f, -0.0450287f),
-};
-
-const D3DXVECTOR4 accessor_278[] = {
- D3DXVECTOR4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f),
- D3DXVECTOR4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f),
-};
-
-const D3DXVECTOR3 accessor_279[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_280[] = {
- D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f),
- D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_281[] = {
- D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f),
- D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f),
-};
-
-const D3DXVECTOR3 accessor_282[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
-};
-
-const D3DXVECTOR3 accessor_283[] = {
- D3DXVECTOR3(-0.0000000f, 0.1024613f, -0.0000003f),
- D3DXVECTOR3(-0.0000000f, 0.1024613f, -0.0000003f),
-};
-
-const D3DXVECTOR4 accessor_284[] = {
- D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f),
- D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f),
-};
-
-const D3DXVECTOR3 accessor_285[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_286[] = {
- D3DXVECTOR3(-0.0000000f, 0.1026039f, 0.0000002f),
- D3DXVECTOR3(-0.0000000f, 0.1026039f, 0.0000002f),
-};
-
-const D3DXVECTOR4 accessor_287[] = {
- D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f),
- D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f),
-};
-
-const D3DXVECTOR3 accessor_288[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f),
-};
-
-const D3DXVECTOR3 accessor_289[] = {
- D3DXVECTOR3(-0.0000000f, 0.1033674f, -0.0000001f),
- D3DXVECTOR3(-0.0000000f, 0.1033674f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_290[] = {
- D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f),
- D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f),
-};
-
-const D3DXVECTOR3 accessor_291[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_292[] = {
- D3DXVECTOR3(-0.0000000f, 0.1012845f, -0.0000001f),
- D3DXVECTOR3(-0.0000000f, 0.1012845f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_293[] = {
- D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f),
- D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f),
-};
-
-const D3DXVECTOR3 accessor_294[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_295[] = {
- D3DXVECTOR3(-0.0000000f, 0.1024317f, -0.0000007f),
- D3DXVECTOR3(-0.0000000f, 0.1024317f, -0.0000007f),
-};
-
-const D3DXVECTOR4 accessor_296[] = {
- D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f),
- D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f),
-};
-
-const D3DXVECTOR3 accessor_297[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_298[] = {
- D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f),
- D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f),
-};
-
-const D3DXVECTOR4 accessor_299[] = {
- D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f),
- D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f),
-};
-
-const D3DXVECTOR3 accessor_300[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_301[] = {
- D3DXVECTOR3(-0.0000000f, 0.3082130f, -0.0000004f),
- D3DXVECTOR3(-0.0000000f, 0.3082130f, -0.0000004f),
-};
-
-const D3DXVECTOR4 accessor_302[] = {
- D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f),
- D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f),
-};
-
-const D3DXVECTOR3 accessor_303[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_304[] = {
- D3DXVECTOR3(-0.0000000f, 0.3055982f, -0.0000002f),
- D3DXVECTOR3(-0.0000000f, 0.3055982f, -0.0000002f),
-};
-
-const D3DXVECTOR4 accessor_305[] = {
- D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f),
- D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f),
-};
-
-const D3DXVECTOR3 accessor_306[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_307[] = {
- D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f),
- D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_308[] = {
- D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f),
- D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f),
-};
-
-const D3DXVECTOR3 accessor_309[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f),
-};
-
-const D3DXVECTOR3 accessor_310[] = {
- D3DXVECTOR3( 0.0000000f, 0.1024598f, 0.0000012f),
- D3DXVECTOR3( 0.0000000f, 0.1024598f, 0.0000012f),
-};
-
-const D3DXVECTOR4 accessor_311[] = {
- D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f),
- D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f),
-};
-
-const D3DXVECTOR3 accessor_312[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_313[] = {
- D3DXVECTOR3(-0.0000000f, 0.1026044f, 0.0000000f),
- D3DXVECTOR3(-0.0000000f, 0.1026044f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_314[] = {
- D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f),
- D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f),
-};
-
-const D3DXVECTOR3 accessor_315[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f),
-};
-
-const D3DXVECTOR3 accessor_316[] = {
- D3DXVECTOR3( 0.0000000f, 0.1033676f, 0.0000003f),
- D3DXVECTOR3( 0.0000000f, 0.1033676f, 0.0000003f),
-};
-
-const D3DXVECTOR4 accessor_317[] = {
- D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f),
- D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f),
-};
-
-const D3DXVECTOR3 accessor_318[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_319[] = {
- D3DXVECTOR3(-0.0000000f, 0.1012811f, -0.0000002f),
- D3DXVECTOR3(-0.0000000f, 0.1012811f, -0.0000002f),
-};
-
-const D3DXVECTOR4 accessor_320[] = {
- D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f),
- D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f),
-};
-
-const D3DXVECTOR3 accessor_321[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_322[] = {
- D3DXVECTOR3( 0.0000000f, 0.1024289f, -0.0000001f),
- D3DXVECTOR3( 0.0000000f, 0.1024289f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_323[] = {
- D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f),
- D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f),
-};
-
-const D3DXVECTOR3 accessor_324[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_325[] = {
- D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f),
- D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f),
-};
-
-const D3DXVECTOR4 accessor_326[] = {
- D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f),
- D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f),
-};
-
-const D3DXVECTOR3 accessor_327[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_328[] = {
- D3DXVECTOR3(-0.0000000f, 0.2498146f, 0.0000000f),
- D3DXVECTOR3(-0.0000000f, 0.2498146f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_329[] = {
- D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f),
- D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f),
-};
-
-const D3DXVECTOR3 accessor_330[] = {
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_331[] = {
- D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f),
- D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_332[] = {
- D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f),
- D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f),
-};
-
-const D3DXVECTOR3 accessor_333[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_334[] = {
- D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f),
- D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f),
-};
-
-const D3DXVECTOR4 accessor_335[] = {
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
-};
-
-const D3DXVECTOR3 accessor_336[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_337[] = {
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733793f, -0.0047631f),
- D3DXVECTOR3(-0.0020865f, 0.0733698f, -0.0056423f),
- D3DXVECTOR3(-0.0020865f, 0.0733313f, -0.0070283f),
- D3DXVECTOR3(-0.0020865f, 0.0732441f, -0.0087447f),
- D3DXVECTOR3(-0.0020865f, 0.0731042f, -0.0105567f),
- D3DXVECTOR3(-0.0020865f, 0.0729290f, -0.0122492f),
- D3DXVECTOR3(-0.0020865f, 0.0727471f, -0.0136797f),
- D3DXVECTOR3(-0.0020865f, 0.0725856f, -0.0147822f),
- D3DXVECTOR3(-0.0020865f, 0.0724631f, -0.0155441f),
- D3DXVECTOR3(-0.0020865f, 0.0723888f, -0.0159809f),
- D3DXVECTOR3(-0.0020865f, 0.0723645f, -0.0161198f),
- D3DXVECTOR3(-0.0020865f, 0.0726561f, -0.0157957f),
- D3DXVECTOR3(-0.0020865f, 0.0735829f, -0.0147587f),
- D3DXVECTOR3(-0.0020865f, 0.0752300f, -0.0128901f),
- D3DXVECTOR3(-0.0020865f, 0.0776915f, -0.0100333f),
- D3DXVECTOR3(-0.0020865f, 0.0810721f, -0.0059772f),
- D3DXVECTOR3(-0.0020865f, 0.0854873f, -0.0004292f),
- D3DXVECTOR3(-0.0020865f, 0.0910634f, 0.0070289f),
- D3DXVECTOR3(-0.0020865f, 0.0979341f, 0.0170215f),
- D3DXVECTOR3(-0.0020865f, 0.1062378f, 0.0305371f),
- D3DXVECTOR3(-0.0020865f, 0.1161616f, 0.0492570f),
- D3DXVECTOR3(-0.0020865f, 0.1288357f, 0.0756562f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1383629f, 0.0924180f),
- D3DXVECTOR3(-0.0020865f, 0.1367864f, 0.0893140f),
- D3DXVECTOR3(-0.0020865f, 0.1339777f, 0.0838283f),
- D3DXVECTOR3(-0.0020865f, 0.1298050f, 0.0758746f),
- D3DXVECTOR3(-0.0020865f, 0.1242470f, 0.0657607f),
- D3DXVECTOR3(-0.0020865f, 0.1174958f, 0.0542613f),
- D3DXVECTOR3(-0.0020865f, 0.1099808f, 0.0424128f),
- D3DXVECTOR3(-0.0020865f, 0.1022687f, 0.0311766f),
- D3DXVECTOR3(-0.0020865f, 0.0949146f, 0.0212209f),
- D3DXVECTOR3(-0.0020865f, 0.0883559f, 0.0128914f),
- D3DXVECTOR3(-0.0020865f, 0.0828787f, 0.0062912f),
- D3DXVECTOR3(-0.0020865f, 0.0786319f, 0.0013782f),
- D3DXVECTOR3(-0.0020865f, 0.0756596f, -0.0019607f),
- D3DXVECTOR3(-0.0020865f, 0.0739336f, -0.0038633f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_338[] = {
- D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f),
- D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f),
-};
-
-const D3DXVECTOR3 accessor_339[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_340[] = {
- D3DXVECTOR3(-0.0000000f, 0.0919256f, -0.0000004f),
- D3DXVECTOR3(-0.0000000f, 0.0919256f, -0.0000004f),
-};
-
-const D3DXVECTOR4 accessor_341[] = {
- D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f),
- D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f),
-};
-
-const D3DXVECTOR3 accessor_342[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
-};
-
-const D3DXVECTOR3 accessor_343[] = {
- D3DXVECTOR3(-0.0000001f, 0.1196961f, 0.0000000f),
- D3DXVECTOR3(-0.0000001f, 0.1196961f, 0.0000000f),
-};
-
-const D3DXVECTOR4 accessor_344[] = {
- D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f),
- D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f),
-};
-
-const D3DXVECTOR3 accessor_345[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_346[] = {
- D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f),
- D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f),
-};
-
-const D3DXVECTOR4 accessor_347[] = {
- D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f),
- D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f),
-};
-
-const D3DXVECTOR3 accessor_348[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_349[] = {
- D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f),
- D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_350[] = {
- D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f),
- D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f),
-};
-
-const D3DXVECTOR3 accessor_351[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_352[] = {
- D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f),
- D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f),
-};
-
-const D3DXVECTOR4 accessor_353[] = {
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
- D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f),
-};
-
-const D3DXVECTOR3 accessor_354[] = {
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
- D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f),
-};
-
-const D3DXVECTOR3 accessor_355[] = {
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733793f, -0.0047631f),
- D3DXVECTOR3(-0.0020865f, 0.0733698f, -0.0056423f),
- D3DXVECTOR3(-0.0020865f, 0.0733313f, -0.0070283f),
- D3DXVECTOR3(-0.0020865f, 0.0732441f, -0.0087447f),
- D3DXVECTOR3(-0.0020865f, 0.0731043f, -0.0105567f),
- D3DXVECTOR3(-0.0020865f, 0.0729290f, -0.0122492f),
- D3DXVECTOR3(-0.0020865f, 0.0727471f, -0.0136797f),
- D3DXVECTOR3(-0.0020865f, 0.0725856f, -0.0147822f),
- D3DXVECTOR3(-0.0020865f, 0.0724631f, -0.0155441f),
- D3DXVECTOR3(-0.0020865f, 0.0723888f, -0.0159809f),
- D3DXVECTOR3(-0.0020865f, 0.0723645f, -0.0161198f),
- D3DXVECTOR3(-0.0020865f, 0.0726561f, -0.0157957f),
- D3DXVECTOR3(-0.0020865f, 0.0735829f, -0.0147587f),
- D3DXVECTOR3(-0.0020865f, 0.0752300f, -0.0128901f),
- D3DXVECTOR3(-0.0020865f, 0.0776915f, -0.0100333f),
- D3DXVECTOR3(-0.0020865f, 0.0810721f, -0.0059772f),
- D3DXVECTOR3(-0.0020865f, 0.0854873f, -0.0004292f),
- D3DXVECTOR3(-0.0020865f, 0.0910634f, 0.0070289f),
- D3DXVECTOR3(-0.0020865f, 0.0979341f, 0.0170215f),
- D3DXVECTOR3(-0.0020865f, 0.1062378f, 0.0305371f),
- D3DXVECTOR3(-0.0020865f, 0.1161616f, 0.0492570f),
- D3DXVECTOR3(-0.0020865f, 0.1288357f, 0.0756562f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1388586f, 0.0933949f),
- D3DXVECTOR3(-0.0020865f, 0.1383629f, 0.0924180f),
- D3DXVECTOR3(-0.0020865f, 0.1367864f, 0.0893140f),
- D3DXVECTOR3(-0.0020865f, 0.1339777f, 0.0838283f),
- D3DXVECTOR3(-0.0020865f, 0.1298050f, 0.0758746f),
- D3DXVECTOR3(-0.0020865f, 0.1242470f, 0.0657607f),
- D3DXVECTOR3(-0.0020865f, 0.1174958f, 0.0542613f),
- D3DXVECTOR3(-0.0020865f, 0.1099808f, 0.0424128f),
- D3DXVECTOR3(-0.0020865f, 0.1022687f, 0.0311766f),
- D3DXVECTOR3(-0.0020865f, 0.0949146f, 0.0212209f),
- D3DXVECTOR3(-0.0020865f, 0.0883559f, 0.0128914f),
- D3DXVECTOR3(-0.0020865f, 0.0828787f, 0.0062912f),
- D3DXVECTOR3(-0.0020865f, 0.0786319f, 0.0013782f),
- D3DXVECTOR3(-0.0020865f, 0.0756596f, -0.0019607f),
- D3DXVECTOR3(-0.0020865f, 0.0739336f, -0.0038633f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f),
-};
-
-const D3DXVECTOR4 accessor_356[] = {
- D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f),
- D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f),
-};
-
-const D3DXVECTOR3 accessor_357[] = {
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
- D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f),
-};
-
-const D3DXVECTOR3 accessor_358[] = {
- D3DXVECTOR3(-0.0000000f, 0.0919253f, 0.0000002f),
- D3DXVECTOR3(-0.0000000f, 0.0919253f, 0.0000002f),
-};
-
-const D3DXVECTOR4 accessor_359[] = {
- D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f),
- D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f),
-};
-
-const D3DXVECTOR3 accessor_360[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f),
-};
-
-const D3DXVECTOR3 accessor_361[] = {
- D3DXVECTOR3( 0.0000001f, 0.1196966f, 0.0000001f),
- D3DXVECTOR3( 0.0000001f, 0.1196966f, 0.0000001f),
-};
-
-const D3DXVECTOR4 accessor_362[] = {
- D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f),
- D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f),
-};
-
-const D3DXVECTOR3 accessor_363[] = {
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f),
-};
-
-const D3DXVECTOR3 accessor_364[] = {
- D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f),
- D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f),
-};
-
-const D3DXVECTOR4 accessor_365[] = {
- D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f),
- D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f),
-};
-
-const D3DXVECTOR3 accessor_366[] = {
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
- D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f),
-};
-
-const float accessor_367[] = {
- 0.0,
- 1.4583333730697632,
-};
-
-const D3DXVECTOR3 accessor_368[] = {
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
- D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f),
-};
-
-const float accessor_369[] = {
- 0.0,
- 0.0416666679084301,
- 0.0833333358168602,
- 0.125,
- 0.1666666716337204,
- 0.2083333283662796,
- 0.25,
- 0.2916666567325592,
- 0.3333333432674408,
- 0.375,
- 0.4166666567325592,
- 0.4583333432674408,
- 0.5,
- 0.5416666865348816,
- 0.5833333134651184,
- 0.625,
- 0.6666666865348816,
- 0.7083333134651184,
- 0.75,
- 0.7916666865348816,
- 0.8333333134651184,
- 0.875,
- 0.9166666865348816,
- 0.9583333134651184,
- 1.0,
- 1.0416666269302368,
- 1.0833333730697632,
- 1.125,
- 1.1666666269302368,
- 1.2083333730697632,
- 1.25,
- 1.2916666269302368,
- 1.3333333730697632,
- 1.375,
- 1.4166666269302368,
- 1.4583333730697632,
-};
-
-const D3DXVECTOR4 accessor_370[] = {
- D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f),
- D3DXVECTOR4(-0.0004076f, 0.0000000f, 0.0000000f, 0.9999999f),
- D3DXVECTOR4(-0.0016908f, 0.0000000f, 0.0000000f, 0.9999986f),
- D3DXVECTOR4(-0.0039414f, 0.0000000f, 0.0000000f, 0.9999923f),
- D3DXVECTOR4(-0.0072462f, 0.0000000f, 0.0000000f, 0.9999738f),
- D3DXVECTOR4(-0.0116751f, 0.0000000f, 0.0000000f, 0.9999319f),
- D3DXVECTOR4(-0.0172633f, 0.0000000f, 0.0000000f, 0.9998510f),
- D3DXVECTOR4(-0.0239899f, 0.0000000f, 0.0000000f, 0.9997122f),
- D3DXVECTOR4(-0.0317582f, 0.0000000f, 0.0000000f, 0.9994956f),
- D3DXVECTOR4(-0.0403872f, 0.0000000f, 0.0000000f, 0.9991841f),
- D3DXVECTOR4(-0.0496231f, 0.0000000f, 0.0000000f, 0.9987680f),
- D3DXVECTOR4(-0.0591723f, 0.0000000f, 0.0000000f, 0.9982477f),
- D3DXVECTOR4(-0.0687446f, 0.0000000f, 0.0000000f, 0.9976343f),
- D3DXVECTOR4(-0.0780917f, 0.0000000f, 0.0000000f, 0.9969462f),
- D3DXVECTOR4(-0.0870264f, 0.0000000f, 0.0000000f, 0.9962060f),
- D3DXVECTOR4(-0.0954261f, 0.0000000f, 0.0000000f, 0.9954365f),
- D3DXVECTOR4(-0.1032224f, 0.0000000f, 0.0000000f, 0.9946584f),
- D3DXVECTOR4(-0.1103887f, 0.0000000f, 0.0000000f, 0.9938886f),
- D3DXVECTOR4(-0.1169263f, 0.0000000f, 0.0000000f, 0.9931406f),
- D3DXVECTOR4(-0.1228541f, 0.0000000f, 0.0000000f, 0.9924248f),
- D3DXVECTOR4(-0.1282012f, 0.0000000f, 0.0000000f, 0.9917482f),
- D3DXVECTOR4(-0.1330013f, 0.0000000f, 0.0000000f, 0.9911159f),
- D3DXVECTOR4(-0.1372900f, 0.0000000f, 0.0000000f,
0.9905309f), - D3DXVECTOR4(-0.1411023f, 0.0000000f, 0.0000000f, 0.9899950f), - D3DXVECTOR4(-0.1444717f, 0.0000000f, 0.0000000f, 0.9895090f), - D3DXVECTOR4(-0.1474298f, 0.0000000f, 0.0000000f, 0.9890725f), - D3DXVECTOR4(-0.1500057f, 0.0000000f, 0.0000000f, 0.9886851f), - D3DXVECTOR4(-0.1522263f, 0.0000000f, 0.0000000f, 0.9883457f), - D3DXVECTOR4(-0.1541162f, 0.0000000f, 0.0000000f, 0.9880528f), - D3DXVECTOR4(-0.1556976f, 0.0000000f, 0.0000000f, 0.9878048f), - D3DXVECTOR4(-0.1569911f, 0.0000000f, 0.0000000f, 0.9876000f), - D3DXVECTOR4(-0.1580151f, 0.0000000f, 0.0000000f, 0.9874367f), - D3DXVECTOR4(-0.1587865f, 0.0000000f, 0.0000000f, 0.9873130f), - D3DXVECTOR4(-0.1593208f, 0.0000000f, 0.0000000f, 0.9872268f), - D3DXVECTOR4(-0.1596321f, 0.0000000f, 0.0000000f, 0.9871766f), - D3DXVECTOR4(-0.1597332f, 0.0000000f, 0.0000000f, 0.9871602f), -}; - -const D3DXVECTOR3 accessor_371[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9996425f, 0.9999951f), - D3DXVECTOR3( 1.0000000f, 0.9985171f, 0.9999799f), - D3DXVECTOR3( 1.0000000f, 0.9965431f, 0.9999531f), - D3DXVECTOR3( 1.0000000f, 0.9936445f, 0.9999138f), - D3DXVECTOR3( 1.0000000f, 0.9897598f, 0.9998610f), - D3DXVECTOR3( 1.0000000f, 0.9848580f, 0.9997945f), - D3DXVECTOR3( 1.0000000f, 0.9789570f, 0.9997146f), - D3DXVECTOR3( 1.0000000f, 0.9721409f, 0.9996219f), - D3DXVECTOR3( 1.0000000f, 0.9645676f, 0.9995192f), - D3DXVECTOR3( 1.0000000f, 0.9564588f, 0.9994091f), - D3DXVECTOR3( 1.0000000f, 0.9480710f, 0.9992954f), - D3DXVECTOR3( 1.0000000f, 0.9396580f, 0.9991812f), - D3DXVECTOR3( 1.0000000f, 0.9314376f, 0.9990696f), - D3DXVECTOR3( 1.0000000f, 0.9235743f, 0.9989629f), - D3DXVECTOR3( 1.0000000f, 0.9161762f, 0.9988625f), - D3DXVECTOR3( 1.0000000f, 0.9093043f, 0.9987693f), - D3DXVECTOR3( 1.0000000f, 0.9029827f, 0.9986835f), - D3DXVECTOR3( 1.0000000f, 0.8972114f, 0.9986051f), - D3DXVECTOR3( 1.0000000f, 0.8919743f, 0.9985340f), - D3DXVECTOR3( 1.0000000f, 0.8872471f, 0.9984699f), - D3DXVECTOR3( 1.0000000f, 0.8830007f, 0.9984124f), - D3DXVECTOR3( 1.0000000f, 0.8792043f, 0.9983608f), - D3DXVECTOR3( 1.0000000f, 0.8758278f, 0.9983150f), - D3DXVECTOR3( 1.0000000f, 0.8728420f, 0.9982744f), - D3DXVECTOR3( 1.0000000f, 0.8702194f, 0.9982388f), - D3DXVECTOR3( 1.0000000f, 0.8679347f, 0.9982079f), - D3DXVECTOR3( 1.0000000f, 0.8659645f, 0.9981811f), - D3DXVECTOR3( 1.0000000f, 0.8642871f, 0.9981584f), - D3DXVECTOR3( 1.0000000f, 0.8628830f, 0.9981393f), - D3DXVECTOR3( 1.0000000f, 0.8617345f, 0.9981238f), - D3DXVECTOR3( 1.0000000f, 0.8608249f, 0.9981114f), - D3DXVECTOR3( 1.0000000f, 0.8601398f, 0.9981021f), - D3DXVECTOR3( 1.0000000f, 0.8596650f, 0.9980956f), - D3DXVECTOR3( 1.0000000f, 0.8593884f, 0.9980919f), - D3DXVECTOR3( 1.0000000f, 0.8592988f, 0.9980907f), -}; - -const D3DXVECTOR3 accessor_372[] = { - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), -}; - -const D3DXVECTOR4 accessor_373[] = { - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), -}; - -const D3DXVECTOR3 accessor_374[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_375[] = { - D3DXVECTOR3( 0.0000000f, 0.2498153f, 0.0000001f), - D3DXVECTOR3(-0.0000000f, 0.2498147f, 0.0000002f), -}; - -const D3DXVECTOR4 accessor_376[] = { - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), -}; 
- -const D3DXVECTOR3 accessor_377[] = { - D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999998f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_378[] = { - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1346838f, -0.0001081f), - D3DXVECTOR3(-0.0000000f, 0.1346030f, -0.0004485f), - D3DXVECTOR3(-0.0000000f, 0.1344613f, -0.0010455f), - D3DXVECTOR3( 0.0000000f, 0.1342532f, -0.0019221f), - D3DXVECTOR3( 0.0000000f, 0.1339744f, -0.0030969f), - D3DXVECTOR3( 0.0000000f, 0.1336226f, -0.0045794f), - D3DXVECTOR3( 0.0000000f, 0.1331989f, -0.0063640f), - D3DXVECTOR3( 0.0000000f, 0.1327097f, -0.0084254f), - D3DXVECTOR3(-0.0000000f, 0.1321661f, -0.0107157f), - D3DXVECTOR3( 0.0000000f, 0.1315840f, -0.0131681f), - D3DXVECTOR3( 0.0000000f, 0.1309819f, -0.0157048f), - D3DXVECTOR3( 0.0000000f, 0.1303780f, -0.0182491f), - D3DXVECTOR3( 0.0000000f, 0.1297879f, -0.0207352f), - D3DXVECTOR3( 0.0000000f, 0.1292235f, -0.0231133f), - D3DXVECTOR3( 0.0000000f, 0.1286924f, -0.0253506f), - D3DXVECTOR3( 0.0000000f, 0.1281991f, -0.0274289f), - D3DXVECTOR3( 0.0000000f, 0.1277453f, -0.0293407f), - D3DXVECTOR3( 0.0000000f, 0.1273311f, -0.0310862f), - D3DXVECTOR3( 0.0000000f, 0.1269551f, -0.0326700f), - D3DXVECTOR3( 0.0000000f, 0.1266158f, -0.0340996f), - D3DXVECTOR3( 0.0000000f, 0.1263110f, -0.0353839f), - D3DXVECTOR3( 0.0000000f, 0.1260385f, -0.0365320f), - D3DXVECTOR3( 0.0000000f, 0.1257961f, -0.0375531f), - D3DXVECTOR3( 0.0000000f, 0.1255818f, -0.0384562f), - D3DXVECTOR3( 0.0000000f, 0.1253936f, -0.0392493f), - D3DXVECTOR3( 0.0000000f, 0.1252295f, -0.0399402f), - D3DXVECTOR3( 0.0000000f, 0.1250881f, -0.0405361f), - D3DXVECTOR3( 0.0000000f, 0.1249677f, -0.0410434f), - D3DXVECTOR3( 0.0000000f, 0.1248669f, -0.0414680f), - D3DXVECTOR3( 0.0000000f, 0.1247845f, -0.0418154f), - D3DXVECTOR3( 0.0000000f, 0.1247192f, -0.0420904f), - D3DXVECTOR3( 0.0000000f, 0.1246700f, -0.0422977f), - D3DXVECTOR3( 0.0000000f, 0.1246359f, -0.0424412f), - D3DXVECTOR3( 0.0000000f, 0.1246161f, -0.0425249f), - D3DXVECTOR3( 0.0000000f, 0.1246096f, -0.0425520f), -}; - -const D3DXVECTOR4 accessor_379[] = { - D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0003120f, 0.0000000f, 0.0000000f, 0.9999999f), - D3DXVECTOR4(-0.0012946f, 0.0000000f, 0.0000000f, 0.9999992f), - D3DXVECTOR4(-0.0030179f, 0.0000000f, 0.0000000f, 0.9999955f), - D3DXVECTOR4(-0.0055485f, 0.0000000f, 0.0000000f, 0.9999846f), - D3DXVECTOR4(-0.0089399f, 0.0000000f, 0.0000000f, 0.9999601f), - D3DXVECTOR4(-0.0132191f, 0.0000000f, 0.0000000f, 0.9999126f), - D3DXVECTOR4(-0.0183703f, 0.0000000f, 0.0000000f, 0.9998313f), - D3DXVECTOR4(-0.0243197f, 0.0000000f, 0.0000000f, 0.9997042f), - D3DXVECTOR4(-0.0309290f, 0.0000000f, 0.0000000f, 0.9995216f), - D3DXVECTOR4(-0.0380041f, 0.0000000f, 0.0000000f, 0.9992776f), - D3DXVECTOR4(-0.0453207f, 0.0000000f, 0.0000000f, 0.9989725f), - D3DXVECTOR4(-0.0526567f, 0.0000000f, 0.0000000f, 0.9986127f), - D3DXVECTOR4(-0.0598221f, 0.0000000f, 0.0000000f, 0.9982091f), - D3DXVECTOR4(-0.0666734f, 0.0000000f, 0.0000000f, 0.9977748f), - D3DXVECTOR4(-0.0731163f, 0.0000000f, 0.0000000f, 0.9973235f), - D3DXVECTOR4(-0.0790985f, 0.0000000f, 0.0000000f, 0.9968668f), - D3DXVECTOR4(-0.0845989f, 0.0000000f, 0.0000000f, 0.9964151f), - D3DXVECTOR4(-0.0896185f, 0.0000000f, 0.0000000f, 0.9959762f), - D3DXVECTOR4(-0.0941712f, 0.0000000f, 0.0000000f, 0.9955560f), - D3DXVECTOR4(-0.0982792f, 0.0000000f, 0.0000000f, 0.9951589f), - D3DXVECTOR4(-0.1019679f, 0.0000000f, 0.0000000f, 0.9947877f), - 
D3DXVECTOR4(-0.1052644f, 0.0000000f, 0.0000000f, 0.9944443f), - D3DXVECTOR4(-0.1081954f, 0.0000000f, 0.0000000f, 0.9941297f), - D3DXVECTOR4(-0.1107865f, 0.0000000f, 0.0000000f, 0.9938442f), - D3DXVECTOR4(-0.1130617f, 0.0000000f, 0.0000000f, 0.9935880f), - D3DXVECTOR4(-0.1150433f, 0.0000000f, 0.0000000f, 0.9933605f), - D3DXVECTOR4(-0.1167519f, 0.0000000f, 0.0000000f, 0.9931611f), - D3DXVECTOR4(-0.1182062f, 0.0000000f, 0.0000000f, 0.9929891f), - D3DXVECTOR4(-0.1194233f, 0.0000000f, 0.0000000f, 0.9928434f), - D3DXVECTOR4(-0.1204187f, 0.0000000f, 0.0000000f, 0.9927232f), - D3DXVECTOR4(-0.1212069f, 0.0000000f, 0.0000000f, 0.9926273f), - D3DXVECTOR4(-0.1218008f, 0.0000000f, 0.0000000f, 0.9925546f), - D3DXVECTOR4(-0.1222121f, 0.0000000f, 0.0000000f, 0.9925041f), - D3DXVECTOR4(-0.1224517f, 0.0000000f, 0.0000000f, 0.9924745f), - D3DXVECTOR4(-0.1225295f, 0.0000000f, 0.0000000f, 0.9924649f), -}; - -const D3DXVECTOR3 accessor_380[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_381[] = { - D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1115194f, -0.0001042f), - D3DXVECTOR3( 0.0000000f, 0.1110281f, -0.0004322f), - D3DXVECTOR3(-0.0000000f, 0.1101664f, -0.0010075f), - D3DXVECTOR3( 0.0000000f, 0.1089009f, -0.0018524f), - D3DXVECTOR3(-0.0000000f, 0.1072050f, -0.0029846f), - D3DXVECTOR3( 0.0000000f, 0.1050651f, -0.0044133f), - D3DXVECTOR3(-0.0000000f, 0.1024889f, -0.0061332f), - D3DXVECTOR3(-0.0000000f, 0.0995133f, -0.0081198f), - D3DXVECTOR3(-0.0000000f, 0.0962070f, -0.0103272f), - D3DXVECTOR3(-0.0000000f, 0.0926670f, -0.0126906f), - D3DXVECTOR3(-0.0000000f, 0.0890052f, -0.0151353f), - D3DXVECTOR3(-0.0000000f, 0.0853324f, -0.0175874f), - D3DXVECTOR3(-0.0000000f, 0.0817437f, -0.0199833f), - D3DXVECTOR3(-0.0000000f, 0.0783109f, -0.0222751f), - D3DXVECTOR3(-0.0000000f, 0.0750812f, -0.0244313f), - D3DXVECTOR3( 0.0000000f, 0.0720812f, -0.0264343f), - D3DXVECTOR3(-0.0000000f, 0.0693214f, -0.0282768f), - D3DXVECTOR3(-0.0000000f, 0.0668018f, -0.0299589f), - D3DXVECTOR3(-0.0000000f, 0.0645156f, -0.0314853f), - D3DXVECTOR3( 0.0000000f, 0.0624519f, -0.0328631f), - D3DXVECTOR3(-0.0000000f, 0.0605980f, -0.0341008f), - D3DXVECTOR3(-0.0000000f, 0.0589407f, -0.0352072f), - D3DXVECTOR3(-0.0000000f, 0.0574667f, -0.0361914f), - D3DXVECTOR3(-0.0000000f, 0.0561631f, -0.0370616f), - D3DXVECTOR3(-0.0000000f, 0.0550182f, -0.0378260f), - D3DXVECTOR3(-0.0000000f, 0.0540208f, -0.0384919f), - D3DXVECTOR3( 0.0000000f, 0.0531607f, -0.0390662f), - D3DXVECTOR3(-0.0000000f, 0.0524284f, -0.0395551f), - D3DXVECTOR3( 0.0000000f, 0.0518155f, -0.0399642f), - D3DXVECTOR3( 0.0000000f, 0.0513140f, -0.0402990f), - D3DXVECTOR3( 0.0000000f, 0.0509170f, -0.0405641f), - D3DXVECTOR3(-0.0000000f, 0.0506178f, -0.0407638f), - D3DXVECTOR3(-0.0000000f, 0.0504106f, -0.0409022f), - D3DXVECTOR3(-0.0000000f, 0.0502899f, -0.0409828f), - D3DXVECTOR3( 0.0000000f, 0.0502506f, -0.0410090f), -}; - -const D3DXVECTOR4 accessor_382[] = { - D3DXVECTOR4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), - D3DXVECTOR4( 0.0004308f, -0.0000004f, -0.0008704f, 0.9999995f), - D3DXVECTOR4( 0.0017872f, -0.0000002f, -0.0001396f, 0.9999984f), - D3DXVECTOR4( 0.0041660f, 0.0000042f, 0.0010036f, 0.9999908f), - D3DXVECTOR4( 0.0076592f, 0.0000192f, 0.0025053f, 0.9999676f), - D3DXVECTOR4( 0.0123404f, 0.0000533f, 0.0043171f, 0.9999145f), - D3DXVECTOR4( 0.0182468f, 0.0001167f, 0.0063941f, 0.9998131f), - D3DXVECTOR4( 0.0253094f, 0.0055413f, 0.0085593f, 0.9996276f), 
- D3DXVECTOR4( 0.0333918f, 0.0155730f, 0.0106662f, 0.9992641f), - D3DXVECTOR4( 0.0423915f, 0.0210981f, 0.0129269f, 0.9987947f), - D3DXVECTOR4( 0.0524793f, -0.0013882f, 0.0166593f, 0.9984821f), - D3DXVECTOR4( 0.0629941f, -0.0237829f, 0.0209049f, 0.9975115f), - D3DXVECTOR4( 0.0725314f, 0.0063933f, 0.0218316f, 0.9971067f), - D3DXVECTOR4( 0.0815918f, 0.0365677f, 0.0221360f, 0.9957488f), - D3DXVECTOR4( 0.0919024f, 0.0038264f, 0.0276729f, 0.9953761f), - D3DXVECTOR4( 0.1017266f, -0.0288304f, 0.0337013f, 0.9938233f), - D3DXVECTOR4( 0.1087924f, 0.0104874f, 0.0322857f, 0.9934847f), - D3DXVECTOR4( 0.1148307f, 0.0497130f, 0.0300239f, 0.9916860f), - D3DXVECTOR4( 0.1235279f, 0.0031324f, 0.0377347f, 0.9916185f), - D3DXVECTOR4( 0.1314894f, -0.0434065f, 0.0456742f, 0.9893130f), - D3DXVECTOR4( 0.1351532f, 0.0103212f, 0.0401490f, 0.9899572f), - D3DXVECTOR4( 0.1377223f, 0.0638586f, 0.0334506f, 0.9878439f), - D3DXVECTOR4( 0.1446365f, 0.0129983f, 0.0410873f, 0.9885460f), - D3DXVECTOR4( 0.1506421f, -0.0380793f, 0.0475474f, 0.9867098f), - D3DXVECTOR4( 0.1523061f, 0.0121889f, 0.0336960f, 0.9876836f), - D3DXVECTOR4( 0.1541551f, 0.0613527f, 0.0131141f, 0.9860528f), - D3DXVECTOR4( 0.1583794f, 0.0083855f, 0.0131145f, 0.9872556f), - D3DXVECTOR4( 0.1610744f, -0.0440960f, 0.0165747f, 0.9858174f), - D3DXVECTOR4( 0.1629164f, -0.0232214f, 0.0098386f, 0.9863175f), - D3DXVECTOR4( 0.1644170f, 0.0161364f, 0.0009279f, 0.9862585f), - D3DXVECTOR4( 0.1656732f, 0.0372386f, -0.0043580f, 0.9854677f), - D3DXVECTOR4( 0.1668234f, 0.0332291f, -0.0049411f, 0.9854143f), - D3DXVECTOR4( 0.1677081f, 0.0240142f, -0.0042448f, 0.9855350f), - D3DXVECTOR4( 0.1683121f, 0.0130929f, -0.0029379f, 0.9856424f), - D3DXVECTOR4( 0.1686491f, 0.0039690f, -0.0016794f, 0.9856668f), - D3DXVECTOR4( 0.1687535f, 0.0001485f, -0.0011179f, 0.9856576f), -}; - -const D3DXVECTOR3 accessor_383[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0002844f, 0.9995330f, 0.9999747f), - D3DXVECTOR3( 1.0011802f, 0.9980625f, 0.9998952f), - D3DXVECTOR3( 1.0027509f, 0.9954833f, 0.9997556f), - D3DXVECTOR3( 1.0050577f, 0.9916960f, 0.9995508f), - D3DXVECTOR3( 1.0081490f, 0.9866204f, 0.9992760f), - D3DXVECTOR3( 1.0120502f, 0.9802159f, 0.9989296f), - D3DXVECTOR3( 1.0167462f, 0.9725057f, 0.9985124f), - D3DXVECTOR3( 1.0221705f, 0.9635997f, 0.9980305f), - D3DXVECTOR3( 1.0281975f, 0.9537047f, 0.9974951f), - D3DXVECTOR3( 1.0346508f, 0.9431097f, 0.9969220f), - D3DXVECTOR3( 1.0413260f, 0.9321504f, 0.9963289f), - D3DXVECTOR3( 1.0480207f, 0.9211581f, 0.9957342f), - D3DXVECTOR3( 1.0545628f, 0.9104177f, 0.9951530f), - D3DXVECTOR3( 1.0608205f, 0.9001434f, 0.9945972f), - D3DXVECTOR3( 1.0667080f, 0.8904773f, 0.9940742f), - D3DXVECTOR3( 1.0721768f, 0.8814985f, 0.9935883f), - D3DXVECTOR3( 1.0772076f, 0.8732389f, 0.9931415f), - D3DXVECTOR3( 1.0818005f, 0.8656982f, 0.9927335f), - D3DXVECTOR3( 1.0859680f, 0.8588557f, 0.9923633f), - D3DXVECTOR3( 1.0897303f, 0.8526793f, 0.9920291f), - D3DXVECTOR3( 1.0931095f, 0.8471308f, 0.9917288f), - D3DXVECTOR3( 1.0961307f, 0.8421705f, 0.9914606f), - D3DXVECTOR3( 1.0988178f, 0.8377588f, 0.9912218f), - D3DXVECTOR3( 1.1011939f, 0.8338575f, 0.9910107f), - D3DXVECTOR3( 1.1032810f, 0.8304309f, 0.9908254f), - D3DXVECTOR3( 1.1050994f, 0.8274459f, 0.9906639f), - D3DXVECTOR3( 1.1066670f, 0.8248714f, 0.9905245f), - D3DXVECTOR3( 1.1080019f, 0.8226799f, 0.9904059f), - D3DXVECTOR3( 1.1091194f, 0.8208455f, 0.9903066f), - D3DXVECTOR3( 1.1100335f, 0.8193448f, 0.9902255f), - D3DXVECTOR3( 1.1107574f, 0.8181564f, 0.9901612f), - D3DXVECTOR3( 1.1113026f, 0.8172610f, 
0.9901127f), - D3DXVECTOR3( 1.1116803f, 0.8166409f, 0.9900792f), - D3DXVECTOR3( 1.1119004f, 0.8162794f, 0.9900597f), - D3DXVECTOR3( 1.1119719f, 0.8161622f, 0.9900533f), -}; - -const D3DXVECTOR3 accessor_384[] = { - D3DXVECTOR3(-0.0005524f, 0.0688295f, -0.0213631f), - D3DXVECTOR3(-0.0005524f, 0.0688295f, -0.0213631f), -}; - -const D3DXVECTOR4 accessor_385[] = { - D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), - D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), -}; - -const D3DXVECTOR3 accessor_386[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_387[] = { - D3DXVECTOR3( 0.0000000f, 0.3082207f, -0.0000010f), - D3DXVECTOR3( 0.0000000f, 0.3082177f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_388[] = { - D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), - D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), -}; - -const D3DXVECTOR3 accessor_389[] = { - D3DXVECTOR3( 1.0000001f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_390[] = { - D3DXVECTOR3( 0.0000000f, 0.3056044f, 0.0000002f), - D3DXVECTOR3( 0.0000000f, 0.3056000f, 0.0000004f), -}; - -const D3DXVECTOR4 accessor_391[] = { - D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), - D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), -}; - -const D3DXVECTOR3 accessor_392[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_393[] = { - D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f), - D3DXVECTOR3( 0.0013054f, 0.3460192f, 0.0048573f), - D3DXVECTOR3( 0.0013076f, 0.3450401f, 0.0060825f), - D3DXVECTOR3( 0.0013116f, 0.3433230f, 0.0082313f), - D3DXVECTOR3( 0.0013174f, 0.3408013f, 0.0113867f), - D3DXVECTOR3( 0.0013252f, 0.3374218f, 0.0156154f), - D3DXVECTOR3( 0.0013350f, 0.3331575f, 0.0209514f), - D3DXVECTOR3( 0.0013468f, 0.3280240f, 0.0273751f), - D3DXVECTOR3( 0.0013604f, 0.3220945f, 0.0347950f), - D3DXVECTOR3( 0.0013756f, 0.3155062f, 0.0430391f), - D3DXVECTOR3( 0.0013918f, 0.3084520f, 0.0518662f), - D3DXVECTOR3( 0.0014086f, 0.3011551f, 0.0609970f), - D3DXVECTOR3( 0.0014255f, 0.2938364f, 0.0701552f), - D3DXVECTOR3( 0.0014419f, 0.2866851f, 0.0791037f), - D3DXVECTOR3( 0.0014576f, 0.2798445f, 0.0876636f), - D3DXVECTOR3( 0.0014725f, 0.2734087f, 0.0957168f), - D3DXVECTOR3( 0.0014862f, 0.2674304f, 0.1031975f), - D3DXVECTOR3( 0.0014989f, 0.2619310f, 0.1100791f), - D3DXVECTOR3( 0.0015104f, 0.2569104f, 0.1163617f), - D3DXVECTOR3( 0.0015209f, 0.2523544f, 0.1220626f), - D3DXVECTOR3( 0.0015304f, 0.2482421f, 0.1272085f), - D3DXVECTOR3( 0.0015389f, 0.2445479f, 0.1318311f), - D3DXVECTOR3( 0.0015465f, 0.2412453f, 0.1359637f), - D3DXVECTOR3( 0.0015532f, 0.2383079f, 0.1396393f), - D3DXVECTOR3( 0.0015592f, 0.2357105f, 0.1428897f), - D3DXVECTOR3( 0.0015644f, 0.2334290f, 0.1457445f), - D3DXVECTOR3( 0.0015690f, 0.2314415f, 0.1482316f), - D3DXVECTOR3( 0.0015730f, 0.2297274f, 0.1503764f), - D3DXVECTOR3( 0.0015763f, 0.2282682f, 0.1522023f), - D3DXVECTOR3( 0.0015791f, 0.2270469f, 0.1537307f), - D3DXVECTOR3( 0.0015814f, 0.2260476f, 0.1549810f), - D3DXVECTOR3( 0.0015832f, 0.2252564f, 0.1559711f), - D3DXVECTOR3( 0.0015846f, 0.2246604f, 0.1567170f), - D3DXVECTOR3( 0.0015856f, 0.2242474f, 0.1572338f), - D3DXVECTOR3( 0.0015861f, 0.2240068f, 0.1575347f), - D3DXVECTOR3( 0.0015863f, 0.2239287f, 0.1576325f), -}; - -const D3DXVECTOR4 accessor_394[] = { - 
D3DXVECTOR4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), - D3DXVECTOR4( 0.0002590f, 0.0000002f, 0.0011287f, 0.9999993f), - D3DXVECTOR4( 0.0010745f, 0.0000010f, 0.0011287f, 0.9999988f), - D3DXVECTOR4( 0.0025048f, 0.0000024f, 0.0011286f, 0.9999962f), - D3DXVECTOR4( 0.0046051f, 0.0000045f, 0.0011286f, 0.9999888f), - D3DXVECTOR4( 0.0074198f, 0.0000072f, 0.0011286f, 0.9999719f), - D3DXVECTOR4( 0.0109714f, 0.0000106f, 0.0011285f, 0.9999392f), - D3DXVECTOR4( 0.0152469f, 0.0000148f, 0.0011285f, 0.9998831f), - D3DXVECTOR4( 0.0201850f, 0.0000195f, 0.0011284f, 0.9997957f), - D3DXVECTOR4( 0.0256711f, 0.0000248f, 0.0011282f, 0.9996698f), - D3DXVECTOR4( 0.0315443f, 0.0000305f, 0.0011281f, 0.9995017f), - D3DXVECTOR4( 0.0376184f, 0.0000364f, 0.0011278f, 0.9992915f), - D3DXVECTOR4( 0.0437093f, 0.0000423f, 0.0011276f, 0.9990436f), - D3DXVECTOR4( 0.0496592f, 0.0000481f, 0.0011273f, 0.9987656f), - D3DXVECTOR4( 0.0553491f, 0.0000536f, 0.0011271f, 0.9984664f), - D3DXVECTOR4( 0.0607005f, 0.0000587f, 0.0011268f, 0.9981554f), - D3DXVECTOR4( 0.0656700f, 0.0000636f, 0.0011265f, 0.9978408f), - D3DXVECTOR4( 0.0702399f, 0.0000680f, 0.0011262f, 0.9975295f), - D3DXVECTOR4( 0.0744109f, 0.0000720f, 0.0011259f, 0.9972271f), - D3DXVECTOR4( 0.0781945f, 0.0000757f, 0.0011257f, 0.9969375f), - D3DXVECTOR4( 0.0816088f, 0.0000790f, 0.0011254f, 0.9966638f), - D3DXVECTOR4( 0.0846752f, 0.0000820f, 0.0011252f, 0.9964080f), - D3DXVECTOR4( 0.0874157f, 0.0000846f, 0.0011250f, 0.9961713f), - D3DXVECTOR4( 0.0898527f, 0.0000870f, 0.0011248f, 0.9959545f), - D3DXVECTOR4( 0.0920072f, 0.0000891f, 0.0011246f, 0.9957578f), - D3DXVECTOR4( 0.0938993f, 0.0000909f, 0.0011245f, 0.9955811f), - D3DXVECTOR4( 0.0955473f, 0.0000925f, 0.0011243f, 0.9954243f), - D3DXVECTOR4( 0.0969684f, 0.0000939f, 0.0011242f, 0.9952869f), - D3DXVECTOR4( 0.0981780f, 0.0000950f, 0.0011241f, 0.9951683f), - D3DXVECTOR4( 0.0991903f, 0.0000960f, 0.0011240f, 0.9950678f), - D3DXVECTOR4( 0.1000184f, 0.0000968f, 0.0011240f, 0.9949850f), - D3DXVECTOR4( 0.1006742f, 0.0000974f, 0.0011239f, 0.9949188f), - D3DXVECTOR4( 0.1011681f, 0.0000979f, 0.0011238f, 0.9948688f), - D3DXVECTOR4( 0.1015103f, 0.0000983f, 0.0011238f, 0.9948339f), - D3DXVECTOR4( 0.1017097f, 0.0000985f, 0.0011238f, 0.9948135f), - D3DXVECTOR4( 0.1017744f, 0.0000985f, 0.0011238f, 0.9948069f), -}; - -const D3DXVECTOR3 accessor_395[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 1.0003822f, 1.0000035f), - D3DXVECTOR3( 0.9999999f, 1.0015857f, 1.0000148f), - D3DXVECTOR3( 0.9999999f, 1.0036966f, 1.0000347f), - D3DXVECTOR3( 0.9999999f, 1.0067961f, 1.0000644f), - D3DXVECTOR3( 0.9999999f, 1.0109502f, 1.0001035f), - D3DXVECTOR3( 0.9999999f, 1.0161920f, 1.0001529f), - D3DXVECTOR3( 0.9999999f, 1.0225023f, 1.0002126f), - D3DXVECTOR3( 0.9999999f, 1.0297909f, 1.0002813f), - D3DXVECTOR3( 0.9999999f, 1.0378897f, 1.0003579f), - D3DXVECTOR3( 0.9999999f, 1.0465610f, 1.0004400f), - D3DXVECTOR3( 0.9999999f, 1.0555302f, 1.0005245f), - D3DXVECTOR3( 0.9999999f, 1.0645267f, 1.0006095f), - D3DXVECTOR3( 0.9999999f, 1.0733171f, 1.0006927f), - D3DXVECTOR3( 0.9999999f, 1.0817257f, 1.0007721f), - D3DXVECTOR3( 1.0000001f, 1.0896368f, 1.0008471f), - D3DXVECTOR3( 0.9999999f, 1.0969852f, 1.0009164f), - D3DXVECTOR3( 0.9999999f, 1.1037451f, 1.0009803f), - D3DXVECTOR3( 1.0000001f, 1.1099169f, 1.0010386f), - D3DXVECTOR3( 1.0000002f, 1.1155170f, 1.0010915f), - D3DXVECTOR3( 1.0000001f, 1.1205721f, 1.0011392f), - D3DXVECTOR3( 0.9999999f, 1.1251130f, 1.0011820f), - D3DXVECTOR3( 1.0000002f, 1.1291726f, 1.0012205f), - 
D3DXVECTOR3( 0.9999999f, 1.1327834f, 1.0012544f), - D3DXVECTOR3( 1.0000001f, 1.1359764f, 1.0012847f), - D3DXVECTOR3( 0.9999998f, 1.1387808f, 1.0013111f), - D3DXVECTOR3( 0.9999999f, 1.1412241f, 1.0013344f), - D3DXVECTOR3( 0.9999999f, 1.1433307f, 1.0013542f), - D3DXVECTOR3( 0.9999999f, 1.1451243f, 1.0013711f), - D3DXVECTOR3( 1.0000001f, 1.1466256f, 1.0013855f), - D3DXVECTOR3( 0.9999999f, 1.1478541f, 1.0013970f), - D3DXVECTOR3( 0.9999999f, 1.1488265f, 1.0014062f), - D3DXVECTOR3( 0.9999999f, 1.1495593f, 1.0014130f), - D3DXVECTOR3( 0.9999999f, 1.1500669f, 1.0014180f), - D3DXVECTOR3( 0.9999999f, 1.1503626f, 1.0014206f), - D3DXVECTOR3( 1.0000001f, 1.1504586f, 1.0014217f), -}; - -const D3DXVECTOR3 accessor_396[] = { - D3DXVECTOR3( 0.2488541f, 0.2033313f, -0.0450287f), - D3DXVECTOR3( 0.2488541f, 0.2033312f, -0.0450287f), -}; - -const D3DXVECTOR4 accessor_397[] = { - D3DXVECTOR4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), - D3DXVECTOR4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), -}; - -const D3DXVECTOR3 accessor_398[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_399[] = { - D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f), - D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_400[] = { - D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), - D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), -}; - -const D3DXVECTOR3 accessor_401[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), -}; - -const D3DXVECTOR3 accessor_402[] = { - D3DXVECTOR3(-0.0000000f, 0.1024613f, -0.0000003f), - D3DXVECTOR3( 0.0000000f, 0.1024673f, 0.0000021f), -}; - -const D3DXVECTOR4 accessor_403[] = { - D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), - D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), -}; - -const D3DXVECTOR3 accessor_404[] = { - D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_405[] = { - D3DXVECTOR3(-0.0000000f, 0.1026039f, 0.0000002f), - D3DXVECTOR3( 0.0000000f, 0.1026061f, -0.0000003f), -}; - -const D3DXVECTOR4 accessor_406[] = { - D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), - D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), -}; - -const D3DXVECTOR3 accessor_407[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), - D3DXVECTOR3( 1.0000000f, 0.9999998f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_408[] = { - D3DXVECTOR3(-0.0000000f, 0.1033674f, -0.0000001f), - D3DXVECTOR3( 0.0000000f, 0.1033664f, 0.0000010f), -}; - -const D3DXVECTOR4 accessor_409[] = { - D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), - D3DXVECTOR4( 0.0181255f, 0.0003339f, 0.0000077f, 0.9998357f), -}; - -const D3DXVECTOR3 accessor_410[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_411[] = { - D3DXVECTOR3(-0.0000000f, 0.1012845f, -0.0000001f), - D3DXVECTOR3( 0.0000000f, 0.1012722f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_412[] = { - D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), - D3DXVECTOR4( 0.0250840f, -0.0004247f, 0.0000082f, 0.9996852f), -}; - -const D3DXVECTOR3 accessor_413[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999998f), -}; - -const D3DXVECTOR3 accessor_414[] = { - 
D3DXVECTOR3(-0.0000000f, 0.1024317f, -0.0000007f), - D3DXVECTOR3( 0.0000000f, 0.1024291f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_415[] = { - D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), - D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), -}; - -const D3DXVECTOR3 accessor_416[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_417[] = { - D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f), - D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f), -}; - -const D3DXVECTOR4 accessor_418[] = { - D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), - D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), -}; - -const D3DXVECTOR3 accessor_419[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_420[] = { - D3DXVECTOR3(-0.0000000f, 0.3082130f, -0.0000004f), - D3DXVECTOR3( 0.0000000f, 0.3082150f, 0.0000001f), -}; - -const D3DXVECTOR4 accessor_421[] = { - D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), - D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), -}; - -const D3DXVECTOR3 accessor_422[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f), - D3DXVECTOR3( 1.0000001f, 1.0000001f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_423[] = { - D3DXVECTOR3(-0.0000000f, 0.3055982f, -0.0000002f), - D3DXVECTOR3(-0.0000000f, 0.3056078f, -0.0000030f), -}; - -const D3DXVECTOR4 accessor_424[] = { - D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), - D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), -}; - -const D3DXVECTOR3 accessor_425[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999998f), -}; - -const D3DXVECTOR3 accessor_426[] = { - D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f), - D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_427[] = { - D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), - D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), -}; - -const D3DXVECTOR3 accessor_428[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), -}; - -const D3DXVECTOR3 accessor_429[] = { - D3DXVECTOR3( 0.0000000f, 0.1024598f, 0.0000012f), - D3DXVECTOR3(-0.0000000f, 0.1024574f, -0.0000004f), -}; - -const D3DXVECTOR4 accessor_430[] = { - D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), - D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), -}; - -const D3DXVECTOR3 accessor_431[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_432[] = { - D3DXVECTOR3(-0.0000000f, 0.1026044f, 0.0000000f), - D3DXVECTOR3(-0.0000001f, 0.1026062f, 0.0000004f), -}; - -const D3DXVECTOR4 accessor_433[] = { - D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), - D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), -}; - -const D3DXVECTOR3 accessor_434[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_435[] = { - D3DXVECTOR3( 0.0000000f, 0.1033676f, 0.0000003f), - D3DXVECTOR3(-0.0000000f, 0.1033718f, 0.0000001f), -}; - -const D3DXVECTOR4 accessor_436[] = { - D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), - D3DXVECTOR4( 0.0181256f, -0.0003326f, 
-0.0000077f, 0.9998357f), -}; - -const D3DXVECTOR3 accessor_437[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_438[] = { - D3DXVECTOR3(-0.0000000f, 0.1012811f, -0.0000002f), - D3DXVECTOR3( 0.0000000f, 0.1012763f, -0.0000033f), -}; - -const D3DXVECTOR4 accessor_439[] = { - D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), - D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), -}; - -const D3DXVECTOR3 accessor_440[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_441[] = { - D3DXVECTOR3( 0.0000000f, 0.1024289f, -0.0000001f), - D3DXVECTOR3( 0.0000000f, 0.1024334f, -0.0000007f), -}; - -const D3DXVECTOR4 accessor_442[] = { - D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), - D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), -}; - -const D3DXVECTOR3 accessor_443[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_444[] = { - D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f), - D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f), -}; - -const D3DXVECTOR4 accessor_445[] = { - D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), - D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), -}; - -const D3DXVECTOR3 accessor_446[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_447[] = { - D3DXVECTOR3(-0.0000000f, 0.2498146f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.2498151f, 0.0000002f), -}; - -const D3DXVECTOR4 accessor_448[] = { - D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), - D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), -}; - -const D3DXVECTOR3 accessor_449[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_450[] = { - D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f), - D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_451[] = { - D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), - D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), -}; - -const D3DXVECTOR3 accessor_452[] = { - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_453[] = { - D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f), - D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f), -}; - -const D3DXVECTOR4 accessor_454[] = { - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), -}; - -const D3DXVECTOR3 accessor_455[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_456[] = { - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733559f, -0.0045231f), - D3DXVECTOR3(-0.0020865f, 0.0732797f, -0.0046957f), - D3DXVECTOR3(-0.0020865f, 0.0731451f, -0.0049973f), - D3DXVECTOR3(-0.0020865f, 0.0729449f, -0.0054381f), - D3DXVECTOR3(-0.0020865f, 0.0726721f, -0.0060248f), - D3DXVECTOR3(-0.0020865f, 0.0723206f, -0.0067582f), - D3DXVECTOR3(-0.0020865f, 0.0718868f, -0.0076310f), - D3DXVECTOR3(-0.0020865f, 0.0713715f, -0.0086251f), - D3DXVECTOR3(-0.0020865f, 
0.0707815f, -0.0097117f), - D3DXVECTOR3(-0.0020865f, 0.0701300f, -0.0108540f), - D3DXVECTOR3(-0.0020865f, 0.0694352f, -0.0120121f), - D3DXVECTOR3(-0.0020865f, 0.0687175f, -0.0131494f), - D3DXVECTOR3(-0.0020865f, 0.0679968f, -0.0142367f), - D3DXVECTOR3(-0.0020865f, 0.0672901f, -0.0152545f), - D3DXVECTOR3(-0.0020865f, 0.0666102f, -0.0161917f), - D3DXVECTOR3(-0.0020865f, 0.0659661f, -0.0170446f), - D3DXVECTOR3(-0.0020865f, 0.0653632f, -0.0178139f), - D3DXVECTOR3(-0.0020865f, 0.0648043f, -0.0185033f), - D3DXVECTOR3(-0.0020865f, 0.0642904f, -0.0191182f), - D3DXVECTOR3(-0.0020865f, 0.0638212f, -0.0196645f), - D3DXVECTOR3(-0.0020865f, 0.0633954f, -0.0201480f), - D3DXVECTOR3(-0.0020865f, 0.0630115f, -0.0205745f), - D3DXVECTOR3(-0.0020865f, 0.0626674f, -0.0209492f), - D3DXVECTOR3(-0.0020865f, 0.0623612f, -0.0212770f), - D3DXVECTOR3(-0.0020865f, 0.0620907f, -0.0215621f), - D3DXVECTOR3(-0.0020865f, 0.0618539f, -0.0218083f), - D3DXVECTOR3(-0.0020865f, 0.0616489f, -0.0220190f), - D3DXVECTOR3(-0.0020865f, 0.0614737f, -0.0221972f), - D3DXVECTOR3(-0.0020865f, 0.0613267f, -0.0223456f), - D3DXVECTOR3(-0.0020865f, 0.0612061f, -0.0224664f), - D3DXVECTOR3(-0.0020865f, 0.0611105f, -0.0225617f), - D3DXVECTOR3(-0.0020865f, 0.0610383f, -0.0226332f), - D3DXVECTOR3(-0.0020865f, 0.0609883f, -0.0226827f), - D3DXVECTOR3(-0.0020865f, 0.0609591f, -0.0227115f), - D3DXVECTOR3(-0.0020865f, 0.0609496f, -0.0227208f), -}; - -const D3DXVECTOR4 accessor_457[] = { - D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), - D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), -}; - -const D3DXVECTOR3 accessor_458[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_459[] = { - D3DXVECTOR3(-0.0000000f, 0.0919256f, -0.0000004f), - D3DXVECTOR3( 0.0000000f, 0.0919258f, 0.0000003f), -}; - -const D3DXVECTOR4 accessor_460[] = { - D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), - D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), -}; - -const D3DXVECTOR3 accessor_461[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), -}; - -const D3DXVECTOR3 accessor_462[] = { - D3DXVECTOR3(-0.0000001f, 0.1196961f, 0.0000000f), - D3DXVECTOR3( 0.0000006f, 0.1196968f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_463[] = { - D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), - D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), -}; - -const D3DXVECTOR3 accessor_464[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_465[] = { - D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f), - D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f), -}; - -const D3DXVECTOR4 accessor_466[] = { - D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), - D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), -}; - -const D3DXVECTOR3 accessor_467[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_468[] = { - D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f), - D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_469[] = { - D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), - D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), -}; - -const D3DXVECTOR3 accessor_470[] = { - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), - 
D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_471[] = { - D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f), - D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f), -}; - -const D3DXVECTOR4 accessor_472[] = { - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), -}; - -const D3DXVECTOR3 accessor_473[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_474[] = { - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733559f, -0.0045231f), - D3DXVECTOR3(-0.0020865f, 0.0732797f, -0.0046957f), - D3DXVECTOR3(-0.0020865f, 0.0731451f, -0.0049973f), - D3DXVECTOR3(-0.0020865f, 0.0729449f, -0.0054381f), - D3DXVECTOR3(-0.0020865f, 0.0726721f, -0.0060248f), - D3DXVECTOR3(-0.0020865f, 0.0723206f, -0.0067582f), - D3DXVECTOR3(-0.0020865f, 0.0718868f, -0.0076310f), - D3DXVECTOR3(-0.0020865f, 0.0713715f, -0.0086251f), - D3DXVECTOR3(-0.0020865f, 0.0707815f, -0.0097117f), - D3DXVECTOR3(-0.0020865f, 0.0701300f, -0.0108540f), - D3DXVECTOR3(-0.0020865f, 0.0694352f, -0.0120121f), - D3DXVECTOR3(-0.0020865f, 0.0687175f, -0.0131494f), - D3DXVECTOR3(-0.0020865f, 0.0679968f, -0.0142367f), - D3DXVECTOR3(-0.0020865f, 0.0672901f, -0.0152545f), - D3DXVECTOR3(-0.0020865f, 0.0666102f, -0.0161917f), - D3DXVECTOR3(-0.0020865f, 0.0659661f, -0.0170446f), - D3DXVECTOR3(-0.0020865f, 0.0653632f, -0.0178139f), - D3DXVECTOR3(-0.0020865f, 0.0648043f, -0.0185033f), - D3DXVECTOR3(-0.0020865f, 0.0642904f, -0.0191182f), - D3DXVECTOR3(-0.0020865f, 0.0638212f, -0.0196645f), - D3DXVECTOR3(-0.0020865f, 0.0633954f, -0.0201480f), - D3DXVECTOR3(-0.0020865f, 0.0630115f, -0.0205745f), - D3DXVECTOR3(-0.0020865f, 0.0626674f, -0.0209492f), - D3DXVECTOR3(-0.0020865f, 0.0623612f, -0.0212770f), - D3DXVECTOR3(-0.0020865f, 0.0620907f, -0.0215621f), - D3DXVECTOR3(-0.0020865f, 0.0618539f, -0.0218083f), - D3DXVECTOR3(-0.0020865f, 0.0616489f, -0.0220190f), - D3DXVECTOR3(-0.0020865f, 0.0614737f, -0.0221972f), - D3DXVECTOR3(-0.0020865f, 0.0613267f, -0.0223456f), - D3DXVECTOR3(-0.0020865f, 0.0612061f, -0.0224664f), - D3DXVECTOR3(-0.0020865f, 0.0611105f, -0.0225617f), - D3DXVECTOR3(-0.0020865f, 0.0610383f, -0.0226332f), - D3DXVECTOR3(-0.0020865f, 0.0609883f, -0.0226827f), - D3DXVECTOR3(-0.0020865f, 0.0609591f, -0.0227115f), - D3DXVECTOR3(-0.0020865f, 0.0609496f, -0.0227208f), -}; - -const D3DXVECTOR4 accessor_475[] = { - D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), - D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), -}; - -const D3DXVECTOR3 accessor_476[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_477[] = { - D3DXVECTOR3(-0.0000000f, 0.0919253f, 0.0000002f), - D3DXVECTOR3(-0.0000000f, 0.0919256f, 0.0000006f), -}; - -const D3DXVECTOR4 accessor_478[] = { - D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), - D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), -}; - -const D3DXVECTOR3 accessor_479[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), -}; - -const D3DXVECTOR3 accessor_480[] = { - D3DXVECTOR3( 0.0000001f, 0.1196966f, 0.0000001f), - D3DXVECTOR3(-0.0000006f, 0.1196973f, -0.0000000f), -}; - -const D3DXVECTOR4 accessor_481[] = { - D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), - D3DXVECTOR4( 0.0162064f, 
-0.0000001f, -0.3402116f, 0.9402092f), -}; - -const D3DXVECTOR3 accessor_482[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_483[] = { - D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f), - D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f), -}; - -const D3DXVECTOR4 accessor_484[] = { - D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), - D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), -}; - -const D3DXVECTOR3 accessor_485[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), -}; - -const float accessor_486[] = { - 0.0, - 0.0416666679084301, - 0.0833333358168602, - 0.125, - 0.1666666716337204, - 0.2083333283662796, - 0.25, - 0.2916666567325592, - 0.3333333432674408, - 0.375, - 0.4166666567325592, - 0.4583333432674408, - 0.5, - 0.5416666865348816, - 0.5833333134651184, - 0.625, - 0.6666666865348816, - 0.7083333134651184, - 0.75, - 0.7916666865348816, - 0.8333333134651184, - 0.875, -}; - -const D3DXVECTOR3 accessor_487[] = { - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0003281f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, 0.0095444f), - D3DXVECTOR3(-0.0020865f, 0.3033686f, 0.0213277f), - D3DXVECTOR3(-0.0020865f, 0.4733778f, 0.0312002f), - D3DXVECTOR3(-0.0020865f, 0.5038654f, 0.0353403f), - D3DXVECTOR3(-0.0020865f, 0.4858294f, 0.0337737f), - D3DXVECTOR3(-0.0020865f, 0.4345431f, 0.0294427f), - D3DXVECTOR3(-0.0020865f, 0.3543602f, 0.0229001f), - D3DXVECTOR3(-0.0020865f, 0.2512789f, 0.0146988f), - D3DXVECTOR3(-0.0020865f, 0.1325482f, 0.0053917f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0060253f, -0.0044682f), -}; - -const D3DXVECTOR4 accessor_488[] = { - D3DXVECTOR4(-0.1597332f, 0.0000000f, 0.0000000f, 0.9871602f), - D3DXVECTOR4(-0.1100656f, 0.0000000f, 0.0000000f, 0.9939243f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), - D3DXVECTOR4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), - D3DXVECTOR4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), - D3DXVECTOR4( 0.1069211f, 0.0000000f, 0.0000000f, 0.9942675f), - D3DXVECTOR4( 0.0142430f, 0.0000000f, 0.0000000f, 0.9998986f), - D3DXVECTOR4(-0.0964659f, 0.0000000f, 0.0000000f, 0.9953363f), - D3DXVECTOR4(-0.1883665f, 0.0000000f, 0.0000000f, 0.9820988f), - D3DXVECTOR4(-0.2264674f, 0.0000000f, 0.0000000f, 0.9740188f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - 
D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_489[] = { - D3DXVECTOR3( 1.0000000f, 0.8592988f, 0.9980907f), - D3DXVECTOR3( 1.0000000f, 0.8614748f, 0.9981202f), - D3DXVECTOR3( 1.0000000f, 0.8698399f, 0.9982337f), - D3DXVECTOR3( 1.0000000f, 0.8906376f, 0.9985160f), - D3DXVECTOR3( 1.0000000f, 0.9550318f, 0.9993898f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9842575f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9449011f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8937379f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8425746f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8032182f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.7874756f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8095782f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.8622842f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9251915f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 0.9778975f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const float accessor_490[] = { - 0.0, - 0.875, -}; - -const D3DXVECTOR3 accessor_491[] = { - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), - D3DXVECTOR3( 0.0004585f, 0.0671507f, 0.0012744f), -}; - -const D3DXVECTOR4 accessor_492[] = { - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), - D3DXVECTOR4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), -}; - -const D3DXVECTOR3 accessor_493[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_494[] = { - D3DXVECTOR3(-0.0000000f, 0.2498147f, 0.0000002f), - D3DXVECTOR3( 0.0000000f, 0.2498153f, 0.0000001f), -}; - -const D3DXVECTOR4 accessor_495[] = { - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), - D3DXVECTOR4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), -}; - -const D3DXVECTOR3 accessor_496[] = { - D3DXVECTOR3( 0.9999998f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999998f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_497[] = { - D3DXVECTOR3( 0.0000000f, 0.1246096f, -0.0425520f), - D3DXVECTOR3( 0.0000000f, 0.1246156f, -0.0109113f), - D3DXVECTOR3( 0.0000000f, 0.1246802f, 0.0365921f), - D3DXVECTOR3( 0.0000000f, 0.1250358f, 0.0385340f), - D3DXVECTOR3( 0.0000000f, 0.1282260f, 0.0287673f), - D3DXVECTOR3( 0.0000000f, 0.1395849f, 0.0060578f), - D3DXVECTOR3( 0.0000000f, 0.1484680f, 0.0039087f), - D3DXVECTOR3( 0.0000000f, 0.1566569f, 0.0033386f), - D3DXVECTOR3( 0.0000000f, 0.1634163f, 0.0032947f), - D3DXVECTOR3( 0.0000000f, 0.1680112f, 0.0027246f), - D3DXVECTOR3( 0.0000000f, 0.1697062f, 0.0005755f), - D3DXVECTOR3( 0.0000000f, 0.1406879f, -0.0024007f), - D3DXVECTOR3( 0.0000000f, 0.1054590f, 0.0167925f), - D3DXVECTOR3( 0.0000000f, 0.1006418f, 0.0259067f), - D3DXVECTOR3( 0.0000000f, 0.1068951f, 0.0211514f), - D3DXVECTOR3( 0.0000000f, 0.1242217f, 0.0079754f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1347095f, 
0.0000000f), -}; - -const D3DXVECTOR4 accessor_498[] = { - D3DXVECTOR4(-0.1225295f, 0.0000000f, 0.0000000f, 0.9924649f), - D3DXVECTOR4(-0.0567851f, 0.0000000f, 0.0000000f, 0.9983864f), - D3DXVECTOR4( 0.0156293f, 0.0000000f, 0.0000000f, 0.9998779f), - D3DXVECTOR4( 0.0233419f, 0.0000000f, 0.0000000f, 0.9997275f), - D3DXVECTOR4( 0.0253458f, 0.0000000f, 0.0000000f, 0.9996787f), - D3DXVECTOR4( 0.0284723f, 0.0000000f, 0.0000000f, 0.9995946f), - D3DXVECTOR4( 0.0373068f, 0.0000000f, 0.0000000f, 0.9993039f), - D3DXVECTOR4( 0.0474630f, 0.0000000f, 0.0000000f, 0.9988730f), - D3DXVECTOR4( 0.0523185f, 0.0000000f, 0.0000000f, 0.9986304f), - D3DXVECTOR4( 0.0365627f, 0.0000000f, 0.0000000f, 0.9993314f), - D3DXVECTOR4( 0.0124368f, 0.0000000f, 0.0000000f, 0.9999226f), - D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4(-0.0243295f, 0.0000000f, 0.0000000f, 0.9997039f), - D3DXVECTOR4(-0.0313913f, 0.0000000f, -0.0000000f, 0.9995072f), - D3DXVECTOR4(-0.0308447f, 0.0000000f, -0.0000000f, 0.9995242f), - D3DXVECTOR4(-0.0278059f, -0.0000000f, -0.0000000f, 0.9996133f), - D3DXVECTOR4(-0.0206368f, 0.0000000f, -0.0000000f, 0.9997871f), - D3DXVECTOR4(-0.0113960f, -0.0000000f, -0.0000000f, 0.9999351f), - D3DXVECTOR4(-0.0034049f, -0.0000000f, -0.0000000f, 0.9999942f), - D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), - D3DXVECTOR4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_499[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_500[] = { - D3DXVECTOR3( 0.0000000f, 0.0502506f, -0.0410090f), - D3DXVECTOR3( 0.0000000f, 0.0519126f, -0.0208252f), - D3DXVECTOR3( 0.0000000f, 0.0583019f, 0.0102387f), - D3DXVECTOR3( 0.0000000f, 0.0741867f, 0.0143119f), - D3DXVECTOR3( 0.0000000f, 0.1233698f, 0.0151672f), - D3DXVECTOR3(-0.0000000f, 0.1577158f, 0.0105115f), - D3DXVECTOR3(-0.0000004f, 0.1572729f, 0.0034530f), - D3DXVECTOR3(-0.0000010f, 0.1562570f, -0.0120758f), - D3DXVECTOR3(-0.0000011f, 0.1551369f, -0.0276046f), - D3DXVECTOR3(-0.0000000f, 0.1543814f, -0.0346631f), - D3DXVECTOR3( 0.0000020f, 0.1537476f, -0.0246590f), - D3DXVECTOR3( 0.0000000f, 0.1501444f, -0.0060799f), - D3DXVECTOR3(-0.0001929f, 0.0552515f, 0.0039242f), - D3DXVECTOR3(-0.0000572f, 0.0875769f, 0.0018956f), - D3DXVECTOR3( 0.0000000f, 0.1049712f, 0.0007144f), - D3DXVECTOR3( 0.0000036f, 0.1077900f, 0.0004141f), - D3DXVECTOR3( 0.0000021f, 0.1105564f, 0.0001193f), - D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1116755f, -0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_501[] = { - D3DXVECTOR4( 0.1687535f, 0.0001485f, -0.0011179f, 0.9856576f), - D3DXVECTOR4( 0.1641874f, 0.0001452f, -0.0011030f, 0.9864286f), - D3DXVECTOR4( 0.1321307f, 0.0001089f, -0.0010700f, 0.9912317f), - D3DXVECTOR4( 0.0425173f, 0.0000089f, -0.0010504f, 0.9990952f), - D3DXVECTOR4(-0.0663257f, -0.0001048f, -0.0010704f, 0.9977974f), - D3DXVECTOR4(-0.1190329f, -0.0001542f, -0.0011139f, 0.9928897f), - D3DXVECTOR4(-0.1009015f, -0.0001354f, -0.0011171f, 0.9948958f), - D3DXVECTOR4(-0.0608890f, -0.0000947f, -0.0011108f, 0.9981439f), - D3DXVECTOR4(-0.0207444f, -0.0000514f, -0.0011062f, 0.9997842f), - D3DXVECTOR4(-0.0024017f, -0.0000256f, -0.0011052f, 0.9999965f), - D3DXVECTOR4(-0.0023224f, 
-0.0000197f, -0.0010893f, 0.9999967f), - D3DXVECTOR4(-0.0023111f, -0.0000189f, -0.0010735f, 0.9999968f), - D3DXVECTOR4(-0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), - D3DXVECTOR4( 0.1278805f, 0.0001342f, -0.0011007f, 0.9917890f), - D3DXVECTOR4( 0.1862075f, 0.0001850f, -0.0011025f, 0.9825098f), - D3DXVECTOR4( 0.1473228f, 0.0001447f, -0.0010961f, 0.9890879f), - D3DXVECTOR4( 0.0385725f, 0.0000335f, -0.0011042f, 0.9992552f), - D3DXVECTOR4(-0.0275612f, -0.0000311f, -0.0011282f, 0.9996195f), - D3DXVECTOR4(-0.0204169f, -0.0000230f, -0.0011289f, 0.9997909f), - D3DXVECTOR4(-0.0071464f, -0.0000081f, -0.0011287f, 0.9999738f), - D3DXVECTOR4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), - D3DXVECTOR4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), -}; - -const D3DXVECTOR3 accessor_502[] = { - D3DXVECTOR3( 1.1119719f, 0.8161622f, 0.9900533f), - D3DXVECTOR3( 1.1116862f, 0.8166313f, 0.9900787f), - D3DXVECTOR3( 1.1107776f, 0.8181230f, 0.9901594f), - D3DXVECTOR3( 1.1091545f, 0.8207878f, 0.9903035f), - D3DXVECTOR3( 1.1066993f, 0.8248183f, 0.9905214f), - D3DXVECTOR3( 1.1032585f, 0.8304676f, 0.9908272f), - D3DXVECTOR3( 1.0986220f, 0.8380802f, 0.9912391f), - D3DXVECTOR3( 1.0924909f, 0.8481464f, 0.9917837f), - D3DXVECTOR3( 1.0844150f, 0.8614054f, 0.9925011f), - D3DXVECTOR3( 1.0736568f, 0.8790682f, 0.9934567f), - D3DXVECTOR3( 1.0588560f, 0.9033691f, 0.9947716f), - D3DXVECTOR3( 1.0370246f, 0.9392121f, 0.9967110f), - D3DXVECTOR3( 1.1109213f, 0.9073296f, 1.0868970f), - D3DXVECTOR3( 1.0325081f, 0.9749655f, 1.0277655f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_503[] = { - D3DXVECTOR3(-0.0005524f, 0.0688295f, -0.0213631f), - D3DXVECTOR3(-0.0005524f, 0.0688295f, -0.0213631f), -}; - -const D3DXVECTOR4 accessor_504[] = { - D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), - D3DXVECTOR4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), -}; - -const D3DXVECTOR3 accessor_505[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_506[] = { - D3DXVECTOR3( 0.0000000f, 0.3082177f, -0.0000000f), - D3DXVECTOR3( 0.0000000f, 0.3082207f, -0.0000010f), -}; - -const D3DXVECTOR4 accessor_507[] = { - D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), - D3DXVECTOR4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), -}; - -const D3DXVECTOR3 accessor_508[] = { - D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_509[] = { - D3DXVECTOR3( 0.0000000f, 0.3056000f, 0.0000004f), - D3DXVECTOR3( 0.0000000f, 0.3056044f, 0.0000002f), -}; - -const D3DXVECTOR4 accessor_510[] = { - D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), - D3DXVECTOR4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), -}; - -const D3DXVECTOR3 accessor_511[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_512[] = { - D3DXVECTOR3( 0.0015863f, 0.2239287f, 0.1576325f), - D3DXVECTOR3( 0.0015815f, 0.2262271f, 0.1552215f), - D3DXVECTOR3( 0.0015629f, 0.2350632f, 
0.1459529f), - D3DXVECTOR3( 0.0015168f, 0.2570313f, 0.1229092f), - D3DXVECTOR3( 0.0013740f, 0.3250495f, 0.0515609f), - D3DXVECTOR3( 0.0012743f, 0.3725485f, 0.0017364f), - D3DXVECTOR3( 0.0012766f, 0.3712253f, 0.0058122f), - D3DXVECTOR3( 0.0012820f, 0.3679919f, 0.0155316f), - D3DXVECTOR3( 0.0012880f, 0.3639544f, 0.0271321f), - D3DXVECTOR3( 0.0012921f, 0.3602186f, 0.0368514f), - D3DXVECTOR3( 0.0012917f, 0.3578899f, 0.0409273f), - D3DXVECTOR3( 0.0012936f, 0.3542452f, 0.0314968f), - D3DXVECTOR3( 0.0013404f, 0.3312452f, 0.0139831f), - D3DXVECTOR3( 0.0014914f, 0.2635176f, 0.0045527f), - D3DXVECTOR3( 0.0014853f, 0.2667689f, 0.0306998f), - D3DXVECTOR3( 0.0014733f, 0.2723543f, 0.0431161f), - D3DXVECTOR3( 0.0014353f, 0.2890725f, 0.0360221f), - D3DXVECTOR3( 0.0013524f, 0.3254242f, 0.0163660f), - D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f), - D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f), - D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f), - D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_513[] = { - D3DXVECTOR4( 0.1017744f, 0.0000985f, 0.0011238f, 0.9948069f), - D3DXVECTOR4(-0.0664422f, -0.0001058f, 0.0011271f, 0.9977896f), - D3DXVECTOR4( 0.0065792f, 0.0000296f, 0.0011345f, 0.9999777f), - D3DXVECTOR4( 0.0795655f, 0.0002030f, 0.0011273f, 0.9968290f), - D3DXVECTOR4(-0.0557642f, 0.0000156f, 0.0011423f, 0.9984434f), - D3DXVECTOR4(-0.1900725f, -0.0002299f, 0.0011131f, 0.9817694f), - D3DXVECTOR4(-0.1829499f, -0.0002084f, 0.0011183f, 0.9831216f), - D3DXVECTOR4(-0.1601227f, -0.0001502f, 0.0011308f, 0.9870965f), - D3DXVECTOR4(-0.1193056f, -0.0000647f, 0.0011439f, 0.9928569f), - D3DXVECTOR4(-0.0581474f, 0.0000385f, 0.0011502f, 0.9983073f), - D3DXVECTOR4( 0.0255451f, 0.0001485f, 0.0011437f, 0.9996730f), - D3DXVECTOR4( 0.1210316f, 0.0002301f, 0.0011107f, 0.9926480f), - D3DXVECTOR4( 0.1685038f, 0.0002263f, 0.0010911f, 0.9857004f), - D3DXVECTOR4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), - D3DXVECTOR4( 0.1871809f, 0.0002103f, 0.0011074f, 0.9823248f), - D3DXVECTOR4( 0.2741494f, 0.0003082f, 0.0010835f, 0.9616865f), - D3DXVECTOR4( 0.2213597f, 0.0002489f, 0.0010990f, 0.9751916f), - D3DXVECTOR4( 0.0721470f, 0.0000812f, 0.0011251f, 0.9973934f), - D3DXVECTOR4(-0.0191888f, -0.0000217f, 0.0011285f, 0.9998153f), - D3DXVECTOR4(-0.0142143f, -0.0000161f, 0.0011286f, 0.9998984f), - D3DXVECTOR4(-0.0049752f, -0.0000056f, 0.0011287f, 0.9999870f), - D3DXVECTOR4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), -}; - -const D3DXVECTOR3 accessor_514[] = { - D3DXVECTOR3( 1.0000001f, 1.1504586f, 1.0014217f), - D3DXVECTOR3( 0.9999999f, 1.1500801f, 1.0014180f), - D3DXVECTOR3( 0.9999999f, 1.1488986f, 1.0014070f), - D3DXVECTOR3( 0.9999999f, 1.1468363f, 1.0013874f), - D3DXVECTOR3( 1.0000001f, 1.1437984f, 1.0013586f), - D3DXVECTOR3( 0.9999999f, 1.1396673f, 1.0013196f), - D3DXVECTOR3( 1.0000001f, 1.1342896f, 1.0012687f), - D3DXVECTOR3( 1.0000001f, 1.1274629f, 1.0012043f), - D3DXVECTOR3( 0.9999999f, 1.1189024f, 1.0011234f), - D3DXVECTOR3( 0.9999999f, 1.1081806f, 1.0010223f), - D3DXVECTOR3( 0.9999999f, 1.0945803f, 1.0008935f), - D3DXVECTOR3( 1.0000001f, 1.0766469f, 1.0007242f), - D3DXVECTOR3( 0.9999999f, 1.0497506f, 1.0004700f), - D3DXVECTOR3( 0.9999999f, 1.0188085f, 1.0001775f), - D3DXVECTOR3( 1.0000001f, 1.0037625f, 1.0000355f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000001f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000001f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), - 
D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_515[] = { - D3DXVECTOR3( 0.2488541f, 0.2033312f, -0.0450287f), - D3DXVECTOR3( 0.2488541f, 0.2033313f, -0.0450287f), -}; - -const D3DXVECTOR4 accessor_516[] = { - D3DXVECTOR4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), - D3DXVECTOR4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), -}; - -const D3DXVECTOR3 accessor_517[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_518[] = { - D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f), - D3DXVECTOR3(-0.0004506f, 0.2559274f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_519[] = { - D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), - D3DXVECTOR4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), -}; - -const D3DXVECTOR3 accessor_520[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), -}; - -const D3DXVECTOR3 accessor_521[] = { - D3DXVECTOR3( 0.0000000f, 0.1024673f, 0.0000021f), - D3DXVECTOR3(-0.0000000f, 0.1024613f, -0.0000003f), -}; - -const D3DXVECTOR4 accessor_522[] = { - D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), - D3DXVECTOR4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), -}; - -const D3DXVECTOR3 accessor_523[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_524[] = { - D3DXVECTOR3( 0.0000000f, 0.1026061f, -0.0000003f), - D3DXVECTOR3(-0.0000000f, 0.1026039f, 0.0000002f), -}; - -const D3DXVECTOR4 accessor_525[] = { - D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), - D3DXVECTOR4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), -}; - -const D3DXVECTOR3 accessor_526[] = { - D3DXVECTOR3( 1.0000000f, 0.9999998f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_527[] = { - D3DXVECTOR3( 0.0000000f, 0.1033664f, 0.0000010f), - D3DXVECTOR3(-0.0000000f, 0.1033674f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_528[] = { - D3DXVECTOR4( 0.0181255f, 0.0003339f, 0.0000077f, 0.9998357f), - D3DXVECTOR4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), -}; - -const D3DXVECTOR3 accessor_529[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_530[] = { - D3DXVECTOR3( 0.0000000f, 0.1012722f, -0.0000000f), - D3DXVECTOR3(-0.0000000f, 0.1012845f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_531[] = { - D3DXVECTOR4( 0.0250840f, -0.0004247f, 0.0000082f, 0.9996852f), - D3DXVECTOR4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), -}; - -const D3DXVECTOR3 accessor_532[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999998f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_533[] = { - D3DXVECTOR3( 0.0000000f, 0.1024291f, 0.0000000f), - D3DXVECTOR3(-0.0000000f, 0.1024317f, -0.0000007f), -}; - -const D3DXVECTOR4 accessor_534[] = { - D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), - D3DXVECTOR4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), -}; - -const D3DXVECTOR3 accessor_535[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_536[] = { - D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f), - D3DXVECTOR3(-0.0027646f, 0.0680362f, -0.0078378f), -}; - 
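The D3DX-typed TRS data deleted in this hunk maps onto DirectXMath's split between storage types (XMFLOAT3/XMFLOAT4) and SIMD compute types (XMVECTOR/XMMATRIX). The following is a minimal sketch of rebuilding a node's local transform from such fields after the migration, assuming the replacement side stores translation and scale as XMFLOAT3 and the rotation quaternion as XMFLOAT4; the NodeTRS struct and function names are illustrative only, not part of this patch:

#include "directxmath/directxmath.h"

using namespace DirectX;

// Hypothetical mirror of the generated Node's TRS fields.
struct NodeTRS {
    XMFLOAT3 translation;
    XMFLOAT4 rotation;   // quaternion, stored xyzw
    XMFLOAT3 scale;
};

// Compose the local transform: scale, then rotate, then translate.
XMMATRIX node_local_transform(const NodeTRS &n)
{
    // DirectXMath loads storage structs into SIMD registers before use.
    XMVECTOR s = XMLoadFloat3(&n.scale);
    XMVECTOR r = XMLoadFloat4(&n.rotation);
    XMVECTOR t = XMLoadFloat3(&n.translation);
    XMMATRIX m = XMMatrixScalingFromVector(s);
    m = XMMatrixMultiply(m, XMMatrixRotationQuaternion(r));
    m = XMMatrixMultiply(m, XMMatrixTranslationFromVector(t));
    return m;
}

For the rotation keyframes in the animation samplers below, XMQuaternionSlerp is the DirectXMath counterpart of D3DXQuaternionSlerp, and XMVectorLerp covers the translation and scale channels.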
-const D3DXVECTOR4 accessor_537[] = { - D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), - D3DXVECTOR4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), -}; - -const D3DXVECTOR3 accessor_538[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_539[] = { - D3DXVECTOR3( 0.0000000f, 0.3082150f, 0.0000001f), - D3DXVECTOR3(-0.0000000f, 0.3082130f, -0.0000004f), -}; - -const D3DXVECTOR4 accessor_540[] = { - D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), - D3DXVECTOR4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), -}; - -const D3DXVECTOR3 accessor_541[] = { - D3DXVECTOR3( 1.0000001f, 1.0000001f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_542[] = { - D3DXVECTOR3(-0.0000000f, 0.3056078f, -0.0000030f), - D3DXVECTOR3(-0.0000000f, 0.3055982f, -0.0000002f), -}; - -const D3DXVECTOR4 accessor_543[] = { - D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), - D3DXVECTOR4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), -}; - -const D3DXVECTOR3 accessor_544[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999998f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_545[] = { - D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f), - D3DXVECTOR3(-0.0013285f, 0.2559254f, 0.0044682f), -}; - -const D3DXVECTOR4 accessor_546[] = { - D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), - D3DXVECTOR4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), -}; - -const D3DXVECTOR3 accessor_547[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100001f), -}; - -const D3DXVECTOR3 accessor_548[] = { - D3DXVECTOR3(-0.0000000f, 0.1024574f, -0.0000004f), - D3DXVECTOR3( 0.0000000f, 0.1024598f, 0.0000012f), -}; - -const D3DXVECTOR4 accessor_549[] = { - D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), - D3DXVECTOR4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), -}; - -const D3DXVECTOR3 accessor_550[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_551[] = { - D3DXVECTOR3(-0.0000001f, 0.1026062f, 0.0000004f), - D3DXVECTOR3(-0.0000000f, 0.1026044f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_552[] = { - D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), - D3DXVECTOR4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), -}; - -const D3DXVECTOR3 accessor_553[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000001f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), -}; - -const D3DXVECTOR3 accessor_554[] = { - D3DXVECTOR3(-0.0000000f, 0.1033718f, 0.0000001f), - D3DXVECTOR3( 0.0000000f, 0.1033676f, 0.0000003f), -}; - -const D3DXVECTOR4 accessor_555[] = { - D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), - D3DXVECTOR4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), -}; - -const D3DXVECTOR3 accessor_556[] = { - D3DXVECTOR3( 1.0000001f, 1.0000001f, 1.0000001f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_557[] = { - D3DXVECTOR3( 0.0000000f, 0.1012763f, -0.0000033f), - D3DXVECTOR3(-0.0000000f, 0.1012811f, -0.0000002f), -}; - -const D3DXVECTOR4 accessor_558[] = { - D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), - D3DXVECTOR4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), -}; - -const D3DXVECTOR3 accessor_559[] = { - D3DXVECTOR3( 1.0000000f, 0.9999999f, 
0.9999999f), - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_560[] = { - D3DXVECTOR3( 0.0000000f, 0.1024334f, -0.0000007f), - D3DXVECTOR3( 0.0000000f, 0.1024289f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_561[] = { - D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), - D3DXVECTOR4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), -}; - -const D3DXVECTOR3 accessor_562[] = { - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_563[] = { - D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f), - D3DXVECTOR3(-0.0000347f, 0.0679304f, -0.0016926f), -}; - -const D3DXVECTOR4 accessor_564[] = { - D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), - D3DXVECTOR4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), -}; - -const D3DXVECTOR3 accessor_565[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_566[] = { - D3DXVECTOR3( 0.0000000f, 0.2498151f, 0.0000002f), - D3DXVECTOR3(-0.0000000f, 0.2498146f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_567[] = { - D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), - D3DXVECTOR4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), -}; - -const D3DXVECTOR3 accessor_568[] = { - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_569[] = { - D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f), - D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_570[] = { - D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), - D3DXVECTOR4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), -}; - -const D3DXVECTOR3 accessor_571[] = { - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_572[] = { - D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f), - D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f), -}; - -const D3DXVECTOR4 accessor_573[] = { - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), -}; - -const D3DXVECTOR3 accessor_574[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_575[] = { - D3DXVECTOR3(-0.0020865f, 0.0609496f, -0.0227208f), - D3DXVECTOR3(-0.0020865f, 0.0626438f, -0.0130235f), - D3DXVECTOR3(-0.0020865f, 0.0646131f, 0.0095444f), - D3DXVECTOR3(-0.0020865f, 0.3623514f, 0.0322673f), - D3DXVECTOR3(-0.0020865f, 0.5354798f, 0.0479684f), - D3DXVECTOR3(-0.0020865f, 0.5683661f, 0.0547394f), - D3DXVECTOR3(-0.0020865f, 0.5506080f, 0.0478690f), - D3DXVECTOR3(-0.0020865f, 0.4981608f, 0.0312555f), - D3DXVECTOR3(-0.0020865f, 0.4134374f, 0.0113403f), - D3DXVECTOR3(-0.0020865f, 0.3040031f, -0.0062986f), - D3DXVECTOR3(-0.0020865f, 0.1810994f, -0.0184756f), - D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0605542f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0641042f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0683413f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0718913f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 
0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), -}; - -const D3DXVECTOR4 accessor_576[] = { - D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), - D3DXVECTOR4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), -}; - -const D3DXVECTOR3 accessor_577[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_578[] = { - D3DXVECTOR3( 0.0000000f, 0.0919258f, 0.0000003f), - D3DXVECTOR3(-0.0000000f, 0.0919256f, -0.0000004f), -}; - -const D3DXVECTOR4 accessor_579[] = { - D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), - D3DXVECTOR4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), -}; - -const D3DXVECTOR3 accessor_580[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), -}; - -const D3DXVECTOR3 accessor_581[] = { - D3DXVECTOR3( 0.0000006f, 0.1196968f, -0.0000000f), - D3DXVECTOR3(-0.0000001f, 0.1196961f, 0.0000000f), -}; - -const D3DXVECTOR4 accessor_582[] = { - D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), - D3DXVECTOR4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), -}; - -const D3DXVECTOR3 accessor_583[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_584[] = { - D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f), - D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f), -}; - -const D3DXVECTOR4 accessor_585[] = { - D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), - D3DXVECTOR4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), -}; - -const D3DXVECTOR3 accessor_586[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_587[] = { - D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f), - D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f), -}; - -const D3DXVECTOR4 accessor_588[] = { - D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), - D3DXVECTOR4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), -}; - -const D3DXVECTOR3 accessor_589[] = { - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_590[] = { - D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f), - D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f), -}; - -const D3DXVECTOR4 accessor_591[] = { - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), - D3DXVECTOR4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), -}; - -const D3DXVECTOR3 accessor_592[] = { - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), -}; - -const D3DXVECTOR3 accessor_593[] = { - D3DXVECTOR3(-0.0020865f, 0.0609496f, -0.0227208f), - D3DXVECTOR3(-0.0020865f, 0.0626438f, -0.0130235f), - D3DXVECTOR3(-0.0020865f, 0.0646131f, 0.0095444f), - D3DXVECTOR3(-0.0020865f, 0.3623514f, 0.0322673f), - D3DXVECTOR3(-0.0020865f, 0.5354798f, 0.0479684f), - D3DXVECTOR3(-0.0020865f, 0.5683661f, 0.0547394f), - D3DXVECTOR3(-0.0020865f, 0.5506080f, 0.0478690f), - D3DXVECTOR3(-0.0020865f, 0.4981608f, 0.0312555f), - D3DXVECTOR3(-0.0020865f, 0.4134374f, 0.0113403f), - D3DXVECTOR3(-0.0020865f, 0.3040031f, -0.0062986f), - D3DXVECTOR3(-0.0020865f, 0.1810994f, -0.0184756f), - D3DXVECTOR3(-0.0020865f, 0.0590655f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0605542f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0641042f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 
0.0683413f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0718913f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), - D3DXVECTOR3(-0.0020865f, 0.0733800f, -0.0044682f), -}; - -const D3DXVECTOR4 accessor_594[] = { - D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), - D3DXVECTOR4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), -}; - -const D3DXVECTOR3 accessor_595[] = { - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), - D3DXVECTOR3( 0.0100000f, 0.0100000f, 0.0100000f), -}; - -const D3DXVECTOR3 accessor_596[] = { - D3DXVECTOR3(-0.0000000f, 0.0919256f, 0.0000006f), - D3DXVECTOR3(-0.0000000f, 0.0919253f, 0.0000002f), -}; - -const D3DXVECTOR4 accessor_597[] = { - D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), - D3DXVECTOR4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), -}; - -const D3DXVECTOR3 accessor_598[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999996f), -}; - -const D3DXVECTOR3 accessor_599[] = { - D3DXVECTOR3(-0.0000006f, 0.1196973f, -0.0000000f), - D3DXVECTOR3( 0.0000001f, 0.1196966f, 0.0000001f), -}; - -const D3DXVECTOR4 accessor_600[] = { - D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), - D3DXVECTOR4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), -}; - -const D3DXVECTOR3 accessor_601[] = { - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 0.9999999f, 0.9999999f), -}; - -const D3DXVECTOR3 accessor_602[] = { - D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f), - D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f), -}; - -const D3DXVECTOR4 accessor_603[] = { - D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), - D3DXVECTOR4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), -}; - -const D3DXVECTOR3 accessor_604[] = { - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), -}; - -const Mesh mesh_0 = { - accessor_0, // position - accessor_0__size, - accessor_1, // normal - accessor_1__size, - accessor_2, // texcoord_0 - accessor_2__size, - accessor_7, // weights_0 - accessor_7__size, - accessor_6, // joints_0 - accessor_6__size, - accessor_8, // indices - accessor_8__size, -}; - -extern const Skin skin_0; -const Node node_0 = { - 1, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.2498147f, 0.0000000f), // translation - D3DXQUATERNION( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), // rotation - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), // scale -}; - -const Node node_1 = { - 26, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.1015303f, -0.0197356f, -0.0097731f), // translation - D3DXQUATERNION( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000026f), // scale -}; - -const Node node_2 = { - 3, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.3056018f, 0.0000000f), // translation - D3DXQUATERNION( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994639f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), // scale -}; - -const Node node_3 = { - 4, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.3082168f, -0.0000000f), // translation - D3DXQUATERNION(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), // 
rotation - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), // scale -}; - -const Node node_4 = { - 22, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.2405819f, 0.2553854f, 0.0044682f), // translation - D3DXQUATERNION(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), // rotation - D3DXVECTOR3( 1.0000001f, 0.9999999f, 1.0000046f), // scale -}; - -const Node node_5 = { - 6, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.2488541f, 0.2033314f, -0.0450287f), // translation - D3DXQUATERNION(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_6 = { - 22, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0013047f, 0.3463302f, 0.0044682f), // translation - D3DXQUATERNION(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), // rotation - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), // scale -}; - -const Node node_7 = { - 8, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.1024318f, -0.0000000f), // translation - D3DXQUATERNION( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), // rotation - D3DXVECTOR3( 1.0000001f, 1.0000000f, 1.0000001f), // scale -}; - -const Node node_8 = { - 9, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1012803f, 0.0000000f), // translation - D3DXQUATERNION( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996853f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_9 = { - 10, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1033667f, 0.0000000f), // translation - D3DXQUATERNION( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_10 = { - 11, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1026060f, 0.0000000f), // translation - D3DXQUATERNION( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), // rotation - D3DXVECTOR3( 1.0000000f, 0.9999999f, 1.0000000f), // scale -}; - -const Node node_11 = { - 12, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1024613f, -0.0000000f), // translation - D3DXQUATERNION( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000002f, 1.0000001f), // scale -}; - -const Node node_12 = { - 22, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.2396202f, 0.2553876f, 0.0044682f), // translation - D3DXQUATERNION(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), // rotation - D3DXVECTOR3( 0.9999999f, 1.0000001f, 1.0000131f), // scale -}; - -const Node node_13 = { - 14, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.3056017f, 0.0000000f), // translation - D3DXQUATERNION( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), // scale -}; - -const Node node_14 = { - 15, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.3082169f, 0.0000000f), // translation - D3DXQUATERNION(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), // rotation - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), // scale -}; - -const Node node_15 = { - 22, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.2435993f, 0.2564784f, 0.0044683f), // translation - D3DXQUATERNION(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), // rotation - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000043f), // scale -}; - -const Node 
node_16 = { - 17, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1024322f, 0.0000000f), // translation - D3DXQUATERNION( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_17 = { - 18, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.1012807f, 0.0000000f), // translation - D3DXQUATERNION( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000001f), // scale -}; - -const Node node_18 = { - 19, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1033668f, 0.0000000f), // translation - D3DXQUATERNION( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), // rotation - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), // scale -}; - -const Node node_19 = { - 20, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1026065f, -0.0000000f), // translation - D3DXQUATERNION( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), // rotation - D3DXVECTOR3( 1.0000000f, 0.9999999f, 0.9999999f), // scale -}; - -const Node node_20 = { - 21, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.1024597f, -0.0000000f), // translation - D3DXQUATERNION( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000001f, 1.0000000f), // scale -}; - -const Node node_21 = { - 22, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.2426377f, 0.2564762f, 0.0044682f), // translation - D3DXQUATERNION(-0.0000471f, 0.0425161f, -0.9990951f, 0.0011277f), // rotation - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000130f), // scale -}; - -const Node node_22 = { - 23, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.1116755f, 0.0000000f), // translation - D3DXQUATERNION( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), // rotation - D3DXVECTOR3( 0.9999999f, 0.9999999f, 1.0000000f), // scale -}; - -const Node node_23 = { - 26, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.1347095f, 0.0000000f), // translation - D3DXQUATERNION( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_24 = { - 25, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.2498148f, -0.0000000f), // translation - D3DXQUATERNION( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_25 = { - 26, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.1057032f, -0.0197356f, -0.0097731f), // translation - D3DXQUATERNION( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), // rotation - D3DXVECTOR3( 0.9999999f, 1.0000000f, 1.0000025f), // scale -}; - -const Node node_26 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0020865f, 0.6493472f, -0.0044682f), // translation - D3DXQUATERNION(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_27 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.2411295f, 0.5391477f, -0.0000001f), // translation - D3DXQUATERNION(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), // rotation - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), // scale -}; - -const Node node_28 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - 
D3DXVECTOR3(-0.2411295f, 0.8440942f, -0.0870393f), // translation - D3DXQUATERNION(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_29 = { - 30, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.1196966f, 0.0000000f), // translation - D3DXQUATERNION( 0.0162064f, 0.0000001f, 0.3402117f, 0.9402092f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_30 = { - 31, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.0919261f, 0.0000000f), // translation - D3DXQUATERNION(-0.6492796f, -0.2451639f, 0.6809445f, 0.2337631f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999996f), // scale -}; - -const Node node_31 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.1054246f, 0.0000000f, -0.0106811f), // translation - D3DXQUATERNION( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_32 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.1026551f, 0.3802050f, 0.2318209f), // translation - D3DXQUATERNION( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), // rotation - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_33 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.2411295f, 0.5391477f, -0.0000001f), // translation - D3DXQUATERNION(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), // rotation - D3DXVECTOR3( 0.9999999f, 0.9999998f, 0.9999999f), // scale -}; - -const Node node_34 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.2411295f, 0.8440942f, -0.0870393f), // translation - D3DXQUATERNION(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_35 = { - 36, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3(-0.0000000f, 0.1196966f, 0.0000000f), // translation - D3DXQUATERNION( 0.0162064f, -0.0000001f, -0.3402117f, 0.9402092f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_36 = { - 37, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.0919261f, 0.0000000f), // translation - D3DXQUATERNION(-0.6492796f, 0.2451639f, -0.6809445f, 0.2337631f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999996f), // scale -}; - -const Node node_37 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.1054246f, 0.0000000f, -0.0106811f), // translation - D3DXQUATERNION( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_38 = { - 40, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.1026551f, 0.3802050f, 0.2318209f), // translation - D3DXQUATERNION( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), // rotation - D3DXVECTOR3( 0.9999999f, 1.0000000f, 0.9999999f), // scale -}; - -const Node node_39 = { - 40, // parent_ix - &skin_0, // skin - &mesh_0, // mesh - D3DXVECTOR3( 0.0000000f, 0.0000000f, 0.0000000f), // translation - D3DXQUATERNION( 0.0000000f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node node_40 = { - (DWORD)-1, // parent_ix - NULL, // skin - NULL, // mesh - D3DXVECTOR3( 0.0000000f, 0.0000000f, 0.0000000f), // translation - 
D3DXQUATERNION( 0.0000000f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation - D3DXVECTOR3( 1.0000000f, 1.0000000f, 1.0000000f), // scale -}; - -const Node * nodes[] = { - &node_0, - &node_1, - &node_2, - &node_3, - &node_4, - &node_5, - &node_6, - &node_7, - &node_8, - &node_9, - &node_10, - &node_11, - &node_12, - &node_13, - &node_14, - &node_15, - &node_16, - &node_17, - &node_18, - &node_19, - &node_20, - &node_21, - &node_22, - &node_23, - &node_24, - &node_25, - &node_26, - &node_27, - &node_28, - &node_29, - &node_30, - &node_31, - &node_32, - &node_33, - &node_34, - &node_35, - &node_36, - &node_37, - &node_38, - &node_39, - &node_40, -}; - -const int skin_0__joints[] = { - 26, - 1, - 0, - 23, - 22, - 4, - 3, - 2, - 6, - 5, - 12, - 11, - 10, - 9, - 8, - 7, - 15, - 14, - 13, - 21, - 20, - 19, - 18, - 17, - 16, - 25, - 24, - 27, - 28, - 31, - 30, - 29, - 32, - 33, - 34, - 37, - 36, - 35, - 38, -}; - -const Skin skin_0 = { - accessor_9, // inverse bind matrices - skin_0__joints, // joints - 39, // joints length -}; - -const AnimationSampler animation_0__sampler_0 = { - accessor_10, // input, keyframe timestamps - accessor_11, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_1 = { - accessor_10, // input, keyframe timestamps - accessor_12, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_2 = { - accessor_10, // input, keyframe timestamps - accessor_13, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_3 = { - accessor_14, // input, keyframe timestamps - accessor_15, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_4 = { - accessor_14, // input, keyframe timestamps - accessor_16, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_5 = { - accessor_14, // input, keyframe timestamps - accessor_17, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_6 = { - accessor_14, // input, keyframe timestamps - accessor_18, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_7 = { - accessor_14, // input, keyframe timestamps - accessor_19, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_8 = { - accessor_14, // input, keyframe timestamps - accessor_20, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_9 = { - accessor_10, // input, keyframe timestamps - accessor_21, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_10 = { - accessor_10, // input, keyframe timestamps - accessor_22, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_11 = { - accessor_14, // input, keyframe timestamps - accessor_23, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_12 = { - accessor_10, // input, keyframe timestamps - accessor_24, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_13 = { - accessor_10, // input, keyframe 
timestamps - accessor_25, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_14 = { - accessor_14, // input, keyframe timestamps - accessor_26, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_15 = { - accessor_14, // input, keyframe timestamps - accessor_27, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_16 = { - accessor_14, // input, keyframe timestamps - accessor_28, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_17 = { - accessor_14, // input, keyframe timestamps - accessor_29, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_18 = { - accessor_14, // input, keyframe timestamps - accessor_30, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_19 = { - accessor_14, // input, keyframe timestamps - accessor_31, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_20 = { - accessor_14, // input, keyframe timestamps - accessor_32, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_21 = { - accessor_14, // input, keyframe timestamps - accessor_33, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_22 = { - accessor_14, // input, keyframe timestamps - accessor_34, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_23 = { - accessor_14, // input, keyframe timestamps - accessor_35, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_24 = { - accessor_10, // input, keyframe timestamps - accessor_36, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_25 = { - accessor_10, // input, keyframe timestamps - accessor_37, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_26 = { - accessor_14, // input, keyframe timestamps - accessor_38, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_27 = { - accessor_14, // input, keyframe timestamps - accessor_39, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_28 = { - accessor_14, // input, keyframe timestamps - accessor_40, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_29 = { - accessor_14, // input, keyframe timestamps - accessor_41, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_30 = { - accessor_14, // input, keyframe timestamps - accessor_42, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_31 = { - accessor_14, // input, keyframe timestamps - accessor_43, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_32 = { - accessor_14, // input, keyframe 
timestamps - accessor_44, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_33 = { - accessor_14, // input, keyframe timestamps - accessor_45, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_34 = { - accessor_14, // input, keyframe timestamps - accessor_46, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_35 = { - accessor_14, // input, keyframe timestamps - accessor_47, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_36 = { - accessor_14, // input, keyframe timestamps - accessor_48, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_37 = { - accessor_14, // input, keyframe timestamps - accessor_49, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_38 = { - accessor_14, // input, keyframe timestamps - accessor_50, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_39 = { - accessor_14, // input, keyframe timestamps - accessor_51, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_40 = { - accessor_14, // input, keyframe timestamps - accessor_52, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_41 = { - accessor_14, // input, keyframe timestamps - accessor_53, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_42 = { - accessor_14, // input, keyframe timestamps - accessor_54, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_43 = { - accessor_14, // input, keyframe timestamps - accessor_55, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_44 = { - accessor_14, // input, keyframe timestamps - accessor_56, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_45 = { - accessor_14, // input, keyframe timestamps - accessor_57, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_46 = { - accessor_14, // input, keyframe timestamps - accessor_58, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_47 = { - accessor_14, // input, keyframe timestamps - accessor_59, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_48 = { - accessor_14, // input, keyframe timestamps - accessor_60, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_49 = { - accessor_14, // input, keyframe timestamps - accessor_61, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_50 = { - accessor_14, // input, keyframe timestamps - accessor_62, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_51 = { - accessor_14, // input, keyframe 
timestamps - accessor_63, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_52 = { - accessor_14, // input, keyframe timestamps - accessor_64, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_53 = { - accessor_14, // input, keyframe timestamps - accessor_65, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_54 = { - accessor_14, // input, keyframe timestamps - accessor_66, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_55 = { - accessor_14, // input, keyframe timestamps - accessor_67, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_56 = { - accessor_14, // input, keyframe timestamps - accessor_68, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_57 = { - accessor_14, // input, keyframe timestamps - accessor_69, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_58 = { - accessor_14, // input, keyframe timestamps - accessor_70, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_59 = { - accessor_14, // input, keyframe timestamps - accessor_71, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_60 = { - accessor_14, // input, keyframe timestamps - accessor_72, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_61 = { - accessor_14, // input, keyframe timestamps - accessor_73, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_62 = { - accessor_14, // input, keyframe timestamps - accessor_74, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_63 = { - accessor_14, // input, keyframe timestamps - accessor_75, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_64 = { - accessor_14, // input, keyframe timestamps - accessor_76, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_65 = { - accessor_14, // input, keyframe timestamps - accessor_77, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_66 = { - accessor_14, // input, keyframe timestamps - accessor_78, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_67 = { - accessor_14, // input, keyframe timestamps - accessor_79, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_68 = { - accessor_14, // input, keyframe timestamps - accessor_80, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_69 = { - accessor_14, // input, keyframe timestamps - accessor_81, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_70 = { - accessor_14, // input, keyframe 
timestamps - accessor_82, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_71 = { - accessor_14, // input, keyframe timestamps - accessor_83, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_72 = { - accessor_14, // input, keyframe timestamps - accessor_84, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_73 = { - accessor_14, // input, keyframe timestamps - accessor_85, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_74 = { - accessor_14, // input, keyframe timestamps - accessor_86, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_75 = { - accessor_14, // input, keyframe timestamps - accessor_87, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_76 = { - accessor_14, // input, keyframe timestamps - accessor_88, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_77 = { - accessor_14, // input, keyframe timestamps - accessor_89, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_78 = { - accessor_14, // input, keyframe timestamps - accessor_90, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_79 = { - accessor_14, // input, keyframe timestamps - accessor_91, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_80 = { - accessor_14, // input, keyframe timestamps - accessor_92, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_81 = { - accessor_14, // input, keyframe timestamps - accessor_93, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_82 = { - accessor_14, // input, keyframe timestamps - accessor_94, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_83 = { - accessor_14, // input, keyframe timestamps - accessor_95, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_84 = { - accessor_14, // input, keyframe timestamps - accessor_96, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_85 = { - accessor_14, // input, keyframe timestamps - accessor_97, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_86 = { - accessor_14, // input, keyframe timestamps - accessor_98, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_87 = { - accessor_10, // input, keyframe timestamps - accessor_99, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_88 = { - accessor_14, // input, keyframe timestamps - accessor_100, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_89 = { - accessor_14, // input, keyframe 
timestamps - accessor_101, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_90 = { - accessor_14, // input, keyframe timestamps - accessor_102, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_91 = { - accessor_14, // input, keyframe timestamps - accessor_103, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_92 = { - accessor_14, // input, keyframe timestamps - accessor_104, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_93 = { - accessor_14, // input, keyframe timestamps - accessor_105, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_94 = { - accessor_14, // input, keyframe timestamps - accessor_106, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_95 = { - accessor_14, // input, keyframe timestamps - accessor_107, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_96 = { - accessor_14, // input, keyframe timestamps - accessor_108, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_97 = { - accessor_14, // input, keyframe timestamps - accessor_109, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_98 = { - accessor_14, // input, keyframe timestamps - accessor_110, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_99 = { - accessor_14, // input, keyframe timestamps - accessor_111, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_100 = { - accessor_14, // input, keyframe timestamps - accessor_112, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_101 = { - accessor_14, // input, keyframe timestamps - accessor_113, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_102 = { - accessor_14, // input, keyframe timestamps - accessor_114, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_103 = { - accessor_14, // input, keyframe timestamps - accessor_115, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_104 = { - accessor_14, // input, keyframe timestamps - accessor_116, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_105 = { - accessor_10, // input, keyframe timestamps - accessor_117, // output, keyframe values (void *) - accessor_10__length, // length -}; - -const AnimationSampler animation_0__sampler_106 = { - accessor_14, // input, keyframe timestamps - accessor_118, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_107 = { - accessor_14, // input, keyframe timestamps - accessor_119, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_108 = { - 
accessor_14, // input, keyframe timestamps - accessor_120, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_109 = { - accessor_14, // input, keyframe timestamps - accessor_121, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_110 = { - accessor_14, // input, keyframe timestamps - accessor_122, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_111 = { - accessor_14, // input, keyframe timestamps - accessor_123, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_112 = { - accessor_14, // input, keyframe timestamps - accessor_124, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_113 = { - accessor_14, // input, keyframe timestamps - accessor_125, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_114 = { - accessor_14, // input, keyframe timestamps - accessor_126, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_115 = { - accessor_14, // input, keyframe timestamps - accessor_127, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationSampler animation_0__sampler_116 = { - accessor_14, // input, keyframe timestamps - accessor_128, // output, keyframe values (void *) - accessor_14__length, // length -}; - -const AnimationChannel animation_0__channels[] = { - &animation_0__sampler_0, // animation sampler - { - 26, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_1, // animation sampler - { - 26, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_2, // animation sampler - { - 26, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_3, // animation sampler - { - 1, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_4, // animation sampler - { - 1, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_5, // animation sampler - { - 1, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_6, // animation sampler - { - 0, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_7, // animation sampler - { - 0, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_8, // animation sampler - { - 0, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_9, // animation sampler - { - 23, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_10, // animation sampler - { - 23, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_11, // animation sampler - { - 23, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_12, // animation sampler - { - 22, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_13, // animation sampler - { - 22, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_14, // animation sampler - { - 22, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_15, // animation sampler - { - 4, // target node index - ACP__TRANSLATION, // target path - 
}, - &animation_0__sampler_16, // animation sampler - { - 4, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_17, // animation sampler - { - 4, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_18, // animation sampler - { - 3, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_19, // animation sampler - { - 3, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_20, // animation sampler - { - 3, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_21, // animation sampler - { - 2, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_22, // animation sampler - { - 2, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_23, // animation sampler - { - 2, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_24, // animation sampler - { - 6, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_25, // animation sampler - { - 6, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_26, // animation sampler - { - 6, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_27, // animation sampler - { - 5, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_28, // animation sampler - { - 5, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_29, // animation sampler - { - 5, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_30, // animation sampler - { - 12, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_31, // animation sampler - { - 12, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_32, // animation sampler - { - 12, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_33, // animation sampler - { - 11, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_34, // animation sampler - { - 11, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_35, // animation sampler - { - 11, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_36, // animation sampler - { - 10, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_37, // animation sampler - { - 10, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_38, // animation sampler - { - 10, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_39, // animation sampler - { - 9, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_40, // animation sampler - { - 9, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_41, // animation sampler - { - 9, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_42, // animation sampler - { - 8, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_43, // animation sampler - { - 8, // target node index - ACP__ROTATION, // target path - }, - &animation_0__sampler_44, // animation sampler - { - 8, // target node index - ACP__SCALE, // target path - }, - &animation_0__sampler_45, // animation sampler - { - 7, // target node index - ACP__TRANSLATION, // target path - }, - &animation_0__sampler_46, // animation sampler - { - 7, // 
target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_47, // animation sampler
-  {
-    7, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_48, // animation sampler
-  {
-    15, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_49, // animation sampler
-  {
-    15, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_50, // animation sampler
-  {
-    15, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_51, // animation sampler
-  {
-    14, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_52, // animation sampler
-  {
-    14, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_53, // animation sampler
-  {
-    14, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_54, // animation sampler
-  {
-    13, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_55, // animation sampler
-  {
-    13, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_56, // animation sampler
-  {
-    13, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_57, // animation sampler
-  {
-    21, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_58, // animation sampler
-  {
-    21, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_59, // animation sampler
-  {
-    21, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_60, // animation sampler
-  {
-    20, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_61, // animation sampler
-  {
-    20, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_62, // animation sampler
-  {
-    20, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_63, // animation sampler
-  {
-    19, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_64, // animation sampler
-  {
-    19, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_65, // animation sampler
-  {
-    19, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_66, // animation sampler
-  {
-    18, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_67, // animation sampler
-  {
-    18, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_68, // animation sampler
-  {
-    18, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_69, // animation sampler
-  {
-    17, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_70, // animation sampler
-  {
-    17, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_71, // animation sampler
-  {
-    17, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_72, // animation sampler
-  {
-    16, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_73, // animation sampler
-  {
-    16, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_74, // animation sampler
-  {
-    16, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_75, // animation sampler
-  {
-    25, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_76, // animation sampler
-  {
-    25, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_77, // animation sampler
-  {
-    25, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_78, // animation sampler
-  {
-    24, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_79, // animation sampler
-  {
-    24, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_80, // animation sampler
-  {
-    24, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_81, // animation sampler
-  {
-    27, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_82, // animation sampler
-  {
-    27, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_83, // animation sampler
-  {
-    27, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_84, // animation sampler
-  {
-    28, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_85, // animation sampler
-  {
-    28, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_86, // animation sampler
-  {
-    28, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_87, // animation sampler
-  {
-    31, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_88, // animation sampler
-  {
-    31, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_89, // animation sampler
-  {
-    31, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_90, // animation sampler
-  {
-    30, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_91, // animation sampler
-  {
-    30, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_92, // animation sampler
-  {
-    30, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_93, // animation sampler
-  {
-    29, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_94, // animation sampler
-  {
-    29, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_95, // animation sampler
-  {
-    29, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_96, // animation sampler
-  {
-    32, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_97, // animation sampler
-  {
-    32, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_98, // animation sampler
-  {
-    32, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_99, // animation sampler
-  {
-    33, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_100, // animation sampler
-  {
-    33, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_101, // animation sampler
-  {
-    33, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_102, // animation sampler
-  {
-    34, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_103, // animation sampler
-  {
-    34, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_104, // animation sampler
-  {
-    34, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_105, // animation sampler
-  {
-    37, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_106, // animation sampler
-  {
-    37, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_107, // animation sampler
-  {
-    37, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_108, // animation sampler
-  {
-    36, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_109, // animation sampler
-  {
-    36, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_110, // animation sampler
-  {
-    36, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_111, // animation sampler
-  {
-    35, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_112, // animation sampler
-  {
-    35, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_113, // animation sampler
-  {
-    35, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_0__sampler_114, // animation sampler
-  {
-    38, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_0__sampler_115, // animation sampler
-  {
-    38, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_0__sampler_116, // animation sampler
-  {
-    38, // target node index
-    ACP__SCALE, // target path
-  },
-};
-
-const AnimationSampler animation_1__sampler_0 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_130, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_1 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_131, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_2 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_133, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_3 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_134, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_4 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_135, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_5 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_136, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_6 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_137, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_7 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_138, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_8 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_139, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_9 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_140, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_10 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_141, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_11 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_142, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_12 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_143, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_13 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_144, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_14 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_145, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_15 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_146, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_16 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_147, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_17 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_148, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_18 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_149, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_19 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_150, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_20 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_151, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_21 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_152, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_22 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_153, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_23 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_154, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_24 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_155, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_25 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_156, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_26 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_157, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_27 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_158, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_28 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_159, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_29 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_160, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_30 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_161, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_31 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_162, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_32 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_163, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_33 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_164, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_34 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_165, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_35 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_166, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_36 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_167, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_37 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_168, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_38 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_169, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_39 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_170, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_40 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_171, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_41 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_172, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_42 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_173, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_43 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_174, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_44 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_175, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_45 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_176, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_46 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_177, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_47 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_178, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_48 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_179, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_49 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_180, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_50 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_181, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_51 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_182, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_52 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_183, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_53 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_184, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_54 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_185, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_55 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_186, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_56 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_187, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_57 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_188, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_58 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_189, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_59 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_190, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_60 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_191, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_61 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_192, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_62 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_193, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_63 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_194, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_64 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_195, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_65 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_196, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_66 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_197, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_67 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_198, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_68 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_199, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_69 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_200, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_70 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_201, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_71 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_202, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_72 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_203, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_73 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_204, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_74 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_205, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_75 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_206, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_76 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_207, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_77 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_208, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_78 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_209, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_79 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_210, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_80 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_211, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_81 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_212, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_82 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_213, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_83 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_214, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_84 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_215, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_85 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_216, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_86 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_217, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_87 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_218, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_88 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_219, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_89 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_220, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_90 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_221, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_91 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_222, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_92 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_223, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_93 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_224, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_94 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_225, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_95 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_226, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_96 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_227, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_97 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_228, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_98 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_229, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_99 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_230, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_100 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_231, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_101 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_232, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_102 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_233, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_103 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_234, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_104 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_235, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_105 = {
-  accessor_129, // input, keyframe timestamps
-  accessor_236, // output, keyframe values (void *)
-  accessor_129__length, // length
-};
-
-const AnimationSampler animation_1__sampler_106 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_237, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_107 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_238, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_108 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_239, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_109 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_240, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_110 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_241, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_111 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_242, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_112 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_243, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_113 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_244, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_114 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_245, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_115 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_246, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationSampler animation_1__sampler_116 = {
-  accessor_132, // input, keyframe timestamps
-  accessor_247, // output, keyframe values (void *)
-  accessor_132__length, // length
-};
-
-const AnimationChannel animation_1__channels[] = {
-  &animation_1__sampler_0, // animation sampler
-  {
-    26, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_1, // animation sampler
-  {
-    26, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_2, // animation sampler
-  {
-    26, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_3, // animation sampler
-  {
-    1, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_4, // animation sampler
-  {
-    1, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_5, // animation sampler
-  {
-    1, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_6, // animation sampler
-  {
-    0, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_7, // animation sampler
-  {
-    0, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_8, // animation sampler
-  {
-    0, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_9, // animation sampler
-  {
-    23, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_10, // animation sampler
-  {
-    23, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_11, // animation sampler
-  {
-    23, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_12, // animation sampler
-  {
-    22, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_13, // animation sampler
-  {
-    22, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_14, // animation sampler
-  {
-    22, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_15, // animation sampler
-  {
-    4, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_16, // animation sampler
-  {
-    4, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_17, // animation sampler
-  {
-    4, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_18, // animation sampler
-  {
-    3, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_19, // animation sampler
-  {
-    3, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_20, // animation sampler
-  {
-    3, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_21, // animation sampler
-  {
-    2, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_22, // animation sampler
-  {
-    2, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_23, // animation sampler
-  {
-    2, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_24, // animation sampler
-  {
-    6, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_25, // animation sampler
-  {
-    6, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_26, // animation sampler
-  {
-    6, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_27, // animation sampler
-  {
-    5, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_28, // animation sampler
-  {
-    5, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_29, // animation sampler
-  {
-    5, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_30, // animation sampler
-  {
-    12, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_31, // animation sampler
-  {
-    12, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_32, // animation sampler
-  {
-    12, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_33, // animation sampler
-  {
-    11, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_34, // animation sampler
-  {
-    11, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_35, // animation sampler
-  {
-    11, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_36, // animation sampler
-  {
-    10, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_37, // animation sampler
-  {
-    10, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_38, // animation sampler
-  {
-    10, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_39, // animation sampler
-  {
-    9, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_40, // animation sampler
-  {
-    9, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_41, // animation sampler
-  {
-    9, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_42, // animation sampler
-  {
-    8, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_43, // animation sampler
-  {
-    8, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_44, // animation sampler
-  {
-    8, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_45, // animation sampler
-  {
-    7, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_46, // animation sampler
-  {
-    7, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_47, // animation sampler
-  {
-    7, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_48, // animation sampler
-  {
-    15, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_49, // animation sampler
-  {
-    15, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_50, // animation sampler
-  {
-    15, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_51, // animation sampler
-  {
-    14, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_52, // animation sampler
-  {
-    14, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_53, // animation sampler
-  {
-    14, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_54, // animation sampler
-  {
-    13, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_55, // animation sampler
-  {
-    13, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_56, // animation sampler
-  {
-    13, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_57, // animation sampler
-  {
-    21, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_58, // animation sampler
-  {
-    21, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_59, // animation sampler
-  {
-    21, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_60, // animation sampler
-  {
-    20, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_61, // animation sampler
-  {
-    20, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_62, // animation sampler
-  {
-    20, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_63, // animation sampler
-  {
-    19, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_64, // animation sampler
-  {
-    19, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_65, // animation sampler
-  {
-    19, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_66, // animation sampler
-  {
-    18, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_67, // animation sampler
-  {
-    18, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_68, // animation sampler
-  {
-    18, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_69, // animation sampler
-  {
-    17, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_70, // animation sampler
-  {
-    17, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_71, // animation sampler
-  {
-    17, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_72, // animation sampler
-  {
-    16, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_73, // animation sampler
-  {
-    16, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_74, // animation sampler
-  {
-    16, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_75, // animation sampler
-  {
-    25, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_76, // animation sampler
-  {
-    25, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_77, // animation sampler
-  {
-    25, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_78, // animation sampler
-  {
-    24, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_79, // animation sampler
-  {
-    24, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_80, // animation sampler
-  {
-    24, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_81, // animation sampler
-  {
-    27, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_82, // animation sampler
-  {
-    27, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_83, // animation sampler
-  {
-    27, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_84, // animation sampler
-  {
-    28, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_85, // animation sampler
-  {
-    28, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_86, // animation sampler
-  {
-    28, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_87, // animation sampler
-  {
-    31, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_88, // animation sampler
-  {
-    31, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_89, // animation sampler
-  {
-    31, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_90, // animation sampler
-  {
-    30, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_91, // animation sampler
-  {
-    30, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_92, // animation sampler
-  {
-    30, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_93, // animation sampler
-  {
-    29, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_94, // animation sampler
-  {
-    29, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_95, // animation sampler
-  {
-    29, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_96, // animation sampler
-  {
-    32, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_97, // animation sampler
-  {
-    32, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_98, // animation sampler
-  {
-    32, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_99, // animation sampler
-  {
-    33, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_100, // animation sampler
-  {
-    33, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_101, // animation sampler
-  {
-    33, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_102, // animation sampler
-  {
-    34, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_103, // animation sampler
-  {
-    34, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_104, // animation sampler
-  {
-    34, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_105, // animation sampler
-  {
-    37, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_106, // animation sampler
-  {
-    37, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_107, // animation sampler
-  {
-    37, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_108, // animation sampler
-  {
-    36, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_109, // animation sampler
-  {
-    36, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_110, // animation sampler
-  {
-    36, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_111, // animation sampler
-  {
-    35, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_112, // animation sampler
-  {
-    35, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_113, // animation sampler
-  {
-    35, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_1__sampler_114, // animation sampler
-  {
-    38, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_1__sampler_115, // animation sampler
-  {
-    38, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_1__sampler_116, // animation sampler
-  {
-    38, // target node index
-    ACP__SCALE, // target path
-  },
-};
-
-const AnimationSampler animation_2__sampler_0 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_249, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_1 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_250, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_2 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_252, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_3 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_253, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_4 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_254, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_5 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_255, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_6 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_256, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_7 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_257, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_8 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_258, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_9 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_259, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_10 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_260, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_11 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_261, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_12 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_262, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_13 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_263, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_14 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_264, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_15 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_265, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_16 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_266, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_17 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_267, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_18 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_268, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_19 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_269, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_20 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_270, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_21 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_271, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_22 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_272, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_23 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_273, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_24 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_274, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_25 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_275, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_26 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_276, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_27 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_277, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_28 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_278, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_29 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_279, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_30 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_280, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_31 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_281, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_32 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_282, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_33 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_283, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_34 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_284, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_35 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_285, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_36 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_286, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_37 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_287, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_38 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_288, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_39 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_289, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_40 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_290, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_41 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_291, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_42 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_292, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_43 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_293, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_44 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_294, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_45 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_295, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_46 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_296, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_47 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_297, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_48 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_298, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_49 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_299, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_50 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_300, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_51 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_301, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_52 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_302, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_53 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_303, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_54 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_304, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_55 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_305, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_56 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_306, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_57 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_307, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_58 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_308, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_59 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_309, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_60 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_310, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_61 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_311, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_62 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_312, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_63 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_313, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_64 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_314, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_65 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_315, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_66 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_316, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_67 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_317, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_68 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_318, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_69 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_319, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_70 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_320, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_71 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_321, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_72 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_322, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_73 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_323, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_74 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_324, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_75 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_325, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_76 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_326, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_77 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_327, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_78 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_328, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_79 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_329, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_80 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_330, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_81 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_331, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_82 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_332, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_83 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_333, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_84 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_334, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_85 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_335, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_86 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_336, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_87 = {
-  accessor_248, // input, keyframe timestamps
-  accessor_337, // output, keyframe values (void *)
-  accessor_248__length, // length
-};
-
-const AnimationSampler animation_2__sampler_88 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_338, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_89 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_339, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_90 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_340, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_91 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_341, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_92 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_342, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_93 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_343, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_94 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_344, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_95 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_345, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_96 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_346, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_97 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_347, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_98 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_348, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_99 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_349, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_100 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_350, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_101 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_351, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
-const AnimationSampler animation_2__sampler_102 = {
-  accessor_251, // input, keyframe timestamps
-  accessor_352, // output, keyframe values (void *)
-  accessor_251__length, // length
-};
-
keyframe timestamps - accessor_353, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_104 = { - accessor_251, // input, keyframe timestamps - accessor_354, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_105 = { - accessor_248, // input, keyframe timestamps - accessor_355, // output, keyframe values (void *) - accessor_248__length, // length -}; - -const AnimationSampler animation_2__sampler_106 = { - accessor_251, // input, keyframe timestamps - accessor_356, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_107 = { - accessor_251, // input, keyframe timestamps - accessor_357, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_108 = { - accessor_251, // input, keyframe timestamps - accessor_358, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_109 = { - accessor_251, // input, keyframe timestamps - accessor_359, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_110 = { - accessor_251, // input, keyframe timestamps - accessor_360, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_111 = { - accessor_251, // input, keyframe timestamps - accessor_361, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_112 = { - accessor_251, // input, keyframe timestamps - accessor_362, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_113 = { - accessor_251, // input, keyframe timestamps - accessor_363, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_114 = { - accessor_251, // input, keyframe timestamps - accessor_364, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_115 = { - accessor_251, // input, keyframe timestamps - accessor_365, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationSampler animation_2__sampler_116 = { - accessor_251, // input, keyframe timestamps - accessor_366, // output, keyframe values (void *) - accessor_251__length, // length -}; - -const AnimationChannel animation_2__channels[] = { - &animation_2__sampler_0, // animation sampler - { - 26, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_1, // animation sampler - { - 26, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_2, // animation sampler - { - 26, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_3, // animation sampler - { - 1, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_4, // animation sampler - { - 1, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_5, // animation sampler - { - 1, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_6, // animation sampler - { - 0, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_7, // animation sampler - { - 0, // target node index - ACP__ROTATION, 
// target path - }, - &animation_2__sampler_8, // animation sampler - { - 0, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_9, // animation sampler - { - 23, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_10, // animation sampler - { - 23, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_11, // animation sampler - { - 23, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_12, // animation sampler - { - 22, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_13, // animation sampler - { - 22, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_14, // animation sampler - { - 22, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_15, // animation sampler - { - 4, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_16, // animation sampler - { - 4, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_17, // animation sampler - { - 4, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_18, // animation sampler - { - 3, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_19, // animation sampler - { - 3, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_20, // animation sampler - { - 3, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_21, // animation sampler - { - 2, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_22, // animation sampler - { - 2, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_23, // animation sampler - { - 2, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_24, // animation sampler - { - 6, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_25, // animation sampler - { - 6, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_26, // animation sampler - { - 6, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_27, // animation sampler - { - 5, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_28, // animation sampler - { - 5, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_29, // animation sampler - { - 5, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_30, // animation sampler - { - 12, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_31, // animation sampler - { - 12, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_32, // animation sampler - { - 12, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_33, // animation sampler - { - 11, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_34, // animation sampler - { - 11, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_35, // animation sampler - { - 11, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_36, // animation sampler - { - 10, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_37, // animation sampler - { - 10, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_38, // animation 
sampler - { - 10, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_39, // animation sampler - { - 9, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_40, // animation sampler - { - 9, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_41, // animation sampler - { - 9, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_42, // animation sampler - { - 8, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_43, // animation sampler - { - 8, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_44, // animation sampler - { - 8, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_45, // animation sampler - { - 7, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_46, // animation sampler - { - 7, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_47, // animation sampler - { - 7, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_48, // animation sampler - { - 15, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_49, // animation sampler - { - 15, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_50, // animation sampler - { - 15, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_51, // animation sampler - { - 14, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_52, // animation sampler - { - 14, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_53, // animation sampler - { - 14, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_54, // animation sampler - { - 13, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_55, // animation sampler - { - 13, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_56, // animation sampler - { - 13, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_57, // animation sampler - { - 21, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_58, // animation sampler - { - 21, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_59, // animation sampler - { - 21, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_60, // animation sampler - { - 20, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_61, // animation sampler - { - 20, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_62, // animation sampler - { - 20, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_63, // animation sampler - { - 19, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_64, // animation sampler - { - 19, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_65, // animation sampler - { - 19, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_66, // animation sampler - { - 18, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_67, // animation sampler - { - 18, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_68, // animation sampler - { - 18, // target node index - ACP__SCALE, 
// target path - }, - &animation_2__sampler_69, // animation sampler - { - 17, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_70, // animation sampler - { - 17, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_71, // animation sampler - { - 17, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_72, // animation sampler - { - 16, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_73, // animation sampler - { - 16, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_74, // animation sampler - { - 16, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_75, // animation sampler - { - 25, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_76, // animation sampler - { - 25, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_77, // animation sampler - { - 25, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_78, // animation sampler - { - 24, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_79, // animation sampler - { - 24, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_80, // animation sampler - { - 24, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_81, // animation sampler - { - 27, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_82, // animation sampler - { - 27, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_83, // animation sampler - { - 27, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_84, // animation sampler - { - 28, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_85, // animation sampler - { - 28, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_86, // animation sampler - { - 28, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_87, // animation sampler - { - 31, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_88, // animation sampler - { - 31, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_89, // animation sampler - { - 31, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_90, // animation sampler - { - 30, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_91, // animation sampler - { - 30, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_92, // animation sampler - { - 30, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_93, // animation sampler - { - 29, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_94, // animation sampler - { - 29, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_95, // animation sampler - { - 29, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_96, // animation sampler - { - 32, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_97, // animation sampler - { - 32, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_98, // animation sampler - { - 32, // target node index - ACP__SCALE, // target path - }, - 
&animation_2__sampler_99, // animation sampler - { - 33, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_100, // animation sampler - { - 33, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_101, // animation sampler - { - 33, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_102, // animation sampler - { - 34, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_103, // animation sampler - { - 34, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_104, // animation sampler - { - 34, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_105, // animation sampler - { - 37, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_106, // animation sampler - { - 37, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_107, // animation sampler - { - 37, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_108, // animation sampler - { - 36, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_109, // animation sampler - { - 36, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_110, // animation sampler - { - 36, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_111, // animation sampler - { - 35, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_112, // animation sampler - { - 35, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_113, // animation sampler - { - 35, // target node index - ACP__SCALE, // target path - }, - &animation_2__sampler_114, // animation sampler - { - 38, // target node index - ACP__TRANSLATION, // target path - }, - &animation_2__sampler_115, // animation sampler - { - 38, // target node index - ACP__ROTATION, // target path - }, - &animation_2__sampler_116, // animation sampler - { - 38, // target node index - ACP__SCALE, // target path - }, -}; - -const AnimationSampler animation_3__sampler_0 = { - accessor_367, // input, keyframe timestamps - accessor_368, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_1 = { - accessor_369, // input, keyframe timestamps - accessor_370, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_2 = { - accessor_369, // input, keyframe timestamps - accessor_371, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_3 = { - accessor_367, // input, keyframe timestamps - accessor_372, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_4 = { - accessor_367, // input, keyframe timestamps - accessor_373, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_5 = { - accessor_367, // input, keyframe timestamps - accessor_374, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_6 = { - accessor_367, // input, keyframe timestamps - accessor_375, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_7 = { - accessor_367, // input, keyframe timestamps - 
accessor_376, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_8 = { - accessor_367, // input, keyframe timestamps - accessor_377, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_9 = { - accessor_369, // input, keyframe timestamps - accessor_378, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_10 = { - accessor_369, // input, keyframe timestamps - accessor_379, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_11 = { - accessor_367, // input, keyframe timestamps - accessor_380, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_12 = { - accessor_369, // input, keyframe timestamps - accessor_381, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_13 = { - accessor_369, // input, keyframe timestamps - accessor_382, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_14 = { - accessor_369, // input, keyframe timestamps - accessor_383, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_15 = { - accessor_367, // input, keyframe timestamps - accessor_384, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_16 = { - accessor_367, // input, keyframe timestamps - accessor_385, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_17 = { - accessor_367, // input, keyframe timestamps - accessor_386, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_18 = { - accessor_367, // input, keyframe timestamps - accessor_387, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_19 = { - accessor_367, // input, keyframe timestamps - accessor_388, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_20 = { - accessor_367, // input, keyframe timestamps - accessor_389, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_21 = { - accessor_367, // input, keyframe timestamps - accessor_390, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_22 = { - accessor_367, // input, keyframe timestamps - accessor_391, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_23 = { - accessor_367, // input, keyframe timestamps - accessor_392, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_24 = { - accessor_369, // input, keyframe timestamps - accessor_393, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_25 = { - accessor_369, // input, keyframe timestamps - accessor_394, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler 
animation_3__sampler_26 = { - accessor_369, // input, keyframe timestamps - accessor_395, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_27 = { - accessor_367, // input, keyframe timestamps - accessor_396, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_28 = { - accessor_367, // input, keyframe timestamps - accessor_397, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_29 = { - accessor_367, // input, keyframe timestamps - accessor_398, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_30 = { - accessor_367, // input, keyframe timestamps - accessor_399, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_31 = { - accessor_367, // input, keyframe timestamps - accessor_400, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_32 = { - accessor_367, // input, keyframe timestamps - accessor_401, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_33 = { - accessor_367, // input, keyframe timestamps - accessor_402, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_34 = { - accessor_367, // input, keyframe timestamps - accessor_403, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_35 = { - accessor_367, // input, keyframe timestamps - accessor_404, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_36 = { - accessor_367, // input, keyframe timestamps - accessor_405, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_37 = { - accessor_367, // input, keyframe timestamps - accessor_406, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_38 = { - accessor_367, // input, keyframe timestamps - accessor_407, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_39 = { - accessor_367, // input, keyframe timestamps - accessor_408, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_40 = { - accessor_367, // input, keyframe timestamps - accessor_409, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_41 = { - accessor_367, // input, keyframe timestamps - accessor_410, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_42 = { - accessor_367, // input, keyframe timestamps - accessor_411, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_43 = { - accessor_367, // input, keyframe timestamps - accessor_412, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_44 = { - accessor_367, // input, keyframe timestamps - accessor_413, // output, keyframe values (void *) - 
accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_45 = { - accessor_367, // input, keyframe timestamps - accessor_414, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_46 = { - accessor_367, // input, keyframe timestamps - accessor_415, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_47 = { - accessor_367, // input, keyframe timestamps - accessor_416, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_48 = { - accessor_367, // input, keyframe timestamps - accessor_417, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_49 = { - accessor_367, // input, keyframe timestamps - accessor_418, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_50 = { - accessor_367, // input, keyframe timestamps - accessor_419, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_51 = { - accessor_367, // input, keyframe timestamps - accessor_420, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_52 = { - accessor_367, // input, keyframe timestamps - accessor_421, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_53 = { - accessor_367, // input, keyframe timestamps - accessor_422, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_54 = { - accessor_367, // input, keyframe timestamps - accessor_423, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_55 = { - accessor_367, // input, keyframe timestamps - accessor_424, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_56 = { - accessor_367, // input, keyframe timestamps - accessor_425, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_57 = { - accessor_367, // input, keyframe timestamps - accessor_426, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_58 = { - accessor_367, // input, keyframe timestamps - accessor_427, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_59 = { - accessor_367, // input, keyframe timestamps - accessor_428, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_60 = { - accessor_367, // input, keyframe timestamps - accessor_429, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_61 = { - accessor_367, // input, keyframe timestamps - accessor_430, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_62 = { - accessor_367, // input, keyframe timestamps - accessor_431, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_63 = { - accessor_367, // input, keyframe 
timestamps - accessor_432, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_64 = { - accessor_367, // input, keyframe timestamps - accessor_433, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_65 = { - accessor_367, // input, keyframe timestamps - accessor_434, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_66 = { - accessor_367, // input, keyframe timestamps - accessor_435, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_67 = { - accessor_367, // input, keyframe timestamps - accessor_436, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_68 = { - accessor_367, // input, keyframe timestamps - accessor_437, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_69 = { - accessor_367, // input, keyframe timestamps - accessor_438, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_70 = { - accessor_367, // input, keyframe timestamps - accessor_439, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_71 = { - accessor_367, // input, keyframe timestamps - accessor_440, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_72 = { - accessor_367, // input, keyframe timestamps - accessor_441, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_73 = { - accessor_367, // input, keyframe timestamps - accessor_442, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_74 = { - accessor_367, // input, keyframe timestamps - accessor_443, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_75 = { - accessor_367, // input, keyframe timestamps - accessor_444, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_76 = { - accessor_367, // input, keyframe timestamps - accessor_445, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_77 = { - accessor_367, // input, keyframe timestamps - accessor_446, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_78 = { - accessor_367, // input, keyframe timestamps - accessor_447, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_79 = { - accessor_367, // input, keyframe timestamps - accessor_448, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_80 = { - accessor_367, // input, keyframe timestamps - accessor_449, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_81 = { - accessor_367, // input, keyframe timestamps - accessor_450, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler 
animation_3__sampler_82 = { - accessor_367, // input, keyframe timestamps - accessor_451, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_83 = { - accessor_367, // input, keyframe timestamps - accessor_452, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_84 = { - accessor_367, // input, keyframe timestamps - accessor_453, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_85 = { - accessor_367, // input, keyframe timestamps - accessor_454, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_86 = { - accessor_367, // input, keyframe timestamps - accessor_455, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_87 = { - accessor_369, // input, keyframe timestamps - accessor_456, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_88 = { - accessor_367, // input, keyframe timestamps - accessor_457, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_89 = { - accessor_367, // input, keyframe timestamps - accessor_458, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_90 = { - accessor_367, // input, keyframe timestamps - accessor_459, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_91 = { - accessor_367, // input, keyframe timestamps - accessor_460, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_92 = { - accessor_367, // input, keyframe timestamps - accessor_461, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_93 = { - accessor_367, // input, keyframe timestamps - accessor_462, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_94 = { - accessor_367, // input, keyframe timestamps - accessor_463, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_95 = { - accessor_367, // input, keyframe timestamps - accessor_464, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_96 = { - accessor_367, // input, keyframe timestamps - accessor_465, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_97 = { - accessor_367, // input, keyframe timestamps - accessor_466, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_98 = { - accessor_367, // input, keyframe timestamps - accessor_467, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_99 = { - accessor_367, // input, keyframe timestamps - accessor_468, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_100 = { - accessor_367, // input, keyframe timestamps - accessor_469, // output, keyframe values (void *) - 
accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_101 = { - accessor_367, // input, keyframe timestamps - accessor_470, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_102 = { - accessor_367, // input, keyframe timestamps - accessor_471, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_103 = { - accessor_367, // input, keyframe timestamps - accessor_472, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_104 = { - accessor_367, // input, keyframe timestamps - accessor_473, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_105 = { - accessor_369, // input, keyframe timestamps - accessor_474, // output, keyframe values (void *) - accessor_369__length, // length -}; - -const AnimationSampler animation_3__sampler_106 = { - accessor_367, // input, keyframe timestamps - accessor_475, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_107 = { - accessor_367, // input, keyframe timestamps - accessor_476, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_108 = { - accessor_367, // input, keyframe timestamps - accessor_477, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_109 = { - accessor_367, // input, keyframe timestamps - accessor_478, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_110 = { - accessor_367, // input, keyframe timestamps - accessor_479, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_111 = { - accessor_367, // input, keyframe timestamps - accessor_480, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_112 = { - accessor_367, // input, keyframe timestamps - accessor_481, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_113 = { - accessor_367, // input, keyframe timestamps - accessor_482, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_114 = { - accessor_367, // input, keyframe timestamps - accessor_483, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_115 = { - accessor_367, // input, keyframe timestamps - accessor_484, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationSampler animation_3__sampler_116 = { - accessor_367, // input, keyframe timestamps - accessor_485, // output, keyframe values (void *) - accessor_367__length, // length -}; - -const AnimationChannel animation_3__channels[] = { - &animation_3__sampler_0, // animation sampler - { - 26, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_1, // animation sampler - { - 26, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_2, // animation sampler - { - 26, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_3, // animation sampler - { - 1, // 
target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_4, // animation sampler - { - 1, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_5, // animation sampler - { - 1, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_6, // animation sampler - { - 0, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_7, // animation sampler - { - 0, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_8, // animation sampler - { - 0, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_9, // animation sampler - { - 23, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_10, // animation sampler - { - 23, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_11, // animation sampler - { - 23, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_12, // animation sampler - { - 22, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_13, // animation sampler - { - 22, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_14, // animation sampler - { - 22, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_15, // animation sampler - { - 4, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_16, // animation sampler - { - 4, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_17, // animation sampler - { - 4, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_18, // animation sampler - { - 3, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_19, // animation sampler - { - 3, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_20, // animation sampler - { - 3, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_21, // animation sampler - { - 2, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_22, // animation sampler - { - 2, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_23, // animation sampler - { - 2, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_24, // animation sampler - { - 6, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_25, // animation sampler - { - 6, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_26, // animation sampler - { - 6, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_27, // animation sampler - { - 5, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_28, // animation sampler - { - 5, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_29, // animation sampler - { - 5, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_30, // animation sampler - { - 12, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_31, // animation sampler - { - 12, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_32, // animation sampler - { - 12, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_33, // animation sampler - { - 11, // target node index - ACP__TRANSLATION, // target path - }, - 
&animation_3__sampler_34, // animation sampler - { - 11, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_35, // animation sampler - { - 11, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_36, // animation sampler - { - 10, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_37, // animation sampler - { - 10, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_38, // animation sampler - { - 10, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_39, // animation sampler - { - 9, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_40, // animation sampler - { - 9, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_41, // animation sampler - { - 9, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_42, // animation sampler - { - 8, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_43, // animation sampler - { - 8, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_44, // animation sampler - { - 8, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_45, // animation sampler - { - 7, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_46, // animation sampler - { - 7, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_47, // animation sampler - { - 7, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_48, // animation sampler - { - 15, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_49, // animation sampler - { - 15, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_50, // animation sampler - { - 15, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_51, // animation sampler - { - 14, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_52, // animation sampler - { - 14, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_53, // animation sampler - { - 14, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_54, // animation sampler - { - 13, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_55, // animation sampler - { - 13, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_56, // animation sampler - { - 13, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_57, // animation sampler - { - 21, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_58, // animation sampler - { - 21, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_59, // animation sampler - { - 21, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_60, // animation sampler - { - 20, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_61, // animation sampler - { - 20, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_62, // animation sampler - { - 20, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_63, // animation sampler - { - 19, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_64, // animation sampler - { - 
19, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_65, // animation sampler - { - 19, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_66, // animation sampler - { - 18, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_67, // animation sampler - { - 18, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_68, // animation sampler - { - 18, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_69, // animation sampler - { - 17, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_70, // animation sampler - { - 17, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_71, // animation sampler - { - 17, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_72, // animation sampler - { - 16, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_73, // animation sampler - { - 16, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_74, // animation sampler - { - 16, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_75, // animation sampler - { - 25, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_76, // animation sampler - { - 25, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_77, // animation sampler - { - 25, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_78, // animation sampler - { - 24, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_79, // animation sampler - { - 24, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_80, // animation sampler - { - 24, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_81, // animation sampler - { - 27, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_82, // animation sampler - { - 27, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_83, // animation sampler - { - 27, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_84, // animation sampler - { - 28, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_85, // animation sampler - { - 28, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_86, // animation sampler - { - 28, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_87, // animation sampler - { - 31, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_88, // animation sampler - { - 31, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_89, // animation sampler - { - 31, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_90, // animation sampler - { - 30, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_91, // animation sampler - { - 30, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_92, // animation sampler - { - 30, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_93, // animation sampler - { - 29, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_94, // animation sampler - { - 29, // target node index - ACP__ROTATION, // 
target path - }, - &animation_3__sampler_95, // animation sampler - { - 29, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_96, // animation sampler - { - 32, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_97, // animation sampler - { - 32, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_98, // animation sampler - { - 32, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_99, // animation sampler - { - 33, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_100, // animation sampler - { - 33, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_101, // animation sampler - { - 33, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_102, // animation sampler - { - 34, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_103, // animation sampler - { - 34, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_104, // animation sampler - { - 34, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_105, // animation sampler - { - 37, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_106, // animation sampler - { - 37, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_107, // animation sampler - { - 37, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_108, // animation sampler - { - 36, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_109, // animation sampler - { - 36, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_110, // animation sampler - { - 36, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_111, // animation sampler - { - 35, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_112, // animation sampler - { - 35, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_113, // animation sampler - { - 35, // target node index - ACP__SCALE, // target path - }, - &animation_3__sampler_114, // animation sampler - { - 38, // target node index - ACP__TRANSLATION, // target path - }, - &animation_3__sampler_115, // animation sampler - { - 38, // target node index - ACP__ROTATION, // target path - }, - &animation_3__sampler_116, // animation sampler - { - 38, // target node index - ACP__SCALE, // target path - }, -}; - -const AnimationSampler animation_4__sampler_0 = { - accessor_486, // input, keyframe timestamps - accessor_487, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_1 = { - accessor_486, // input, keyframe timestamps - accessor_488, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_2 = { - accessor_486, // input, keyframe timestamps - accessor_489, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_3 = { - accessor_490, // input, keyframe timestamps - accessor_491, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_4 = { - accessor_490, // input, keyframe timestamps - accessor_492, // output, keyframe values (void *) - accessor_490__length, // length -}; 
- -const AnimationSampler animation_4__sampler_5 = { - accessor_490, // input, keyframe timestamps - accessor_493, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_6 = { - accessor_490, // input, keyframe timestamps - accessor_494, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_7 = { - accessor_490, // input, keyframe timestamps - accessor_495, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_8 = { - accessor_490, // input, keyframe timestamps - accessor_496, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_9 = { - accessor_486, // input, keyframe timestamps - accessor_497, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_10 = { - accessor_486, // input, keyframe timestamps - accessor_498, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_11 = { - accessor_490, // input, keyframe timestamps - accessor_499, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_12 = { - accessor_486, // input, keyframe timestamps - accessor_500, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_13 = { - accessor_486, // input, keyframe timestamps - accessor_501, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_14 = { - accessor_486, // input, keyframe timestamps - accessor_502, // output, keyframe values (void *) - accessor_486__length, // length -}; - -const AnimationSampler animation_4__sampler_15 = { - accessor_490, // input, keyframe timestamps - accessor_503, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_16 = { - accessor_490, // input, keyframe timestamps - accessor_504, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_17 = { - accessor_490, // input, keyframe timestamps - accessor_505, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_18 = { - accessor_490, // input, keyframe timestamps - accessor_506, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_19 = { - accessor_490, // input, keyframe timestamps - accessor_507, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_20 = { - accessor_490, // input, keyframe timestamps - accessor_508, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_21 = { - accessor_490, // input, keyframe timestamps - accessor_509, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_22 = { - accessor_490, // input, keyframe timestamps - accessor_510, // output, keyframe values (void *) - accessor_490__length, // length -}; - -const AnimationSampler animation_4__sampler_23 = { - accessor_490, // input, keyframe timestamps - accessor_511, // output, keyframe 
values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_24 = {
-  accessor_486, // input, keyframe timestamps
-  accessor_512, // output, keyframe values (void *)
-  accessor_486__length, // length
-};
-
-const AnimationSampler animation_4__sampler_25 = {
-  accessor_486, // input, keyframe timestamps
-  accessor_513, // output, keyframe values (void *)
-  accessor_486__length, // length
-};
-
-const AnimationSampler animation_4__sampler_26 = {
-  accessor_486, // input, keyframe timestamps
-  accessor_514, // output, keyframe values (void *)
-  accessor_486__length, // length
-};
-
-const AnimationSampler animation_4__sampler_27 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_515, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_28 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_516, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_29 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_517, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_30 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_518, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_31 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_519, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_32 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_520, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_33 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_521, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_34 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_522, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_35 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_523, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_36 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_524, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_37 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_525, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_38 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_526, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_39 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_527, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_40 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_528, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_41 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_529, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_42 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_530, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_43 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_531, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_44 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_532, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_45 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_533, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_46 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_534, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_47 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_535, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_48 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_536, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_49 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_537, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_50 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_538, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_51 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_539, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_52 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_540, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_53 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_541, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_54 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_542, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_55 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_543, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_56 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_544, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_57 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_545, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_58 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_546, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_59 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_547, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_60 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_548, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_61 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_549, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_62 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_550, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_63 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_551, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_64 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_552, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_65 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_553, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_66 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_554, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_67 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_555, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_68 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_556, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_69 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_557, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_70 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_558, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_71 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_559, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_72 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_560, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_73 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_561, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_74 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_562, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_75 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_563, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_76 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_564, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_77 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_565, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_78 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_566, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_79 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_567, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_80 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_568, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_81 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_569, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_82 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_570, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_83 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_571, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_84 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_572, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_85 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_573, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_86 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_574, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_87 = {
-  accessor_486, // input, keyframe timestamps
-  accessor_575, // output, keyframe values (void *)
-  accessor_486__length, // length
-};
-
-const AnimationSampler animation_4__sampler_88 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_576, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_89 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_577, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_90 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_578, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_91 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_579, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_92 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_580, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_93 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_581, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_94 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_582, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_95 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_583, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_96 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_584, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_97 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_585, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_98 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_586, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_99 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_587, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_100 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_588, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_101 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_589, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_102 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_590, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_103 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_591, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_104 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_592, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_105 = {
-  accessor_486, // input, keyframe timestamps
-  accessor_593, // output, keyframe values (void *)
-  accessor_486__length, // length
-};
-
-const AnimationSampler animation_4__sampler_106 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_594, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_107 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_595, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_108 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_596, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_109 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_597, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_110 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_598, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_111 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_599, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_112 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_600, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_113 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_601, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_114 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_602, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_115 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_603, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
-const AnimationSampler animation_4__sampler_116 = {
-  accessor_490, // input, keyframe timestamps
-  accessor_604, // output, keyframe values (void *)
-  accessor_490__length, // length
-};
-
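The samplers above all share one of two timestamp tracks (accessor_486 or accessor_490) and differ only in their output accessor. A minimal sketch of how one such sampler could be evaluated at a time t, assuming the field layout implied by the comments (float timestamps, type-erased values, shared keyframe count) and plain linear interpolation; the struct mirror and sample_vec3 are illustrative, not part of this patch, and the unqualified XMFLOAT3/XMVECTOR names assume the DirectX names are in scope as they are for the generated arrays below:

    // Assumed mirror of the generated sampler layout (see comments above).
    struct AnimationSampler {
      const float * input;   // keyframe timestamps
      const void * output;   // keyframe values (void *)
      int length;            // keyframe count
    };

    // Hypothetical helper: evaluate a TRANSLATION or SCALE sampler
    // (XMFLOAT3 outputs) at time t by lerping the bracketing keyframes.
    static XMVECTOR sample_vec3(const AnimationSampler& s, float t)
    {
      const XMFLOAT3 * v = static_cast<const XMFLOAT3 *>(s.output);
      if (t <= s.input[0]) return XMLoadFloat3(&v[0]);
      for (int i = 0; i + 1 < s.length; i++) {
        if (t < s.input[i + 1]) {
          float u = (t - s.input[i]) / (s.input[i + 1] - s.input[i]);
          return XMVectorLerp(XMLoadFloat3(&v[i]), XMLoadFloat3(&v[i + 1]), u);
        }
      }
      return XMLoadFloat3(&v[s.length - 1]);
    }

A ROTATION sampler would instead interpolate XMFLOAT4 quaternions with XMQuaternionSlerp rather than XMVectorLerp.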
-const AnimationChannel animation_4__channels[] = {
-  &animation_4__sampler_0, // animation sampler
-  {
-    26, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_1, // animation sampler
-  {
-    26, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_2, // animation sampler
-  {
-    26, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_3, // animation sampler
-  {
-    1, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_4, // animation sampler
-  {
-    1, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_5, // animation sampler
-  {
-    1, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_6, // animation sampler
-  {
-    0, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_7, // animation sampler
-  {
-    0, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_8, // animation sampler
-  {
-    0, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_9, // animation sampler
-  {
-    23, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_10, // animation sampler
-  {
-    23, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_11, // animation sampler
-  {
-    23, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_12, // animation sampler
-  {
-    22, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_13, // animation sampler
-  {
-    22, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_14, // animation sampler
-  {
-    22, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_15, // animation sampler
-  {
-    4, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_16, // animation sampler
-  {
-    4, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_17, // animation sampler
-  {
-    4, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_18, // animation sampler
-  {
-    3, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_19, // animation sampler
-  {
-    3, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_20, // animation sampler
-  {
-    3, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_21, // animation sampler
-  {
-    2, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_22, // animation sampler
-  {
-    2, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_23, // animation sampler
-  {
-    2, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_24, // animation sampler
-  {
-    6, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_25, // animation sampler
-  {
-    6, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_26, // animation sampler
-  {
-    6, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_27, // animation sampler
-  {
-    5, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_28, // animation sampler
-  {
-    5, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_29, // animation sampler
-  {
-    5, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_30, // animation sampler
-  {
-    12, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_31, // animation sampler
-  {
-    12, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_32, // animation sampler
-  {
-    12, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_33, // animation sampler
-  {
-    11, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_34, // animation sampler
-  {
-    11, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_35, // animation sampler
-  {
-    11, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_36, // animation sampler
-  {
-    10, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_37, // animation sampler
-  {
-    10, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_38, // animation sampler
-  {
-    10, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_39, // animation sampler
-  {
-    9, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_40, // animation sampler
-  {
-    9, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_41, // animation sampler
-  {
-    9, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_42, // animation sampler
-  {
-    8, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_43, // animation sampler
-  {
-    8, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_44, // animation sampler
-  {
-    8, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_45, // animation sampler
-  {
-    7, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_46, // animation sampler
-  {
-    7, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_47, // animation sampler
-  {
-    7, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_48, // animation sampler
-  {
-    15, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_49, // animation sampler
-  {
-    15, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_50, // animation sampler
-  {
-    15, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_51, // animation sampler
-  {
-    14, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_52, // animation sampler
-  {
-    14, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_53, // animation sampler
-  {
-    14, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_54, // animation sampler
-  {
-    13, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_55, // animation sampler
-  {
-    13, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_56, // animation sampler
-  {
-    13, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_57, // animation sampler
-  {
-    21, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_58, // animation sampler
-  {
-    21, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_59, // animation sampler
-  {
-    21, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_60, // animation sampler
-  {
-    20, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_61, // animation sampler
-  {
-    20, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_62, // animation sampler
-  {
-    20, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_63, // animation sampler
-  {
-    19, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_64, // animation sampler
-  {
-    19, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_65, // animation sampler
-  {
-    19, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_66, // animation sampler
-  {
-    18, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_67, // animation sampler
-  {
-    18, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_68, // animation sampler
-  {
-    18, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_69, // animation sampler
-  {
-    17, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_70, // animation sampler
-  {
-    17, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_71, // animation sampler
-  {
-    17, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_72, // animation sampler
-  {
-    16, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_73, // animation sampler
-  {
-    16, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_74, // animation sampler
-  {
-    16, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_75, // animation sampler
-  {
-    25, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_76, // animation sampler
-  {
-    25, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_77, // animation sampler
-  {
-    25, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_78, // animation sampler
-  {
-    24, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_79, // animation sampler
-  {
-    24, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_80, // animation sampler
-  {
-    24, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_81, // animation sampler
-  {
-    27, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_82, // animation sampler
-  {
-    27, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_83, // animation sampler
-  {
-    27, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_84, // animation sampler
-  {
-    28, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_85, // animation sampler
-  {
-    28, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_86, // animation sampler
-  {
-    28, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_87, // animation sampler
-  {
-    31, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_88, // animation sampler
-  {
-    31, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_89, // animation sampler
-  {
-    31, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_90, // animation sampler
-  {
-    30, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_91, // animation sampler
-  {
-    30, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_92, // animation sampler
-  {
-    30, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_93, // animation sampler
-  {
-    29, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_94, // animation sampler
-  {
-    29, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_95, // animation sampler
-  {
-    29, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_96, // animation sampler
-  {
-    32, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_97, // animation sampler
-  {
-    32, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_98, // animation sampler
-  {
-    32, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_99, // animation sampler
-  {
-    33, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_100, // animation sampler
-  {
-    33, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_101, // animation sampler
-  {
-    33, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_102, // animation sampler
-  {
-    34, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_103, // animation sampler
-  {
-    34, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_104, // animation sampler
-  {
-    34, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_105, // animation sampler
-  {
-    37, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_106, // animation sampler
-  {
-    37, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_107, // animation sampler
-  {
-    37, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_108, // animation sampler
-  {
-    36, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_109, // animation sampler
-  {
-    36, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_110, // animation sampler
-  {
-    36, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_111, // animation sampler
-  {
-    35, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_112, // animation sampler
-  {
-    35, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_113, // animation sampler
-  {
-    35, // target node index
-    ACP__SCALE, // target path
-  },
-  &animation_4__sampler_114, // animation sampler
-  {
-    38, // target node index
-    ACP__TRANSLATION, // target path
-  },
-  &animation_4__sampler_115, // animation sampler
-  {
-    38, // target node index
-    ACP__ROTATION, // target path
-  },
-  &animation_4__sampler_116, // animation sampler
-  {
-    38, // target node index
-    ACP__SCALE, // target path
-  },
-};
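The tables added below use DirectXMath's storage types in place of d3dxmath's. DirectXMath keeps storage structs such as XMFLOAT3 separate from the SIMD register type XMVECTOR, so arrays like accessor_0 stay plain aggregates and are loaded per element only when math is needed; a minimal sketch (the rotation matrix is arbitrary, for illustration only):

    XMVECTOR p = XMLoadFloat3(&robot_player::accessor_0[0]);
    p = XMVector3Transform(p, XMMatrixRotationY(XM_PIDIV2));
    XMFLOAT3 out;
    XMStoreFloat3(&out, p);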
+#include "directxmath/directxmath.h"
+#include "gltf.hpp"
+#include "robot_player.hpp"
+namespace robot_player {
+const XMFLOAT3 accessor_0[] = {
+  XMFLOAT3( 0.2478682f, 1.9170125f, -0.1466553f),
+  XMFLOAT3( 0.2478682f, 1.4493124f, -0.0112957f),
+  XMFLOAT3( 0.2478682f, 1.8967085f, -0.2168102f),
+  XMFLOAT3( 0.2478682f, 1.4290085f, -0.0814507f),
+  XMFLOAT3( 0.2434468f, 1.7109127f, -0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.7109127f, -0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.7109127f, -0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.2240191f, -0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.2240191f, -0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.2240191f, -0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.7109127f, 0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.7109127f, 0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.7109127f, 0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.2240191f, 0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.2240191f, 0.2434468f),
+  XMFLOAT3( 0.2434468f, 1.2240191f, 0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.7109127f, -0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.7109127f, -0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.7109127f, -0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.2240191f, -0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.2240191f, -0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.2240191f, -0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.7109127f, 0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.7109127f, 0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.7109127f, 0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.2240191f, 0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.2240191f, 0.2434468f),
+  XMFLOAT3(-0.2434468f, 1.2240191f, 0.2434468f),
+  XMFLOAT3( 0.1708783f, 0.7860264f, -0.1460781f),
+  XMFLOAT3( 0.1708783f, 0.7860264f, -0.1460781f),
+  XMFLOAT3( 0.1708783f, 0.7860264f, -0.1460781f),
+  XMFLOAT3( 0.1708938f, 0.6400828f, -0.1464314f),
+  XMFLOAT3( 0.1708938f, 0.6400828f, -0.1464314f),
+  XMFLOAT3( 0.1708938f, 0.6400828f, -0.1464314f),
+  XMFLOAT3( 0.1709236f, 0.7858657f, 0.1457004f),
+  XMFLOAT3( 0.1709236f, 0.7858657f, 0.1457004f),
+  XMFLOAT3( 0.1709236f, 0.7858657f, 0.1457004f),
+  XMFLOAT3( 0.1709363f, 0.6402411f, 0.1461338f),
+  XMFLOAT3( 0.1709363f, 0.6402411f, 0.1461338f),
+  XMFLOAT3( 0.1709363f, 0.6402411f, 0.1461338f),
+  XMFLOAT3(-0.1700494f, 0.7855206f, -0.1460781f),
+  XMFLOAT3(-0.1700494f, 0.7855206f, -0.1460781f),
+  XMFLOAT3(-0.1700494f, 0.7855206f, -0.1460781f),
+  XMFLOAT3(-0.1700649f, 0.6395770f, -0.1464314f),
+  XMFLOAT3(-0.1700649f, 0.6395770f, -0.1464314f),
+  XMFLOAT3(-0.1700649f, 0.6395770f, -0.1464314f),
+  XMFLOAT3(-0.1700947f, 0.7853599f, 0.1457004f),
+  XMFLOAT3(-0.1700947f, 0.7853599f, 0.1457004f),
+  XMFLOAT3(-0.1700947f, 0.7853599f, 0.1457004f),
+  XMFLOAT3(-0.1701074f, 0.6397355f, 0.1461338f),
+  XMFLOAT3(-0.1701074f, 0.6397355f, 0.1461338f),
+  XMFLOAT3(-0.1701074f, 0.6397355f, 0.1461338f),
+  XMFLOAT3( 0.1492691f, 0.6476287f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.6476287f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.6476287f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.3873219f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.3873219f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.3873219f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.6455109f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.6455109f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.6455109f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.3852040f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.3852040f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.3852040f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.6476287f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.6476287f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.6476287f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.3873219f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.3873219f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.3873219f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.6455109f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.6455109f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.6455109f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.3852040f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.3852040f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.3852040f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.6476287f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.6476287f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.6476287f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.3873219f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.3873219f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.3873219f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.6455109f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.6455109f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.6455109f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.3852040f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.3852040f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.3852040f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.6476287f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.6476287f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.6476287f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.3873219f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.3873219f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.3873219f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.6455109f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.6455109f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.6455109f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.3852040f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.3852040f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.3852040f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.3870092f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.3870092f, -0.0631194f),
+  XMFLOAT3( 0.1492691f, 0.3870092f, -0.0631194f),
+  XMFLOAT3( 0.1461064f, 0.1179388f, -0.0593852f),
+  XMFLOAT3( 0.1461064f, 0.1179388f, -0.0593852f),
+  XMFLOAT3( 0.1461064f, 0.1179388f, -0.0593852f),
+  XMFLOAT3( 0.1492691f, 0.3840750f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.3840750f, 0.0321217f),
+  XMFLOAT3( 0.1492691f, 0.3840750f, 0.0321217f),
+  XMFLOAT3( 0.1461064f, 0.1153777f, 0.0237443f),
+  XMFLOAT3( 0.1461064f, 0.1153777f, 0.0237443f),
+  XMFLOAT3( 0.1461064f, 0.1153777f, 0.0237443f),
+  XMFLOAT3( 0.0571711f, 0.3870092f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.3870092f, -0.0631194f),
+  XMFLOAT3( 0.0571711f, 0.3870092f, -0.0631194f),
+  XMFLOAT3( 0.0603339f, 0.1179388f, -0.0593852f),
+  XMFLOAT3( 0.0603339f, 0.1179388f, -0.0593852f),
+  XMFLOAT3( 0.0603339f, 0.1179388f, -0.0593852f),
+  XMFLOAT3( 0.0571711f, 0.3840750f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.3840750f, 0.0321217f),
+  XMFLOAT3( 0.0571711f, 0.3840750f, 0.0321217f),
+  XMFLOAT3( 0.0603339f, 0.1153777f, 0.0237443f),
+  XMFLOAT3( 0.0603339f, 0.1153777f, 0.0237443f),
+  XMFLOAT3( 0.0603339f, 0.1153777f, 0.0237443f),
+  XMFLOAT3(-0.1492691f, 0.3870092f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.3870092f, -0.0631194f),
+  XMFLOAT3(-0.1492691f, 0.3870092f, -0.0631194f),
+  XMFLOAT3(-0.1461064f, 0.1179388f, -0.0593852f),
+  XMFLOAT3(-0.1461064f, 0.1179388f, -0.0593852f),
+  XMFLOAT3(-0.1461064f, 0.1179388f, -0.0593852f),
+  XMFLOAT3(-0.1492691f, 0.3840749f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.3840749f, 0.0321217f),
+  XMFLOAT3(-0.1492691f, 0.3840749f, 0.0321217f),
+  XMFLOAT3(-0.1461064f, 0.1153776f, 0.0237443f),
+  XMFLOAT3(-0.1461064f, 0.1153776f, 0.0237443f),
+  XMFLOAT3(-0.1461064f, 0.1153776f, 0.0237443f),
+  XMFLOAT3(-0.0571711f, 0.3870092f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.3870092f, -0.0631194f),
+  XMFLOAT3(-0.0571711f, 0.3870092f, -0.0631194f),
+  XMFLOAT3(-0.0603339f, 0.1179388f, -0.0593852f),
+  XMFLOAT3(-0.0603339f, 0.1179388f, -0.0593852f),
+  XMFLOAT3(-0.0603339f, 0.1179388f, -0.0593852f),
+  XMFLOAT3(-0.0571711f, 0.3840749f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.3840749f, 0.0321217f),
+  XMFLOAT3(-0.0571711f, 0.3840749f, 0.0321217f),
+  XMFLOAT3(-0.0603339f, 0.1153776f, 0.0237443f),
+  XMFLOAT3(-0.0603339f, 0.1153776f, 0.0237443f),
+  XMFLOAT3(-0.0603339f, 0.1153776f, 0.0237443f),
+  XMFLOAT3( 0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.1512716f, -0.0199962f, -0.0600954f),
+  XMFLOAT3( 0.1512716f, -0.0199962f, -0.0600954f),
+  XMFLOAT3( 0.1512716f, -0.0199962f, -0.0600954f),
+  XMFLOAT3( 0.1512716f, 0.0296853f, 0.1167542f),
+  XMFLOAT3( 0.1512716f, 0.0296853f, 0.1167542f),
+  XMFLOAT3( 0.1512716f, 0.0296853f, 0.1167542f),
+  XMFLOAT3( 0.1512716f, -0.0204037f, 0.1167542f),
+  XMFLOAT3( 0.1512716f, -0.0204037f, 0.1167542f),
+  XMFLOAT3( 0.1512716f, -0.0204037f, 0.1167542f),
+  XMFLOAT3( 0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3( 0.0591736f, -0.0199962f, -0.0600954f),
+  XMFLOAT3( 0.0591736f, -0.0199962f, -0.0600954f),
+  XMFLOAT3( 0.0591736f, -0.0199962f, -0.0600954f),
+  XMFLOAT3( 0.0591736f, 0.0296853f, 0.1167542f),
+  XMFLOAT3( 0.0591736f, 0.0296853f, 0.1167542f),
+  XMFLOAT3( 0.0591736f, 0.0296853f, 0.1167542f),
+  XMFLOAT3( 0.0591736f, -0.0204037f, 0.1167542f),
+  XMFLOAT3( 0.0591736f, -0.0204037f, 0.1167542f),
+  XMFLOAT3( 0.0591736f, -0.0204037f, 0.1167542f),
+  XMFLOAT3( 0.0591736f, -0.0201999f, 0.0283294f),
+  XMFLOAT3( 0.0591736f, -0.0201999f, 0.0283294f),
+  XMFLOAT3( 0.0591736f, -0.0201999f, 0.0283294f),
+  XMFLOAT3( 0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3( 0.1512716f, -0.0201999f, 0.0283294f),
+  XMFLOAT3( 0.1512716f, -0.0201999f, 0.0283294f),
+  XMFLOAT3( 0.1512716f, -0.0201999f, 0.0283294f),
+  XMFLOAT3( 0.1646186f, 0.1424436f, -0.0725503f),
+  XMFLOAT3( 0.1646186f, 0.1424436f, -0.0725503f),
+  XMFLOAT3( 0.1646186f, 0.1424436f, -0.0725503f),
+  XMFLOAT3( 0.0458267f, 0.1424436f, -0.0725503f),
+  XMFLOAT3( 0.0458267f, 0.1424436f, -0.0725503f),
+  XMFLOAT3( 0.0458267f, 0.1424436f, -0.0725503f),
+  XMFLOAT3( 0.1646186f, 0.1421875f, 0.0385886f),
+  XMFLOAT3( 0.1646186f, 0.1421875f, 0.0385886f),
+  XMFLOAT3( 0.1646186f, 0.1421875f, 0.0385886f),
+  XMFLOAT3( 0.0458267f, 0.1421875f, 0.0385886f),
+  XMFLOAT3( 0.0458267f, 0.1421875f, 0.0385886f),
+  XMFLOAT3( 0.0458267f, 0.1421875f, 0.0385886f),
+  XMFLOAT3(-0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.1549126f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.1512717f, -0.0199962f, -0.0600954f),
+  XMFLOAT3(-0.1512717f, -0.0199962f, -0.0600954f),
+  XMFLOAT3(-0.1512717f, -0.0199962f, -0.0600954f),
+  XMFLOAT3(-0.1512717f, 0.0296853f, 0.1167542f),
+  XMFLOAT3(-0.1512717f, 0.0296853f, 0.1167542f),
+  XMFLOAT3(-0.1512717f, 0.0296853f, 0.1167542f),
+  XMFLOAT3(-0.1512717f, -0.0204037f, 0.1167542f),
+  XMFLOAT3(-0.1512717f, -0.0204037f, 0.1167542f),
+  XMFLOAT3(-0.1512717f, -0.0204037f, 0.1167542f),
+  XMFLOAT3(-0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.0555327f, 0.0675215f, -0.0596355f),
+  XMFLOAT3(-0.0591736f, -0.0199962f, -0.0600954f),
+  XMFLOAT3(-0.0591736f, -0.0199962f, -0.0600954f),
+  XMFLOAT3(-0.0591736f, -0.0199962f, -0.0600954f),
+  XMFLOAT3(-0.0591736f, 0.0296853f, 0.1167542f),
+  XMFLOAT3(-0.0591736f, 0.0296853f, 0.1167542f),
+  XMFLOAT3(-0.0591736f, 0.0296853f, 0.1167542f),
+  XMFLOAT3(-0.0591736f, -0.0204037f, 0.1167542f),
+  XMFLOAT3(-0.0591736f, -0.0204037f, 0.1167542f),
+  XMFLOAT3(-0.0591736f, -0.0204037f, 0.1167542f),
+  XMFLOAT3(-0.0591736f, -0.0201999f, 0.0283294f),
+  XMFLOAT3(-0.0591736f, -0.0201999f, 0.0283294f),
+  XMFLOAT3(-0.0591736f, -0.0201999f, 0.0283294f),
+  XMFLOAT3(-0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.1549126f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.0555327f, 0.0673177f, 0.0287893f),
+  XMFLOAT3(-0.1512717f, -0.0201999f, 0.0283294f),
+  XMFLOAT3(-0.1512717f, -0.0201999f, 0.0283294f),
+  XMFLOAT3(-0.1512717f, -0.0201999f, 0.0283294f),
+  XMFLOAT3(-0.1646186f, 0.1424436f, -0.0725503f),
+  XMFLOAT3(-0.1646186f, 0.1424436f, -0.0725503f),
+  XMFLOAT3(-0.1646186f, 0.1424436f, -0.0725503f),
+  XMFLOAT3(-0.0458267f, 0.1424436f, -0.0725503f),
+  XMFLOAT3(-0.0458267f, 0.1424436f, -0.0725503f),
+  XMFLOAT3(-0.0458267f, 0.1424436f, -0.0725503f),
+  XMFLOAT3(-0.1646186f, 0.1421874f, 0.0385886f),
+  XMFLOAT3(-0.1646186f, 0.1421874f, 0.0385886f),
+  XMFLOAT3(-0.1646186f, 0.1421874f, 0.0385886f),
+  XMFLOAT3(-0.0458267f, 0.1421874f, 0.0385886f),
+  XMFLOAT3(-0.0458267f, 0.1421874f, 0.0385886f),
+  XMFLOAT3(-0.0458267f, 0.1421874f, 0.0385886f),
+  XMFLOAT3( 0.2091001f, 0.8484664f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 0.8484664f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 0.8484664f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 1.1539041f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 1.1539041f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 1.1539041f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 0.8484664f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 0.8484664f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 0.8484664f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 1.1539041f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 1.1539041f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 1.1539041f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 0.8484664f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 0.8484664f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 0.8484664f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 1.1539041f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 1.1539041f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 1.1539041f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 0.8484664f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 0.8484664f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 0.8484664f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 1.1539041f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 1.1539041f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 1.1539041f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 0.8484664f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 0.8484664f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 0.8484664f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 1.1539041f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 1.1539041f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 1.1539041f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 0.8484664f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 0.8484664f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 0.8484664f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 1.1539041f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 1.1539041f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 1.1539041f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 0.8484664f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 0.8484664f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 0.8484664f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 1.1539041f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 1.1539041f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 1.1539041f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 0.8484664f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 0.8484664f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 0.8484664f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 1.1539041f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 1.1539041f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 1.1539041f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.5538269f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.5538269f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.5538269f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.8537946f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.8537946f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.8537946f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.5538269f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.5538269f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.5538269f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.8537946f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.8537946f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.8537946f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.5538269f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.5538269f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.5538269f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.8537946f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.8537946f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.8537946f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.5538269f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.5538269f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.5538269f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.8537946f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.8537946f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.8537946f, -0.0316648f),
+  XMFLOAT3(-0.2107263f, 0.5538269f, 0.0316646f),
+  XMFLOAT3(-0.2107263f, 0.5538269f, 0.0316646f),
+  XMFLOAT3(-0.2107263f, 0.5538269f, 0.0316646f),
+  XMFLOAT3(-0.2107263f, 0.8537946f, 0.0316646f),
+  XMFLOAT3(-0.2107263f, 0.8537946f, 0.0316646f),
+  XMFLOAT3(-0.2107263f, 0.8537946f, 0.0316646f),
+  XMFLOAT3(-0.2107263f, 0.5538269f, -0.0316648f),
+  XMFLOAT3(-0.2107263f, 0.5538269f, -0.0316648f),
+  XMFLOAT3(-0.2107263f, 0.5538269f, -0.0316648f),
+  XMFLOAT3(-0.2107263f, 0.8537946f, -0.0316648f),
+  XMFLOAT3(-0.2107263f, 0.8537946f, -0.0316648f),
+  XMFLOAT3(-0.2107263f, 0.8537946f, -0.0316648f),
+  XMFLOAT3(-0.2740558f, 0.5538269f, 0.0316646f),
+  XMFLOAT3(-0.2740558f, 0.5538269f, 0.0316646f),
+  XMFLOAT3(-0.2740558f, 0.5538269f, 0.0316646f),
+  XMFLOAT3(-0.2740558f, 0.8537946f, 0.0316646f),
+  XMFLOAT3(-0.2740558f, 0.8537946f, 0.0316646f),
+  XMFLOAT3(-0.2740558f, 0.8537946f, 0.0316646f),
+  XMFLOAT3(-0.2740558f, 0.5538269f, -0.0316648f),
+  XMFLOAT3(-0.2740558f, 0.5538269f, -0.0316648f),
+  XMFLOAT3(-0.2740558f, 0.5538269f, -0.0316648f),
+  XMFLOAT3(-0.2740558f, 0.8537946f, -0.0316648f),
+  XMFLOAT3(-0.2740558f, 0.8537946f, -0.0316648f),
+  XMFLOAT3(-0.2740558f, 0.8537946f, -0.0316648f),
+  XMFLOAT3( 0.1706610f, 1.1755617f, -0.1459927f),
+  XMFLOAT3( 0.1706610f, 1.1755617f, -0.1459927f),
+  XMFLOAT3( 0.1706610f, 1.1755617f, -0.1459927f),
+  XMFLOAT3( 0.1706989f, 0.8830866f, -0.1470453f),
+  XMFLOAT3( 0.1706989f, 0.8830866f, -0.1470453f),
+  XMFLOAT3( 0.1706989f, 0.8830866f, -0.1470453f),
+  XMFLOAT3( 0.1706610f, 1.1753165f, 0.1454204f),
+  XMFLOAT3( 0.1706610f, 1.1753165f, 0.1454204f),
+  XMFLOAT3( 0.1706610f, 1.1753165f, 0.1454204f),
+  XMFLOAT3( 0.1706989f, 0.8832734f, 0.1454337f),
+  XMFLOAT3( 0.1706989f, 0.8832734f, 0.1454337f),
+  XMFLOAT3( 0.1706989f, 0.8832734f, 0.1454337f),
+  XMFLOAT3(-0.1697909f, 1.1754774f, -0.1459927f),
+  XMFLOAT3(-0.1697909f, 1.1754774f, -0.1459927f),
+  XMFLOAT3(-0.1697909f, 1.1754774f, -0.1459927f),
+  XMFLOAT3(-0.1698710f, 0.8832552f, -0.1470453f),
+  XMFLOAT3(-0.1698710f, 0.8832552f, -0.1470453f),
+  XMFLOAT3(-0.1698710f, 0.8832552f, -0.1470453f),
+  XMFLOAT3(-0.1697909f, 1.1752322f, 0.1454204f),
+  XMFLOAT3(-0.1697909f, 1.1752322f, 0.1454204f),
+  XMFLOAT3(-0.1697909f, 1.1752322f, 0.1454204f),
+  XMFLOAT3(-0.1698710f, 0.8834420f, 0.1454337f),
+  XMFLOAT3(-0.1698710f, 0.8834420f, 0.1454337f),
+  XMFLOAT3(-0.1698710f, 0.8834420f, 0.1454337f),
+  XMFLOAT3( 0.0493873f, 0.9123309f, -0.0626242f),
+  XMFLOAT3( 0.0493873f, 0.9123309f, -0.0626242f),
+  XMFLOAT3( 0.0493873f, 0.9123309f, -0.0626242f),
+  XMFLOAT3( 0.0477787f, 0.7450795f, -0.0646072f),
+  XMFLOAT3( 0.0477787f, 0.7450795f, -0.0646072f),
+  XMFLOAT3( 0.0477787f, 0.7450795f, -0.0646072f),
+  XMFLOAT3( 0.0493873f, 0.9111561f, 0.0185991f),
+  XMFLOAT3( 0.0493873f, 0.9111561f, 0.0185991f),
+  XMFLOAT3( 0.0493873f, 0.9111561f, 0.0185991f),
+  XMFLOAT3( 0.0477786f, 0.7439046f, 0.0166160f),
+  XMFLOAT3( 0.0477786f, 0.7439046f, 0.0166160f),
+  XMFLOAT3( 0.0477786f, 0.7439046f, 0.0166160f),
+  XMFLOAT3(-0.0377652f, 0.9123309f, -0.0626242f),
+  XMFLOAT3(-0.0377652f, 0.9123309f, -0.0626242f),
+  XMFLOAT3(-0.0377652f, 0.9123309f, -0.0626242f),
+  XMFLOAT3(-0.0361565f, 0.7450795f, -0.0646072f),
+  XMFLOAT3(-0.0361565f, 0.7450795f, -0.0646072f),
+  XMFLOAT3(-0.0361565f, 0.7450795f, -0.0646072f),
+  XMFLOAT3(-0.0377652f, 0.9111561f, 0.0185991f),
+  XMFLOAT3(-0.0377652f, 0.9111561f, 0.0185991f),
+  XMFLOAT3(-0.0377652f, 0.9111561f, 0.0185991f),
+  XMFLOAT3(-0.0361565f, 0.7439046f, 0.0166160f),
+  XMFLOAT3(-0.0361565f, 0.7439046f, 0.0166160f),
+  XMFLOAT3(-0.0361565f, 0.7439046f, 0.0166160f),
+  XMFLOAT3(-0.2091001f, 0.9502789f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 0.9502789f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 1.0520915f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 1.0520915f, 0.0316646f),
+  XMFLOAT3(-0.2091001f, 1.0520915f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 1.0520915f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 0.9502789f, -0.0316648f),
+  XMFLOAT3(-0.2091001f, 0.9502789f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 1.0520915f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 1.0520915f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 0.9502789f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 0.9502789f, -0.0316648f),
+  XMFLOAT3(-0.2724295f, 1.0520915f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 1.0520915f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 0.9502789f, 0.0316646f),
+  XMFLOAT3(-0.2724295f, 0.9502789f, 0.0316646f),
+  XMFLOAT3(-0.2107262f, 0.6538162f, 0.0316646f),
+  XMFLOAT3(-0.2107262f, 0.6538162f, 0.0316646f),
+  XMFLOAT3(-0.2107262f, 0.7538054f, 0.0316646f),
+  XMFLOAT3(-0.2107262f, 0.7538054f, 0.0316646f),
+  XMFLOAT3(-0.2107262f, 0.7538053f, -0.0316648f),
+  XMFLOAT3(-0.2107262f, 0.7538053f, -0.0316648f),
+  XMFLOAT3(-0.2107262f, 0.6538161f, -0.0316648f),
+  XMFLOAT3(-0.2107262f, 0.6538161f, -0.0316648f),
+  XMFLOAT3(-0.2740557f, 0.7538053f, -0.0316648f),
+  XMFLOAT3(-0.2740557f, 0.7538053f, -0.0316648f),
+  XMFLOAT3(-0.2740557f, 0.6538161f, -0.0316648f),
+  XMFLOAT3(-0.2740557f, 0.6538161f, -0.0316648f),
+  XMFLOAT3(-0.2740557f, 0.7538053f, 0.0316646f),
+  XMFLOAT3(-0.2740557f, 0.7538053f, 0.0316646f),
+  XMFLOAT3(-0.2740557f, 0.6538161f, 0.0316646f),
+  XMFLOAT3(-0.2740557f, 0.6538161f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.6538162f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.6538162f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.7538054f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.7538054f, 0.0316646f),
+  XMFLOAT3( 0.2107262f, 0.7538053f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.7538053f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.6538161f, -0.0316648f),
+  XMFLOAT3( 0.2107262f, 0.6538161f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.7538053f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.7538053f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.6538161f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.6538161f, -0.0316648f),
+  XMFLOAT3( 0.2740557f, 0.7538053f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.7538053f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.6538161f, 0.0316646f),
+  XMFLOAT3( 0.2740557f, 0.6538161f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 0.9502789f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 0.9502789f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 1.0520915f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 1.0520915f, 0.0316646f),
+  XMFLOAT3( 0.2091001f, 1.0520915f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 1.0520915f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 0.9502789f, -0.0316648f),
+  XMFLOAT3( 0.2091001f, 0.9502789f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 1.0520915f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 1.0520915f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 0.9502789f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 0.9502789f, -0.0316648f),
+  XMFLOAT3( 0.2724295f, 1.0520915f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 1.0520915f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 0.9502789f, 0.0316646f),
+  XMFLOAT3( 0.2724295f, 0.9502789f, 0.0316646f),
+};
+
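accessor_0 above appears to hold object-space vertex positions; the accessor_1 entries that follow are unit-length vectors, which is consistent with vertex normals. Under a world transform the two kinds of data are treated differently, and DirectXMath provides separate helpers for each; a brief sketch (the matrix is arbitrary, for illustration only):

    XMMATRIX world = XMMatrixRotationY(XM_PIDIV4);
    // Positions take the full transform; normals ignore translation.
    XMVECTOR p = XMVector3TransformCoord(XMLoadFloat3(&accessor_0[0]), world);
    XMVECTOR n = XMVector3TransformNormal(XMLoadFloat3(&accessor_1[0]), world);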
+const XMFLOAT3 accessor_1[] = {
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0024000f, -0.9999971f),
+  XMFLOAT3( 1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3(-0.0015000f, 0.9999987f, 0.0006000f),
+  XMFLOAT3( 0.0000000f, 0.0024000f, -0.9999971f),
+  XMFLOAT3( 0.0015000f, -0.9999988f, 0.0005000f),
+  XMFLOAT3( 1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3( 0.0000000f, 0.0030000f, 0.9999956f),
+  XMFLOAT3( 1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3(-0.0015000f, 0.9999987f, 0.0006000f),
+  XMFLOAT3( 0.0000000f, 0.0030000f, 0.9999956f),
+  XMFLOAT3( 0.0015000f, -0.9999988f, 0.0005000f),
+  XMFLOAT3( 1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3( 0.0000000f, 0.0024000f, -0.9999971f),
+  XMFLOAT3(-0.0015000f, 0.9999987f, 0.0006000f),
+  XMFLOAT3(-1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3( 0.0000000f, 0.0024000f, -0.9999971f),
+  XMFLOAT3( 0.0015000f, -0.9999988f, 0.0005000f),
+  XMFLOAT3(-1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3( 0.0000000f, 0.0030000f, 0.9999956f),
+  XMFLOAT3(-0.0015000f, 0.9999987f, 0.0006000f),
+  XMFLOAT3(-1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3( 0.0000000f, 0.0030000f, 0.9999956f),
+  XMFLOAT3( 0.0015000f, -0.9999988f, 0.0005000f),
+  XMFLOAT3(-1.0000000f, 0.0001000f, -0.0002000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9997536f, 0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9997536f, -0.0221990f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3(-0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0139000f, -0.9999034f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.9995255f, 0.0308008f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, -0.0312004f, 0.9995131f),
+  XMFLOAT3( 0.0000000f, -0.9995255f, -0.0308008f),
+  XMFLOAT3( 0.9999303f, -0.0118004f, -0.0004000f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0052999f, -0.9999860f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.9194086f, 0.3933037f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3( 0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, -0.9999974f, -0.0023000f),
+  XMFLOAT3(-0.9991343f, -0.0416014f, -0.0001000f),
+  XMFLOAT3(-0.9995378f, -0.0265010f, 0.0149006f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1698935f, -0.9854625f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3(-0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.9999974f, 0.0023000f),
+  XMFLOAT3( 0.0000000f, -0.1298052f, 0.9915395f),
+  XMFLOAT3( 0.9917093f, -0.1285012f, -0.0003000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f),
+  XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f),
+  XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f),
+  XMFLOAT3( 1.0000000f, 0.0000000f,
0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, -1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 0.0000000f, 1.0000000f, 0.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0036000f, -0.9999936f), + XMFLOAT3( 1.0000000f, 0.0001000f, 0.0000000f), + XMFLOAT3(-0.0002000f, 0.9999997f, 0.0008000f), + XMFLOAT3( 0.0000000f, 0.0036000f, -0.9999936f), + XMFLOAT3( 1.0000000f, 0.0001000f, 0.0000000f), + XMFLOAT3(-0.0005000f, -0.9999998f, 0.0006000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0001000f, 0.0000000f), + XMFLOAT3(-0.0002000f, 0.9999997f, 0.0008000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0001000f, 0.0000000f), + XMFLOAT3(-0.0005000f, -0.9999998f, 0.0006000f), + XMFLOAT3( 0.0000000f, 0.0036000f, -0.9999936f), + XMFLOAT3(-0.0002000f, 0.9999997f, 0.0008000f), + XMFLOAT3(-1.0000000f, 0.0003000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0036000f, -0.9999936f), + XMFLOAT3(-0.0005000f, -0.9999998f, 0.0006000f), + XMFLOAT3(-1.0000000f, 0.0003000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-0.0002000f, 0.9999997f, 0.0008000f), + XMFLOAT3(-1.0000000f, 0.0003000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-0.0005000f, -0.9999998f, 0.0006000f), + XMFLOAT3(-1.0000000f, 0.0003000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0119003f, -0.9999292f), + XMFLOAT3( 0.0000000f, 0.9998949f, 0.0144999f), + XMFLOAT3( 0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, 0.0119003f, -0.9999292f), + XMFLOAT3( 0.0000000f, -0.9998949f, -0.0144999f), + XMFLOAT3( 0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, 0.9998949f, 0.0144999f), + XMFLOAT3( 0.0000000f, -0.0119003f, 0.9999292f), + XMFLOAT3( 0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, -0.0119003f, 0.9999292f), + XMFLOAT3( 0.0000000f, -0.9998949f, -0.0144999f), + XMFLOAT3( 0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, 0.0119003f, -0.9999292f), + XMFLOAT3( 0.0000000f, 0.9998949f, 0.0144999f), + XMFLOAT3(-0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, 0.0119003f, -0.9999292f), + XMFLOAT3( 0.0000000f, -0.9998949f, -0.0144999f), + XMFLOAT3(-0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, 0.9998949f, 0.0144999f), + XMFLOAT3( 0.0000000f, -0.0119003f, 0.9999292f), + XMFLOAT3(-0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, -0.0119003f, 0.9999292f), + XMFLOAT3( 0.0000000f, -0.9998949f, -0.0144999f), + 
XMFLOAT3(-0.9999540f, -0.0095996f, -0.0001000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3(-1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, -1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.0000000f, 0.0000000f), +}; + +const XMFLOAT2 accessor_2[] = { + XMFLOAT2( 0.7500000f, 0.0000000f), + XMFLOAT2( 0.7500000f, 0.2031250f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2031250f), + XMFLOAT2( 0.7500000f, 0.0000000f), + XMFLOAT2( 0.7500000f, 0.0468750f), + XMFLOAT2( 0.7500000f, 0.0000000f), + XMFLOAT2( 0.7500000f, 0.2500000f), + XMFLOAT2( 
0.5478383f, 0.2441072f), + XMFLOAT2( 0.7500000f, 0.2500000f), + XMFLOAT2( 0.5000000f, 0.0000000f), + XMFLOAT2( 0.7031250f, 0.0468750f), + XMFLOAT2( 0.5000000f, 0.0000000f), + XMFLOAT2( 0.5000000f, 0.2500000f), + XMFLOAT2( 0.5478383f, 0.1873633f), + XMFLOAT2( 0.5000000f, 0.2500000f), + XMFLOAT2( 0.5156250f, 0.0000000f), + XMFLOAT2( 0.7500000f, 0.0000000f), + XMFLOAT2( 0.5000000f, 0.0000000f), + XMFLOAT2( 0.5156250f, 0.2500000f), + XMFLOAT2( 0.4910944f, 0.2441072f), + XMFLOAT2( 0.5000000f, 0.2500000f), + XMFLOAT2( 0.2500000f, 0.0000000f), + XMFLOAT2( 0.7031250f, 0.0000000f), + XMFLOAT2( 0.7500000f, 0.0000000f), + XMFLOAT2( 0.2500000f, 0.2500000f), + XMFLOAT2( 0.4910944f, 0.1873633f), + XMFLOAT2( 0.7500000f, 0.2500000f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.3125000f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.8124686f, 0.3124686f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.8125000f, 0.4062500f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.8125000f, 0.5000000f), + XMFLOAT2( 0.8125314f, 0.4999686f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.4375000f, 0.3125000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6249686f, 0.3125314f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.4062500f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250314f, 0.5000314f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + 
XMFLOAT2(-0.0095541f, 0.5000000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2(-0.0095541f, -0.5191081f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2(-1.0286622f, 0.5000000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2(-1.0286622f, -0.5191081f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7958288f, 0.2513793f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2(-0.0095541f, 0.5000000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7958288f, 0.2620047f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2(-0.0095541f, -0.5191083f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7852035f, 0.2513793f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2(-1.0286624f, 0.5000000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7852035f, 0.2620047f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2(-1.0286624f, -0.5191083f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7738063f, 0.1937098f), + XMFLOAT2( 0.7738063f, 0.1937098f), + XMFLOAT2( 0.7738063f, 0.1937098f), + XMFLOAT2( 0.7744191f, 0.1960735f), + XMFLOAT2( 0.7744191f, 0.1960735f), + XMFLOAT2( 0.7744191f, 0.1960735f), + XMFLOAT2( 0.7752946f, 0.1945853f), + XMFLOAT2( 0.7752946f, 0.1945853f), + XMFLOAT2( 0.7752946f, 0.1945853f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7723181f, 0.1928343f), + XMFLOAT2( 0.7746819f, 0.1922215f), + XMFLOAT2( 0.7723181f, 0.1928343f), + XMFLOAT2( 0.7759073f, 0.1969490f), + XMFLOAT2( 0.7735436f, 0.1975617f), + XMFLOAT2( 0.7699544f, 0.1934471f), + XMFLOAT2( 0.7767828f, 0.1954608f), + XMFLOAT2( 0.7761701f, 0.1930971f), + XMFLOAT2( 0.7708299f, 0.1919588f), + XMFLOAT2( 0.7754260f, 0.1926593f), + XMFLOAT2( 0.7715740f, 0.1923965f), + XMFLOAT2( 0.7715740f, 0.1923965f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7745505f, 0.1941475f), + XMFLOAT2( 0.7745505f, 0.1941475f), + XMFLOAT2( 0.7745505f, 0.1941475f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7720553f, 0.1966863f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7727995f, 0.1971240f), 
+ XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7738063f, 0.1937098f), + XMFLOAT2( 0.7738063f, 0.1937098f), + XMFLOAT2( 0.7738063f, 0.1937098f), + XMFLOAT2( 0.7744191f, 0.1960735f), + XMFLOAT2( 0.7744191f, 0.1960735f), + XMFLOAT2( 0.7744191f, 0.1960735f), + XMFLOAT2( 0.7752946f, 0.1945853f), + XMFLOAT2( 0.7752946f, 0.1945853f), + XMFLOAT2( 0.7752946f, 0.1945853f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7723181f, 0.1928343f), + XMFLOAT2( 0.7746819f, 0.1922215f), + XMFLOAT2( 0.7723181f, 0.1928343f), + XMFLOAT2( 0.7759073f, 0.1969490f), + XMFLOAT2( 0.7735436f, 0.1975617f), + XMFLOAT2( 0.7699544f, 0.1934471f), + XMFLOAT2( 0.7767828f, 0.1954608f), + XMFLOAT2( 0.7761701f, 0.1930971f), + XMFLOAT2( 0.7708299f, 0.1919588f), + XMFLOAT2( 0.7754260f, 0.1926593f), + XMFLOAT2( 0.7715740f, 0.1923965f), + XMFLOAT2( 0.7715740f, 0.1923965f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7745505f, 0.1941475f), + XMFLOAT2( 0.7745505f, 0.1941475f), + XMFLOAT2( 0.7745505f, 0.1941475f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7729309f, 0.1951980f), + XMFLOAT2( 0.7720553f, 0.1966863f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7714426f, 0.1943225f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7736750f, 0.1956358f), + XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7727995f, 0.1971240f), + XMFLOAT2( 0.7706985f, 0.1938848f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.0000000f), + 
XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.0000000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.0000000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2031250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2031250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7968750f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.8125000f, 0.2656250f), + XMFLOAT2( 0.7968750f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.8125000f, 0.1250000f), + XMFLOAT2( 0.7812500f, 0.2656250f), + XMFLOAT2( 0.7812500f, 0.1250000f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.3125000f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.8125000f, 0.5000000f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.8125000f, 0.5000000f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.8125000f, 0.3125000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.3125000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.2500000f), + XMFLOAT2( 0.2500000f, 0.2500000f), + XMFLOAT2( 0.6250000f, 0.5000000f), + XMFLOAT2( 0.4375000f, 0.5000000f), + XMFLOAT2( 0.2500000f, 0.5000000f), + XMFLOAT2( 0.6250000f, 0.3125000f), + XMFLOAT2( 0.4375000f, 0.2500000f), + XMFLOAT2( 0.6951088f, 0.4779186f), + XMFLOAT2( 0.6998297f, 0.4787053f), + XMFLOAT2( 0.6974693f, 0.4779186f), + XMFLOAT2( 0.6951088f, 0.4810658f), + XMFLOAT2( 0.6998297f, 0.4810658f), + XMFLOAT2( 0.6974693f, 0.4810658f), + XMFLOAT2( 0.6998297f, 0.4810658f), + XMFLOAT2( 0.6951088f, 0.4779186f), + XMFLOAT2( 0.6951088f, 0.4779186f), + XMFLOAT2( 0.6951088f, 0.4810658f), + XMFLOAT2( 0.6998297f, 0.4787053f), + XMFLOAT2( 0.6951088f, 0.4810658f), + XMFLOAT2( 0.6974693f, 0.4779186f), + XMFLOAT2( 0.6974693f, 0.4787053f), + XMFLOAT2( 0.6974693f, 0.4810658f), + XMFLOAT2( 0.6974693f, 0.4810658f), + 
XMFLOAT2( 0.6974693f, 0.4810658f), + XMFLOAT2( 0.6974693f, 0.4779186f), + XMFLOAT2( 0.6974693f, 0.4810658f), + XMFLOAT2( 0.6927484f, 0.4779186f), + XMFLOAT2( 0.6951088f, 0.4810658f), + XMFLOAT2( 0.6927484f, 0.4810658f), + XMFLOAT2( 0.6974693f, 0.4787053f), + XMFLOAT2( 0.6951088f, 0.4779186f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.8125000f, 0.1718750f), + XMFLOAT2( 0.7812500f, 0.1718750f), + XMFLOAT2( 0.8125000f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.2187500f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.7812500f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.8125000f, 0.0416667f), + XMFLOAT2( 0.7812500f, 0.0416667f), + XMFLOAT2( 0.8125000f, 0.0833334f), + XMFLOAT2( 0.7812500f, 0.0833334f), +}; + +const XMFLOAT4 accessor_3[] = { + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 
255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 
255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 
255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + 
XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + 
XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + 
XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + 
XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + 
XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + 
XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), + XMFLOAT4(255.0000000f, 255.0000000f, 255.0000000f, 255.0000000f), +}; + +const XMFLOAT4 accessor_4[] = { + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65347.0000000f, 65333.0000000f, 65385.0000000f, 65535.0000000f), + XMFLOAT4(65347.0000000f, 65333.0000000f, 65385.0000000f, 65535.0000000f), + XMFLOAT4(65347.0000000f, 65333.0000000f, 65385.0000000f, 65535.0000000f), + XMFLOAT4(48822.0000000f, 48754.0000000f, 53781.0000000f, 65535.0000000f), + XMFLOAT4(48822.0000000f, 48754.0000000f, 53781.0000000f, 65535.0000000f), + XMFLOAT4(48822.0000000f, 48754.0000000f, 53781.0000000f, 65535.0000000f), + XMFLOAT4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), + XMFLOAT4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), + XMFLOAT4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), + XMFLOAT4(62584.0000000f, 62561.0000000f, 63372.0000000f, 65535.0000000f), + XMFLOAT4(62584.0000000f, 62561.0000000f, 63372.0000000f, 65535.0000000f), + 
XMFLOAT4(62584.0000000f, 62561.0000000f, 63372.0000000f, 65535.0000000f), + XMFLOAT4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), + XMFLOAT4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), + XMFLOAT4(10.0000000f, 65532.0000000f, 65530.0000000f, 65535.0000000f), + XMFLOAT4( 4.0000000f, 65526.0000000f, 65526.0000000f, 65535.0000000f), + XMFLOAT4( 4.0000000f, 65526.0000000f, 65526.0000000f, 65535.0000000f), + XMFLOAT4( 4.0000000f, 65526.0000000f, 65526.0000000f, 65535.0000000f), + XMFLOAT4(65531.0000000f, 65517.0000000f, 65519.0000000f, 65535.0000000f), + XMFLOAT4(65531.0000000f, 65517.0000000f, 65519.0000000f, 65535.0000000f), + XMFLOAT4(65531.0000000f, 65517.0000000f, 65519.0000000f, 65535.0000000f), + XMFLOAT4(64996.0000000f, 64981.0000000f, 65128.0000000f, 65535.0000000f), + XMFLOAT4(64996.0000000f, 64981.0000000f, 65128.0000000f, 65535.0000000f), + XMFLOAT4(64996.0000000f, 64981.0000000f, 65128.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + 
XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 
65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 
65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + 
XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 
65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 
65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + 
XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 
65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 
65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), +}; + +const XMFLOAT4 accessor_5[] = { + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 
65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + 
XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 
65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 
65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + 
XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 
65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 
65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + 
XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 
65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 
65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), + XMFLOAT4(65535.0000000f, 65535.0000000f, 65535.0000000f, 65535.0000000f), +}; + +const XMFLOAT4 accessor_6[] = { + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 
0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 8.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(25.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 
0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 
0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(35.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(36.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 
0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(26.0000000f, 36.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(29.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(30.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 2.0000000f, 30.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 
16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 16.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 5.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + 
XMFLOAT4(22.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 21.0000000f, 17.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(24.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(22.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 12.0000000f, 6.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(15.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(13.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + 
XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 4.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 3.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f), + XMFLOAT4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f), + XMFLOAT4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(11.0000000f, 10.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(11.0000000f, 12.0000000f, 5.0000000f, 10.0000000f), + XMFLOAT4(11.0000000f, 12.0000000f, 5.0000000f, 10.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(11.0000000f, 12.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(11.0000000f, 12.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(10.0000000f, 11.0000000f, 5.0000000f, 0.0000000f), + XMFLOAT4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f), + XMFLOAT4(12.0000000f, 11.0000000f, 5.0000000f, 10.0000000f), + XMFLOAT4(14.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 
0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(14.0000000f, 6.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(23.0000000f, 17.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f), + XMFLOAT4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f), + XMFLOAT4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(20.0000000f, 19.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(20.0000000f, 21.0000000f, 16.0000000f, 19.0000000f), + XMFLOAT4(20.0000000f, 21.0000000f, 16.0000000f, 19.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(20.0000000f, 21.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(20.0000000f, 21.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(19.0000000f, 20.0000000f, 16.0000000f, 0.0000000f), + XMFLOAT4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f), + XMFLOAT4(21.0000000f, 20.0000000f, 16.0000000f, 19.0000000f), +}; + +const XMFLOAT4 accessor_7[] = { + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 
0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 
0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + 
XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9696393f, 0.0303607f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9696393f, 0.0303607f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9696393f, 0.0303607f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9975708f, 0.0024292f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9975708f, 0.0024292f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9975708f, 0.0024292f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9827832f, 0.0172168f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9827832f, 0.0172168f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9827832f, 0.0172168f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997147f, 0.0002853f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997147f, 0.0002853f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997147f, 0.0002853f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), + XMFLOAT4( 
0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9986997f, 0.0013003f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9986997f, 0.0013003f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9986997f, 0.0013003f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9971964f, 0.0028036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9971964f, 0.0028036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9971964f, 0.0028036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9648562f, 0.0351438f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9648562f, 0.0351438f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9648562f, 0.0351438f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5998880f, 0.4001120f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5979702f, 0.4020298f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5999978f, 0.4000022f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6000000f, 0.4000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6000000f, 
0.4000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9995227f, 0.0004773f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9995227f, 0.0004773f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9995227f, 0.0004773f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931508f, 0.0068492f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9997543f, 0.0002457f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9948146f, 0.0051854f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9903108f, 0.0096892f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9994707f, 0.0005293f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9994707f, 0.0005293f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9994707f, 0.0005293f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9992316f, 0.0007684f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), + XMFLOAT4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), + XMFLOAT4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9635389f, 0.0364611f, 
0.0000000f, 0.0000000f), + XMFLOAT4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), + XMFLOAT4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), + XMFLOAT4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9996318f, 0.0003682f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), + XMFLOAT4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), + XMFLOAT4( 0.9942689f, 0.0036694f, 0.0020617f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9635389f, 0.0364611f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9565650f, 0.0434350f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), + XMFLOAT4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), + XMFLOAT4( 0.9994737f, 0.0002891f, 0.0002372f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 
0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), + XMFLOAT4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), + XMFLOAT4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), + XMFLOAT4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), + XMFLOAT4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), + XMFLOAT4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), + XMFLOAT4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), + XMFLOAT4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), + XMFLOAT4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), + XMFLOAT4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), + XMFLOAT4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6358581f, 0.3572480f, 0.0068939f, 0.0000000f), + XMFLOAT4( 0.6358581f, 0.3572480f, 0.0068939f, 0.0000000f), + XMFLOAT4( 0.7581828f, 0.2399969f, 0.0012200f, 0.0006002f), + XMFLOAT4( 0.7581828f, 0.2399969f, 0.0012200f, 0.0006002f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), + 
XMFLOAT4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 1.0000000f, 0.0000000f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9619964f, 0.0380036f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9689394f, 0.0310606f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9931825f, 0.0068175f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9847520f, 0.0152480f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9990904f, 0.0009096f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9590796f, 0.0409204f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.9945605f, 0.0054395f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), + XMFLOAT4( 0.8793960f, 0.1143886f, 0.0041458f, 0.0020696f), + XMFLOAT4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), + XMFLOAT4( 0.6280711f, 0.3581006f, 0.0138284f, 0.0000000f), + XMFLOAT4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), + XMFLOAT4( 0.7455971f, 0.1687225f, 0.0856804f, 0.0000000f), + XMFLOAT4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), + XMFLOAT4( 0.7124659f, 0.2758425f, 0.0078146f, 0.0038770f), + XMFLOAT4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), + XMFLOAT4( 0.7838106f, 0.1889184f, 0.0272710f, 0.0000000f), + XMFLOAT4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.5571451f, 0.4428549f, 0.0000000f, 0.0000000f), + XMFLOAT4( 0.6358581f, 0.3572339f, 0.0069080f, 0.0000000f), + XMFLOAT4( 0.6358581f, 0.3572339f, 0.0069080f, 0.0000000f), + XMFLOAT4( 0.7581828f, 0.2399969f, 0.0012203f, 0.0005999f), + XMFLOAT4( 0.7581828f, 0.2399969f, 0.0012203f, 0.0005999f), +}; + +const int accessor_8[] = { + 0, + 1, + 3, + 0, + 3, + 2, + 5, + 17, + 23, + 5, + 23, + 11, + 13, + 10, + 22, + 13, + 22, + 25, + 27, + 24, + 18, + 27, + 18, + 21, + 20, + 8, + 14, + 20, + 14, + 26, + 9, + 6, + 12, + 9, + 12, + 15, + 19, + 16, + 4, + 19, + 4, + 7, + 30, + 41, + 47, + 30, + 47, + 36, + 37, + 34, + 46, + 37, + 46, + 49, + 51, + 48, + 42, + 51, + 42, + 45, + 44, + 32, + 38, + 44, + 38, + 50, + 33, + 29, + 35, + 33, + 35, + 39, + 43, + 40, + 28, + 43, + 28, + 31, + 53, + 65, + 71, + 53, + 71, + 59, + 61, + 58, + 70, + 61, + 70, + 73, + 75, + 72, + 66, + 75, + 66, + 69, + 68, + 56, + 62, + 68, + 62, + 74, + 57, + 54, + 60, + 57, + 60, + 63, + 67, + 64, + 52, + 67, + 52, + 55, + 77, + 83, + 95, + 77, + 95, + 89, + 85, + 97, + 94, + 85, + 94, + 82, + 99, + 93, + 90, + 99, + 90, + 96, + 92, + 98, + 86, + 92, + 86, + 80, + 81, + 87, + 84, + 81, + 84, + 78, + 91, + 79, + 76, + 91, + 76, + 88, + 100, + 112, + 118, + 100, + 118, + 106, + 109, + 107, + 119, + 109, + 119, + 121, + 123, + 120, + 114, + 123, + 114, + 117, + 116, + 104, + 110, + 116, + 110, + 122, + 105, + 102, + 108, + 105, + 108, + 111, + 115, + 113, + 101, + 115, + 101, + 103, + 124, + 130, + 142, + 124, + 142, + 136, + 133, + 145, + 143, + 133, + 143, + 131, + 147, + 141, + 138, + 147, + 138, + 144, + 140, + 146, + 134, + 140, + 134, + 128, + 129, + 135, + 132, + 129, + 132, + 126, + 139, + 127, + 125, + 139, + 125, + 137, + 177, + 182, + 169, + 177, + 169, + 156, + 158, + 155, + 
168, + 158, + 168, + 171, + 175, + 185, + 164, + 175, + 164, + 167, + 174, + 187, + 159, + 174, + 159, + 172, + 189, + 181, + 157, + 189, + 157, + 160, + 165, + 161, + 148, + 165, + 148, + 152, + 154, + 151, + 180, + 154, + 180, + 188, + 166, + 153, + 187, + 166, + 187, + 174, + 173, + 170, + 186, + 173, + 186, + 176, + 149, + 162, + 194, + 149, + 194, + 191, + 190, + 193, + 199, + 190, + 199, + 196, + 183, + 178, + 197, + 183, + 197, + 200, + 179, + 150, + 192, + 179, + 192, + 198, + 163, + 184, + 201, + 163, + 201, + 195, + 231, + 210, + 223, + 231, + 223, + 236, + 212, + 225, + 222, + 212, + 222, + 209, + 229, + 221, + 218, + 229, + 218, + 239, + 228, + 226, + 213, + 228, + 213, + 241, + 243, + 214, + 211, + 243, + 211, + 235, + 219, + 206, + 202, + 219, + 202, + 215, + 208, + 242, + 234, + 208, + 234, + 205, + 220, + 228, + 241, + 220, + 241, + 207, + 227, + 230, + 240, + 227, + 240, + 224, + 203, + 245, + 248, + 203, + 248, + 216, + 244, + 250, + 253, + 244, + 253, + 247, + 237, + 254, + 251, + 237, + 251, + 232, + 233, + 252, + 246, + 233, + 246, + 204, + 217, + 249, + 255, + 217, + 255, + 238, + 451, + 261, + 267, + 451, + 267, + 453, + 452, + 265, + 277, + 452, + 277, + 456, + 457, + 279, + 273, + 457, + 273, + 461, + 460, + 271, + 259, + 460, + 259, + 450, + 263, + 275, + 269, + 263, + 269, + 257, + 278, + 266, + 260, + 278, + 260, + 272, + 403, + 405, + 291, + 403, + 291, + 285, + 404, + 408, + 301, + 404, + 301, + 289, + 409, + 413, + 297, + 409, + 297, + 303, + 412, + 402, + 283, + 412, + 283, + 295, + 287, + 281, + 293, + 287, + 293, + 299, + 302, + 296, + 284, + 302, + 284, + 290, + 435, + 309, + 315, + 435, + 315, + 437, + 436, + 313, + 325, + 436, + 325, + 440, + 441, + 327, + 321, + 441, + 321, + 445, + 444, + 319, + 307, + 444, + 307, + 434, + 311, + 323, + 317, + 311, + 317, + 305, + 326, + 314, + 308, + 326, + 308, + 320, + 419, + 421, + 339, + 419, + 339, + 333, + 420, + 424, + 349, + 420, + 349, + 337, + 425, + 429, + 345, + 425, + 345, + 351, + 428, + 418, + 331, + 428, + 331, + 343, + 335, + 329, + 341, + 335, + 341, + 347, + 350, + 344, + 332, + 350, + 332, + 338, + 354, + 365, + 371, + 354, + 371, + 360, + 361, + 358, + 370, + 361, + 370, + 373, + 375, + 372, + 366, + 375, + 366, + 369, + 368, + 357, + 363, + 368, + 363, + 374, + 356, + 353, + 359, + 356, + 359, + 362, + 367, + 364, + 352, + 367, + 352, + 355, + 377, + 389, + 394, + 377, + 394, + 382, + 385, + 383, + 395, + 385, + 395, + 397, + 399, + 396, + 390, + 399, + 390, + 393, + 392, + 380, + 386, + 392, + 386, + 398, + 381, + 378, + 384, + 381, + 384, + 387, + 391, + 388, + 376, + 391, + 376, + 379, + 292, + 280, + 400, + 292, + 400, + 414, + 414, + 400, + 402, + 414, + 402, + 412, + 300, + 294, + 415, + 300, + 415, + 411, + 411, + 415, + 413, + 411, + 413, + 409, + 286, + 298, + 410, + 286, + 410, + 406, + 406, + 410, + 408, + 406, + 408, + 404, + 282, + 288, + 407, + 282, + 407, + 401, + 401, + 407, + 405, + 401, + 405, + 403, + 340, + 328, + 416, + 340, + 416, + 430, + 430, + 416, + 418, + 430, + 418, + 428, + 348, + 342, + 431, + 348, + 431, + 427, + 427, + 431, + 429, + 427, + 429, + 425, + 334, + 346, + 426, + 334, + 426, + 422, + 422, + 426, + 424, + 422, + 424, + 420, + 330, + 336, + 423, + 330, + 423, + 417, + 417, + 423, + 421, + 417, + 421, + 419, + 316, + 446, + 432, + 316, + 432, + 304, + 446, + 444, + 434, + 446, + 434, + 432, + 324, + 443, + 447, + 324, + 447, + 318, + 443, + 441, + 445, + 443, + 445, + 447, + 310, + 438, + 442, + 310, + 442, + 322, + 438, + 436, + 440, + 438, + 440, + 442, 
+ 306, + 433, + 439, + 306, + 439, + 312, + 433, + 435, + 437, + 433, + 437, + 439, + 268, + 462, + 448, + 268, + 448, + 256, + 462, + 460, + 450, + 462, + 450, + 448, + 276, + 459, + 463, + 276, + 463, + 270, + 459, + 457, + 461, + 459, + 461, + 463, + 262, + 454, + 458, + 262, + 458, + 274, + 454, + 452, + 456, + 454, + 456, + 458, + 258, + 449, + 455, + 258, + 455, + 264, + 449, + 451, + 453, + 449, + 453, + 455, +}; + +const XMMATRIX accessor_9[] = { + XMMATRIX( 1.0000000f, -0.0000000f, -0.0000000f, -0.0000000f, + -0.0000000f, 1.0000000f, 0.0000001f, 0.0000000f, + 0.0000000f, -0.0000001f, 1.0000000f, -0.0000000f, + 0.0020865f, -0.6493472f, 0.0044682f, 1.0000000f), + XMMATRIX( 0.9999925f, 0.0038494f, 0.0002189f, -0.0000000f, + 0.0038556f, -0.9983662f, -0.0570068f, 0.0000000f, + -0.0000005f, 0.0570072f, -0.9983711f, -0.0000000f, + 0.1011884f, 0.6297937f, 0.0216967f, 1.0000000f), + XMMATRIX( 0.9999585f, -0.0091070f, 0.0003901f, -0.0000000f, + -0.0091151f, -0.9993415f, 0.0351207f, 0.0000000f, + 0.0000704f, -0.0351225f, -0.9993804f, -0.0000000f, + 0.1061165f, 0.3790198f, -0.0133131f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000000f, 0.0000000f, -0.0000000f, + -0.0000000f, 1.0000000f, -0.0000001f, 0.0000000f, + -0.0000000f, 0.0000001f, 1.0000000f, -0.0000000f, + 0.0020865f, -0.7840567f, 0.0044684f, 1.0000000f), + XMMATRIX( 0.9999975f, 0.0022573f, 0.0000000f, -0.0000000f, + -0.0022573f, 0.9999975f, -0.0000001f, 0.0000000f, + -0.0000000f, 0.0000001f, 1.0000000f, -0.0000000f, + 0.0041084f, -0.8957252f, 0.0044684f, 1.0000000f), + XMMATRIX( 0.9999950f, 0.0031200f, -0.0002015f, -0.0000000f, + 0.0031265f, -0.9978876f, 0.0648890f, 0.0000000f, + 0.0000006f, -0.0648893f, -0.9978877f, -0.0000000f, + 0.2384893f, 1.1499825f, -0.0747788f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000001f, 0.0000014f, -0.0000000f, + 0.0000000f, -0.9978564f, -0.0654442f, 0.0000000f, + 0.0000007f, 0.0654436f, -0.9978516f, -0.0000000f, + 0.2411296f, 0.8435937f, 0.0352844f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000000f, 0.0000014f, -0.0000000f, + -0.0000000f, -1.0000001f, -0.0000000f, 0.0000000f, + 0.0000007f, -0.0000003f, -0.9999953f, -0.0000000f, + 0.2411296f, 0.5391477f, 0.0000003f, 1.0000000f), + XMMATRIX( 1.0000001f, 0.0000000f, -0.0000000f, -0.0000000f, + 0.0000000f, 1.0000001f, 0.0000000f, -0.0000000f, + 0.0000000f, -0.0000000f, 1.0000000f, -0.0000000f, + -0.0000000f, -1.2420585f, -0.0000000f, 1.0000000f), + XMMATRIX( 1.0000001f, 0.0000000f, 0.0000000f, -0.0000000f, + -0.0000000f, 0.9582973f, 0.2857734f, 0.0000000f, + 0.0000000f, -0.2857733f, 0.9582972f, -0.0000000f, + -0.2488541f, -1.3979810f, -0.3699030f, 1.0000000f), + XMMATRIX(-1.0000001f, 0.0000001f, 0.0000000f, -0.0000000f, + -0.0000001f, -0.9963848f, -0.0849537f, 0.0000000f, + 0.0000007f, -0.0849559f, 0.9963719f, -0.0000000f, + -0.2411295f, 1.1474965f, 0.0978378f, 1.0000000f), + XMMATRIX(-1.0000001f, -0.0000000f, -0.0000001f, -0.0000000f, + -0.0000000f, -0.9980612f, -0.0622333f, 0.0000000f, + 0.0000006f, -0.0622358f, 0.9980485f, -0.0000000f, + -0.2411295f, 1.0469925f, 0.0740057f, 1.0000000f), + XMMATRIX(-1.0000001f, -0.0000001f, -0.0000862f, -0.0000000f, + 0.0000029f, -0.9994135f, -0.0342350f, 0.0000000f, + -0.0000855f, -0.0342378f, 0.9994007f, 0.0000000f, + -0.2411336f, 0.9460898f, 0.0474864f, 1.0000000f), + XMMATRIX(-0.9999998f, 0.0000001f, -0.0007542f, 0.0000000f, + -0.0000017f, -0.9999976f, 0.0020114f, 0.0000000f, + -0.0007535f, 0.0020081f, 0.9999847f, -0.0000000f, + -0.2411421f, 0.8438913f, 0.0167495f, 1.0000000f), + XMMATRIX(-1.0000001f, -0.0000001f, 
0.0000955f, -0.0000000f, + 0.0000050f, -0.9986384f, 0.0521609f, -0.0000000f, + 0.0000961f, 0.0521570f, 0.9986261f, -0.0000000f, + -0.2411315f, 0.7425256f, -0.0203104f, 1.0000000f), + XMMATRIX(-0.9999996f, -0.0000002f, -0.0008393f, 0.0000000f, + -0.0001079f, -0.9916946f, 0.1286122f, 0.0000000f, + -0.0008317f, 0.1286074f, 0.9916822f, -0.0000000f, + -0.2410713f, 0.6366479f, -0.0695772f, 1.0000000f), + XMMATRIX( 0.9999952f, -0.0031200f, 0.0002028f, -0.0000000f, + -0.0031266f, -0.9978875f, 0.0648890f, 0.0000000f, + 0.0000000f, -0.0648893f, -0.9978880f, -0.0000000f, + -0.2384892f, 1.1499823f, -0.0747791f, 1.0000000f), + XMMATRIX( 1.0000001f, 0.0000000f, -0.0000001f, -0.0000000f, + -0.0000000f, -0.9978563f, -0.0654443f, 0.0000000f, + -0.0000000f, 0.0654437f, -0.9978519f, -0.0000000f, + -0.2411296f, 0.8435934f, 0.0352841f, 1.0000000f), + XMMATRIX( 1.0000001f, 0.0000000f, -0.0000001f, -0.0000000f, + -0.0000000f, -1.0000000f, -0.0000000f, 0.0000000f, + -0.0000000f, -0.0000003f, -0.9999956f, 0.0000000f, + -0.2411296f, 0.5391475f, -0.0000000f, 1.0000000f), + XMMATRIX(-1.0000001f, 0.0000001f, -0.0000021f, -0.0000000f, + 0.0000000f, -0.9963848f, -0.0849539f, 0.0000000f, + -0.0000014f, -0.0849561f, 0.9963719f, 0.0000000f, + 0.2411296f, 1.1474965f, 0.0978385f, 1.0000000f), + XMMATRIX(-1.0000001f, -0.0000000f, -0.0000018f, -0.0000000f, + 0.0000001f, -0.9980614f, -0.0622338f, 0.0000000f, + -0.0000011f, -0.0622363f, 0.9980485f, 0.0000000f, + 0.2411295f, 1.0469943f, 0.0740067f, 1.0000000f), + XMMATRIX(-1.0000001f, -0.0000001f, 0.0000870f, -0.0000000f, + -0.0000030f, -0.9994135f, -0.0342354f, 0.0000000f, + 0.0000876f, -0.0342383f, 0.9994008f, -0.0000000f, + 0.2411337f, 0.9460909f, 0.0474867f, 1.0000000f), + XMMATRIX(-0.9999999f, -0.0000002f, 0.0007523f, -0.0000000f, + 0.0000017f, -0.9999979f, 0.0020110f, 0.0000000f, + 0.0007530f, 0.0020077f, 0.9999850f, -0.0000000f, + 0.2411422f, 0.8438926f, 0.0167503f, 1.0000000f), + XMMATRIX(-1.0000001f, -0.0000001f, -0.0000976f, 0.0000000f, + -0.0000051f, -0.9986385f, 0.0521605f, 0.0000000f, + -0.0000968f, 0.0521566f, 0.9986261f, -0.0000000f, + 0.2411316f, 0.7425266f, -0.0203097f, 1.0000000f), + XMMATRIX(-0.9999998f, -0.0000000f, 0.0008371f, -0.0000000f, + 0.0001073f, -0.9917392f, 0.1282703f, -0.0000000f, + 0.0008309f, 0.1282655f, 0.9917267f, -0.0000000f, + 0.2410717f, 0.6366724f, -0.0693572f, 1.0000000f), + XMMATRIX( 0.9999633f, -0.0042382f, 0.0074449f, -0.0000000f, + -0.0038072f, -0.9983676f, -0.0569888f, 0.0000000f, + 0.0076739f, 0.0569584f, -0.9983445f, -0.0000000f, + -0.1011066f, 0.6298342f, 0.0208916f, 1.0000000f), + XMMATRIX( 0.9999292f, 0.0094264f, 0.0072769f, -0.0000000f, + 0.0091650f, -0.9993400f, 0.0351520f, 0.0000000f, + 0.0076031f, -0.0350826f, -0.9993530f, -0.0000000f, + -0.1060352f, 0.3789865f, -0.0141235f, 1.0000000f), + XMMATRIX(-0.0000005f, -0.0000000f, -1.0000001f, 0.0000000f, + 1.0000001f, 0.0000000f, -0.0000005f, 0.0000000f, + 0.0000000f, -1.0000002f, 0.0000000f, -0.0000000f, + -0.5391478f, -0.0000001f, -0.2411293f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000000f, 0.0000000f, -0.0000000f, + -0.0000000f, 0.0000000f, 1.0000000f, 0.0000000f, + 0.0000000f, -1.0000000f, 0.0000000f, -0.0000000f, + 0.2411295f, -0.0870393f, -0.8440942f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000000f, 0.0000010f, -0.0000000f, + 0.0000010f, -0.0039314f, -0.9999923f, 0.0000000f, + 0.0000000f, 0.9999923f, -0.0039314f, -0.0000000f, + 0.1054246f, 0.0106810f, -0.0000419f, 1.0000000f), + XMMATRIX(-0.0475822f, -0.0000003f, -0.9988677f, -0.0000000f, + 0.7671170f, 0.6404657f, 
-0.0365425f, -0.0000000f, + 0.6397402f, -0.7679867f, -0.0304748f, -0.0000000f, + -0.0567143f, 0.0626259f, -0.1028425f, 1.0000000f), + XMMATRIX(-0.0475822f, -0.0000002f, -0.9988677f, -0.0000000f, + 0.9988673f, -0.0000001f, -0.0475821f, 0.0000000f, + -0.0000000f, -0.9999999f, -0.0000000f, 0.0000000f, + -0.0812300f, -0.0106811f, -0.1016747f, 1.0000000f), + XMMATRIX( 0.9999926f, -0.0000000f, 0.0038554f, -0.0000000f, + 0.0038554f, 0.0000000f, -0.9999926f, 0.0000000f, + -0.0000000f, 1.0000000f, 0.0000000f, -0.0000000f, + 0.1011885f, -0.2318209f, 0.3805980f, 1.0000000f), + XMMATRIX(-0.0000005f, -0.0000000f, 1.0000001f, -0.0000000f, + -1.0000001f, -0.0000000f, -0.0000005f, 0.0000000f, + 0.0000000f, -1.0000002f, 0.0000000f, -0.0000000f, + 0.5391478f, -0.0000001f, -0.2411293f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000000f, 0.0000000f, -0.0000000f, + -0.0000000f, 0.0000000f, 1.0000000f, 0.0000000f, + 0.0000000f, -1.0000000f, 0.0000000f, -0.0000000f, + -0.2411295f, -0.0870393f, -0.8440942f, 1.0000000f), + XMMATRIX( 1.0000000f, -0.0000000f, -0.0000010f, -0.0000000f, + -0.0000010f, -0.0039314f, -0.9999923f, 0.0000000f, + -0.0000000f, 0.9999923f, -0.0039314f, -0.0000000f, + -0.1054246f, 0.0106810f, -0.0000419f, 1.0000000f), + XMMATRIX(-0.0475822f, 0.0000003f, 0.9988677f, -0.0000000f, + -0.7671170f, 0.6404657f, -0.0365425f, 0.0000000f, + -0.6397402f, -0.7679867f, -0.0304748f, 0.0000000f, + 0.0567143f, 0.0626259f, -0.1028425f, 1.0000000f), + XMMATRIX(-0.0475822f, 0.0000002f, 0.9988677f, -0.0000000f, + -0.9988673f, -0.0000001f, -0.0475821f, 0.0000000f, + 0.0000000f, -0.9999999f, -0.0000000f, -0.0000000f, + 0.0812300f, -0.0106811f, -0.1016747f, 1.0000000f), + XMMATRIX( 0.9999926f, -0.0000000f, -0.0038554f, -0.0000000f, + -0.0038554f, 0.0000000f, -0.9999926f, 0.0000000f, + 0.0000000f, 1.0000000f, 0.0000000f, -0.0000000f, + -0.1011885f, -0.2318209f, 0.3805980f, 1.0000000f), +}; + +const float accessor_10[] = { + 0.0, + 0.0416666679084301, + 0.0833333358168602, + 0.125, + 0.1666666716337204, + 0.2083333283662796, + 0.25, + 0.2916666567325592, + 0.3333333432674408, + 0.375, + 0.4166666567325592, + 0.4583333432674408, + 0.5, + 0.5416666865348816, + 0.5833333134651184, + 0.625, + 0.6666666865348816, + 0.7083333134651184, + 0.75, + 0.7916666865348816, + 0.8333333134651184, + 0.875, + 0.9166666865348816, + 0.9583333134651184, + 1.0, + 1.0416666269302368, + 1.0833333730697632, + 1.125, + 1.1666666269302368, + 1.2083333730697632, + 1.25, + 1.2916666269302368, + 1.3333333730697632, + 1.375, + 1.4166666269302368, + 1.4583333730697632, + 1.5, + 1.5416666269302368, + 1.5833333730697632, + 1.625, + 1.6666666269302368, + 1.7083333730697632, + 1.75, + 1.7916666269302368, + 1.8333333730697632, + 1.875, + 1.9166666269302368, + 1.9583333730697632, + 2.0, + 2.0416667461395264, + 2.0833332538604736, + 2.125, + 2.1666667461395264, + 2.2083332538604736, + 2.25, + 2.2916667461395264, + 2.3333332538604736, + 2.375, + 2.4166667461395264, + 2.4583332538604736, + 2.5, + 2.5416667461395264, + 2.5833332538604736, + 2.625, + 2.6666667461395264, + 2.7083332538604736, + 2.75, + 2.7916667461395264, + 2.8333332538604736, + 2.875, + 2.9166667461395264, + 2.9583332538604736, + 3.0, + 3.0416667461395264, + 3.0833332538604736, + 3.125, + 3.1666667461395264, + 3.2083332538604736, + 3.25, + 3.2916667461395264, + 3.3333332538604736, + 3.375, + 3.4166667461395264, + 3.4583332538604736, + 3.5, + 3.5416667461395264, + 3.5833332538604736, + 3.625, + 3.6666667461395264, + 3.7083332538604736, + 3.75, + 3.7916667461395264, + 3.8333332538604736, + 3.875, 
+ 3.9166667461395264, + 3.9583332538604736, + 4.0, + 4.041666507720947, + 4.083333492279053, + 4.125, + 4.166666507720947, +}; + +const XMFLOAT3 accessor_11[] = { + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 
0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0961419f), + XMFLOAT3(-0.0020865f, 0.0060252f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.1826500f, -0.0797067f), + XMFLOAT3(-0.0020865f, 0.2834958f, -0.0406729f), + XMFLOAT3(-0.0020865f, 0.3014458f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.2924922f, 0.1602180f), + XMFLOAT3(-0.0020865f, 0.2529734f, 0.2644956f), + XMFLOAT3(-0.0020865f, 0.1544827f, 0.3028520f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.3114135f), + XMFLOAT3(-0.0020865f, 0.0060253f, 0.1534726f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), +}; + +const XMFLOAT4 accessor_12[] = { + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + 
XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + 
XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.0824925f, 0.0000000f, 0.0000000f, 0.9965917f), + XMFLOAT4(-0.0570609f, 0.0000000f, 0.0000000f, 0.9983707f), + XMFLOAT4(-0.1955030f, 0.0000000f, 0.0000000f, 0.9807031f), + XMFLOAT4(-0.2573321f, 0.0000000f, 0.0000000f, 0.9663230f), + XMFLOAT4(-0.1297633f, 0.0000000f, 0.0000000f, 0.9915450f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_13[] = { + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 
1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8206826f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937378f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9667931f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449012f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425745f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), +}; + +const float accessor_14[] = { + 0.0, + 4.166666507720947, +}; + +const XMFLOAT3 accessor_15[] = { + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), +}; + +const XMFLOAT4 accessor_16[] = { + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), +}; + +const XMFLOAT3 accessor_17[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_18[] = { + XMFLOAT3( 0.0000000f, 0.2498145f, -0.0000010f), + XMFLOAT3(-0.0000000f, 0.2498139f, 0.0000000f), +}; + +const XMFLOAT4 accessor_19[] = { + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), +}; + +const XMFLOAT3 accessor_20[] = { + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_21[] = { + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + 
XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, 
-0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), + XMFLOAT3( 0.0000000f, 0.1042613f, -0.0238797f), + XMFLOAT3( 0.0000000f, 0.1135390f, -0.0116829f), + XMFLOAT3( 0.0000000f, 0.1261039f, 0.0005138f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1518535f, 0.0059257f), + XMFLOAT3(-0.0000000f, 0.1617508f, 0.0050005f), + XMFLOAT3( 0.0000000f, 0.1683611f, 0.0024894f), + XMFLOAT3( 0.0000000f, 0.1707681f, -0.0024006f), + XMFLOAT3( 0.0000000f, 0.1357049f, -0.0174982f), + XMFLOAT3( 0.0000000f, 0.1006418f, -0.0294237f), +}; + +const XMFLOAT4 accessor_22[] = { + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + 
XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, -0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), + XMFLOAT4(-0.1093725f, -0.0000000f, -0.0000000f, 0.9940008f), + XMFLOAT4(-0.0673628f, -0.0000000f, -0.0000000f, 0.9977286f), + XMFLOAT4(-0.0161890f, -0.0000000f, -0.0000000f, 0.9998690f), + XMFLOAT4( 0.0284723f, -0.0000000f, -0.0000000f, 0.9995946f), + XMFLOAT4( 0.0563390f, 
-0.0000000f, -0.0000000f, 0.9984117f), + XMFLOAT4( 0.0706332f, -0.0000000f, -0.0000000f, 0.9975024f), + XMFLOAT4( 0.0758960f, -0.0000000f, -0.0000000f, 0.9971157f), + XMFLOAT4( 0.0766477f, -0.0000000f, -0.0000000f, 0.9970582f), + XMFLOAT4(-0.0251953f, -0.0000000f, -0.0000000f, 0.9996825f), + XMFLOAT4(-0.1267767f, 0.0000000f, -0.0000000f, 0.9919313f), +}; + +const XMFLOAT3 accessor_23[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_24[] = { + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 
0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000000f, 0.1075576f, 0.0008690f), + XMFLOAT3(-0.0000000f, 0.1139177f, 0.0012693f), + XMFLOAT3( 0.0000000f, 0.1219529f, 0.0018200f), + XMFLOAT3(-0.0000000f, 0.1295645f, 0.0024259f), + XMFLOAT3(-0.0000000f, 0.1350743f, 0.0029986f), + XMFLOAT3( 0.0000000f, 0.1384857f, 0.0034760f), + XMFLOAT3(-0.0000000f, 0.1402218f, 0.0038029f), + XMFLOAT3( 0.0000000f, 0.1407065f, 0.0039242f), + XMFLOAT3(-0.0000000f, 0.1228388f, 0.0023193f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), +}; + +const XMFLOAT4 accessor_25[] = { + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + 
XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + 
XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), + XMFLOAT4( 0.1327501f, 0.0001287f, -0.0011040f, 0.9911489f), + XMFLOAT4( 0.0605227f, 0.0000465f, -0.0010986f, 0.9981663f), + XMFLOAT4(-0.0120234f, -0.0000349f, -0.0011109f, 0.9999271f), + XMFLOAT4(-0.0450009f, -0.0000711f, -0.0011223f, 0.9989863f), + XMFLOAT4(-0.0228169f, -0.0000464f, -0.0011166f, 0.9997391f), + XMFLOAT4( 0.0296180f, 0.0000125f, -0.0011090f, 0.9995607f), + XMFLOAT4( 0.0909552f, 0.0000818f, -0.0011100f, 0.9958544f), + XMFLOAT4( 0.1398712f, 0.0001370f, -0.0011162f, 0.9901690f), + XMFLOAT4( 0.1615402f, 0.0001614f, -0.0011155f, 0.9868655f), + XMFLOAT4( 0.1653724f, 0.0001657f, -0.0011123f, 0.9862306f), +}; + +const XMFLOAT3 accessor_26[] = { + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_27[] = { + XMFLOAT3(-0.0005524f, 0.0688296f, -0.0213631f), + XMFLOAT3(-0.0005524f, 0.0688296f, -0.0213631f), +}; + +const XMFLOAT4 accessor_28[] = { + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), +}; + +const XMFLOAT3 accessor_29[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_30[] = { + XMFLOAT3( 0.0000000f, 0.3082166f, -0.0000008f), + XMFLOAT3( 0.0000000f, 0.3082193f, 0.0000003f), +}; + +const XMFLOAT4 accessor_31[] = { + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), +}; + +const XMFLOAT3 accessor_32[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_33[] = { + XMFLOAT3(-0.0000000f, 0.3056026f, 0.0000008f), + XMFLOAT3(-0.0000000f, 0.3056034f, -0.0000001f), +}; + +const XMFLOAT4 accessor_34[] = { + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), +}; + +const XMFLOAT3 accessor_35[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_36[] = { + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 
0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 
0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), + XMFLOAT3( 0.0015116f, 0.2546579f, 0.0182690f), + XMFLOAT3( 0.0014504f, 0.2816091f, 0.0045884f), + XMFLOAT3( 0.0013892f, 0.3085600f, -0.0090922f), + XMFLOAT3( 0.0013614f, 0.3208103f, -0.0153106f), + XMFLOAT3( 0.0013621f, 0.3205253f, -0.0145414f), + XMFLOAT3( 0.0013668f, 0.3185293f, -0.0116041f), + XMFLOAT3( 0.0013792f, 0.3131109f, -0.0055542f), + XMFLOAT3( 0.0014033f, 0.3025597f, 0.0045527f), + XMFLOAT3( 0.0014791f, 0.2690616f, 0.0176327f), + XMFLOAT3( 0.0015394f, 0.2424075f, 0.0244875f), +}; + +const XMFLOAT4 accessor_37[] = { + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 
0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), + XMFLOAT4( 0.1206933f, 0.0001373f, 0.0011239f, 0.9926892f), + XMFLOAT4(-0.0473928f, -0.0000524f, 0.0011327f, 0.9988757f), + XMFLOAT4(-0.1310165f, -0.0001465f, 0.0011250f, 0.9913796f), + XMFLOAT4(-0.1736365f, -0.0001946f, 0.0011178f, 0.9848092f), + XMFLOAT4(-0.1892613f, -0.0002120f, 0.0011119f, 0.9819261f), + XMFLOAT4(-0.1914897f, -0.0002137f, 0.0011056f, 0.9814940f), + XMFLOAT4(-0.1255365f, -0.0001391f, 0.0011114f, 0.9920884f), + XMFLOAT4( 0.0211142f, 0.0000246f, 0.0011172f, 0.9997765f), + XMFLOAT4( 0.1673095f, 0.0001882f, 0.0011063f, 0.9859038f), + XMFLOAT4( 0.2327566f, 0.0002631f, 0.0010979f, 0.9725344f), +}; + +const XMFLOAT3 accessor_38[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_39[] = { + XMFLOAT3( 0.2488541f, 0.2033313f, -0.0450287f), + XMFLOAT3( 0.2488541f, 0.2033313f, 
-0.0450287f), +}; + +const XMFLOAT4 accessor_40[] = { + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), +}; + +const XMFLOAT3 accessor_41[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_42[] = { + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), +}; + +const XMFLOAT4 accessor_43[] = { + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), +}; + +const XMFLOAT3 accessor_44[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_45[] = { + XMFLOAT3(-0.0000000f, 0.1024612f, -0.0000002f), + XMFLOAT3( 0.0000000f, 0.1024621f, -0.0000009f), +}; + +const XMFLOAT4 accessor_46[] = { + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_47[] = { + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000000f), +}; + +const XMFLOAT3 accessor_48[] = { + XMFLOAT3(-0.0000000f, 0.1026015f, -0.0000024f), + XMFLOAT3(-0.0000000f, 0.1026023f, -0.0000007f), +}; + +const XMFLOAT4 accessor_49[] = { + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_50[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_51[] = { + XMFLOAT3( 0.0000000f, 0.1033747f, 0.0000013f), + XMFLOAT3( 0.0000000f, 0.1033746f, 0.0000001f), +}; + +const XMFLOAT4 accessor_52[] = { + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_53[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_54[] = { + XMFLOAT3(-0.0000000f, 0.1012776f, -0.0000007f), + XMFLOAT3(-0.0000000f, 0.1012825f, 0.0000005f), +}; + +const XMFLOAT4 accessor_55[] = { + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), +}; + +const XMFLOAT3 accessor_56[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_57[] = { + XMFLOAT3( 0.0000000f, 0.1024311f, -0.0000003f), + XMFLOAT3( 0.0000000f, 0.1024376f, -0.0000000f), +}; + +const XMFLOAT4 accessor_58[] = { + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), +}; + +const XMFLOAT3 accessor_59[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_60[] = { + XMFLOAT3(-0.0027646f, 0.0680361f, -0.0078378f), + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), +}; + +const XMFLOAT4 accessor_61[] = { + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), +}; + +const XMFLOAT3 accessor_62[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_63[] = { + XMFLOAT3( 0.0000000f, 0.3082178f, -0.0000006f), + XMFLOAT3(-0.0000000f, 0.3082178f, 0.0000004f), +}; + +const 
XMFLOAT4 accessor_64[] = { + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), +}; + +const XMFLOAT3 accessor_65[] = { + XMFLOAT3( 1.0000001f, 1.0000000f, 1.0000001f), + XMFLOAT3( 1.0000001f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_66[] = { + XMFLOAT3(-0.0000000f, 0.3056006f, -0.0000014f), + XMFLOAT3( 0.0000000f, 0.3055994f, -0.0000001f), +}; + +const XMFLOAT4 accessor_67[] = { + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), +}; + +const XMFLOAT3 accessor_68[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_69[] = { + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), +}; + +const XMFLOAT4 accessor_70[] = { + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), +}; + +const XMFLOAT3 accessor_71[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_72[] = { + XMFLOAT3( 0.0000000f, 0.1024651f, -0.0000008f), + XMFLOAT3( 0.0000000f, 0.1024579f, -0.0000007f), +}; + +const XMFLOAT4 accessor_73[] = { + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_74[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_75[] = { + XMFLOAT3(-0.0000000f, 0.1026116f, -0.0000006f), + XMFLOAT3(-0.0000000f, 0.1026118f, -0.0000004f), +}; + +const XMFLOAT4 accessor_76[] = { + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_77[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_78[] = { + XMFLOAT3( 0.0000000f, 0.1033683f, 0.0000007f), + XMFLOAT3( 0.0000000f, 0.1033682f, 0.0000003f), +}; + +const XMFLOAT4 accessor_79[] = { + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_80[] = { + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000000f), + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_81[] = { + XMFLOAT3(-0.0000000f, 0.1012827f, -0.0000015f), + XMFLOAT3( 0.0000000f, 0.1012839f, -0.0000012f), +}; + +const XMFLOAT4 accessor_82[] = { + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), +}; + +const XMFLOAT3 accessor_83[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_84[] = { + XMFLOAT3( 0.0000001f, 0.1024345f, -0.0000001f), + XMFLOAT3( 0.0000001f, 0.1024316f, 0.0000000f), +}; + +const XMFLOAT4 accessor_85[] = { + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), +}; + +const XMFLOAT3 accessor_86[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_87[] = { + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), +}; + +const XMFLOAT4 
accessor_88[] = { + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), +}; + +const XMFLOAT3 accessor_89[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_90[] = { + XMFLOAT3( 0.0000000f, 0.2498153f, -0.0000002f), + XMFLOAT3(-0.0000000f, 0.2498145f, 0.0000000f), +}; + +const XMFLOAT4 accessor_91[] = { + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), +}; + +const XMFLOAT3 accessor_92[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_93[] = { + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_94[] = { + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_95[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_96[] = { + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_97[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_98[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_99[] = { + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 
0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0044682f), +}; + +const XMFLOAT4 accessor_100[] = { + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_101[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_102[] = { + XMFLOAT3(-0.0000000f, 0.0919263f, 0.0000001f), + XMFLOAT3( 0.0000000f, 0.0919260f, 0.0000004f), +}; + +const XMFLOAT4 
accessor_103[] = { + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_104[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_105[] = { + XMFLOAT3( 0.0000011f, 0.1196970f, 0.0000000f), + XMFLOAT3( 0.0000003f, 0.1196963f, -0.0000000f), +}; + +const XMFLOAT4 accessor_106[] = { + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_107[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_108[] = { + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_109[] = { + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_110[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_111[] = { + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_112[] = { + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_113[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_114[] = { + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_115[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_116[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_117[] = { + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 
0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0961420f), + XMFLOAT3(-0.0020865f, 0.0613021f, -0.0940875f), + XMFLOAT3(-0.0020865f, 0.2418380f, -0.0687290f), + XMFLOAT3(-0.0020865f, 0.3463625f, -0.0236981f), + XMFLOAT3(-0.0020865f, 0.3659465f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.3578844f, 0.1711183f), + XMFLOAT3(-0.0020865f, 0.3162026f, 0.2572443f), + XMFLOAT3(-0.0020865f, 0.2100785f, 0.2797687f), + XMFLOAT3(-0.0020865f, 0.0552606f, 0.2831892f), + XMFLOAT3(-0.0020865f, 0.0583039f, 0.1395508f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0044682f), +}; + +const XMFLOAT4 
accessor_118[] = { + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_119[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_120[] = { + XMFLOAT3( 0.0000000f, 0.0919262f, 0.0000003f), + XMFLOAT3(-0.0000000f, 0.0919257f, 0.0000010f), +}; + +const XMFLOAT4 accessor_121[] = { + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_122[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_123[] = { + XMFLOAT3(-0.0000009f, 0.1196955f, 0.0000000f), + XMFLOAT3(-0.0000006f, 0.1196964f, -0.0000000f), +}; + +const XMFLOAT4 accessor_124[] = { + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_125[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_126[] = { + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_127[] = { + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_128[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const float accessor_129[] = { + 0.0, + 0.0416666679084301, + 0.0833333358168602, + 0.125, + 0.1666666716337204, + 0.2083333283662796, + 0.25, + 0.2916666567325592, + 0.3333333432674408, + 0.375, + 0.4166666567325592, + 0.4583333432674408, + 0.5, + 0.5416666865348816, + 0.5833333134651184, + 0.625, + 0.6666666865348816, + 0.7083333134651184, + 0.75, + 0.7916666865348816, + 0.8333333134651184, + 0.875, + 0.9166666865348816, + 0.9583333134651184, + 1.0, + 1.0416666269302368, + 1.0833333730697632, + 1.125, + 1.1666666269302368, + 1.2083333730697632, + 1.25, + 1.2916666269302368, + 1.3333333730697632, + 1.375, + 1.4166666269302368, + 1.4583333730697632, + 1.5, + 1.5416666269302368, + 1.5833333730697632, + 1.625, + 1.6666666269302368, + 1.7083333730697632, + 1.75, + 1.7916666269302368, + 1.8333333730697632, + 1.875, + 1.9166666269302368, + 1.9583333730697632, + 2.0, + 2.0416667461395264, + 2.0833332538604736, + 2.125, + 2.1666667461395264, + 2.2083332538604736, + 2.25, + 2.2916667461395264, + 2.3333332538604736, + 2.375, + 2.4166667461395264, + 2.4583332538604736, + 2.5, + 2.5416667461395264, + 2.5833332538604736, + 2.625, + 2.6666667461395264, + 2.7083332538604736, + 2.75, + 2.7916667461395264, + 2.8333332538604736, + 2.875, + 2.9166667461395264, + 2.9583332538604736, + 3.0, + 3.0416667461395264, + 3.0833332538604736, + 3.125, + 3.1666667461395264, + 3.2083332538604736, + 3.25, + 3.2916667461395264, + 3.3333332538604736, + 3.375, + 3.4166667461395264, + 3.4583332538604736, + 3.5, + 3.5416667461395264, + 3.5833332538604736, + 3.625, + 3.6666667461395264, + 3.7083332538604736, + 3.75, +}; + +const XMFLOAT3 accessor_130[] = { + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0178199f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0176253f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0170803f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0162434f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0151729f), + XMFLOAT3(-0.0020865f, 
0.0060253f, -0.0139273f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0125649f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0111441f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0097233f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0083608f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0071152f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0060447f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0052078f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0046629f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0046182f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0050419f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0057004f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0065544f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0075649f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0086928f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0098989f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0111441f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0123893f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0135953f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0147232f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0157337f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0165877f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0172462f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0176699f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0178199f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0176253f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0170803f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0162434f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0151729f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0139273f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0125649f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0111441f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0097233f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0083608f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0071152f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0060447f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0052078f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0046629f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0046182f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0050419f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0057004f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0065544f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0075649f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0086928f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0098989f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0111441f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0123893f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0135953f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0147232f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0157337f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0165877f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0172462f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0176699f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0178199f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0176253f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0170803f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0162434f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0151729f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0139273f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0125649f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0111441f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0097233f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0083608f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0071152f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0060447f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0052078f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0046629f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0046182f), + XMFLOAT3(-0.0020865f, 
0.0060253f, -0.0050419f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0057004f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0065544f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0075649f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0086928f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0098989f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0111441f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0123893f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0135953f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0147232f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0157337f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0165877f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0172462f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0176699f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0178199f), +}; + +const XMFLOAT4 accessor_131[] = { + XMFLOAT4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), + XMFLOAT4(-0.0572518f, -0.0034832f, -0.0008201f, 0.9983534f), + XMFLOAT4(-0.0562628f, -0.0034771f, -0.0008388f, 0.9984096f), + XMFLOAT4(-0.0542909f, -0.0034680f, -0.0008658f, 0.9985188f), + XMFLOAT4(-0.0508358f, -0.0034575f, -0.0008972f, 0.9987006f), + XMFLOAT4(-0.0448832f, -0.0034473f, -0.0009273f, 0.9989859f), + XMFLOAT4(-0.0340697f, -0.0034409f, -0.0009455f, 0.9994131f), + XMFLOAT4(-0.0152769f, -0.0034421f, -0.0009389f, 0.9998769f), + XMFLOAT4( 0.0013275f, -0.0034423f, -0.0009401f, 0.9999927f), + XMFLOAT4( 0.0104516f, -0.0034370f, -0.0009648f, 0.9999390f), + XMFLOAT4( 0.0156699f, -0.0034296f, -0.0009984f, 0.9998708f), + XMFLOAT4( 0.0187803f, -0.0034223f, -0.0010320f, 0.9998173f), + XMFLOAT4( 0.0205858f, -0.0034160f, -0.0010604f, 0.9997817f), + XMFLOAT4( 0.0215017f, -0.0034117f, -0.0010798f, 0.9997625f), + XMFLOAT4( 0.0217697f, -0.0034101f, -0.0010870f, 0.9997566f), + XMFLOAT4( 0.0215163f, -0.0034113f, -0.0010816f, 0.9997621f), + XMFLOAT4( 0.0206586f, -0.0034145f, -0.0010670f, 0.9997802f), + XMFLOAT4( 0.0189944f, -0.0034191f, -0.0010455f, 0.9998133f), + XMFLOAT4( 0.0161988f, -0.0034246f, -0.0010197f, 0.9998624f), + XMFLOAT4( 0.0117109f, -0.0034302f, -0.0009934f, 0.9999250f), + XMFLOAT4( 0.0044984f, -0.0034346f, -0.0009716f, 0.9999835f), + XMFLOAT4(-0.0071126f, -0.0034362f, -0.0009619f, 0.9999683f), + XMFLOAT4(-0.0226609f, -0.0034351f, -0.0009642f, 0.9997368f), + XMFLOAT4(-0.0356789f, -0.0034372f, -0.0009581f, 0.9993569f), + XMFLOAT4(-0.0440886f, -0.0034440f, -0.0009380f, 0.9990212f), + XMFLOAT4(-0.0494646f, -0.0034530f, -0.0009107f, 0.9987695f), + XMFLOAT4(-0.0529636f, -0.0034627f, -0.0008819f, 0.9985901f), + XMFLOAT4(-0.0552203f, -0.0034716f, -0.0008551f, 0.9984678f), + XMFLOAT4(-0.0565983f, -0.0034789f, -0.0008332f, 0.9983907f), + XMFLOAT4(-0.0573221f, -0.0034838f, -0.0008186f, 0.9983494f), + XMFLOAT4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), + XMFLOAT4(-0.0572518f, -0.0034832f, -0.0008201f, 0.9983534f), + XMFLOAT4(-0.0562628f, -0.0034771f, -0.0008388f, 0.9984096f), + XMFLOAT4(-0.0542909f, -0.0034680f, -0.0008658f, 0.9985188f), + XMFLOAT4(-0.0508358f, -0.0034575f, -0.0008972f, 0.9987006f), + XMFLOAT4(-0.0448832f, -0.0034473f, -0.0009273f, 0.9989859f), + XMFLOAT4(-0.0340697f, -0.0034409f, -0.0009455f, 0.9994131f), + XMFLOAT4(-0.0152769f, -0.0034421f, -0.0009389f, 0.9998769f), + XMFLOAT4( 0.0013275f, -0.0034423f, -0.0009401f, 0.9999927f), + XMFLOAT4( 0.0104516f, -0.0034370f, -0.0009648f, 0.9999390f), + XMFLOAT4( 0.0156699f, -0.0034296f, -0.0009984f, 0.9998708f), + XMFLOAT4( 0.0187803f, -0.0034223f, -0.0010320f, 0.9998173f), + XMFLOAT4( 0.0205858f, -0.0034160f, -0.0010604f, 0.9997817f), + XMFLOAT4( 0.0215017f, -0.0034117f, -0.0010798f, 0.9997625f), + XMFLOAT4( 
0.0217697f, -0.0034101f, -0.0010870f, 0.9997566f), + XMFLOAT4( 0.0215163f, -0.0034113f, -0.0010816f, 0.9997621f), + XMFLOAT4( 0.0206586f, -0.0034145f, -0.0010670f, 0.9997802f), + XMFLOAT4( 0.0189944f, -0.0034191f, -0.0010455f, 0.9998133f), + XMFLOAT4( 0.0161988f, -0.0034246f, -0.0010197f, 0.9998624f), + XMFLOAT4( 0.0117109f, -0.0034302f, -0.0009934f, 0.9999250f), + XMFLOAT4( 0.0044984f, -0.0034346f, -0.0009716f, 0.9999835f), + XMFLOAT4(-0.0071126f, -0.0034362f, -0.0009619f, 0.9999683f), + XMFLOAT4(-0.0226609f, -0.0034351f, -0.0009642f, 0.9997368f), + XMFLOAT4(-0.0356789f, -0.0034372f, -0.0009581f, 0.9993569f), + XMFLOAT4(-0.0440886f, -0.0034440f, -0.0009380f, 0.9990212f), + XMFLOAT4(-0.0494646f, -0.0034530f, -0.0009107f, 0.9987695f), + XMFLOAT4(-0.0529636f, -0.0034627f, -0.0008819f, 0.9985901f), + XMFLOAT4(-0.0552203f, -0.0034716f, -0.0008551f, 0.9984678f), + XMFLOAT4(-0.0565983f, -0.0034789f, -0.0008332f, 0.9983907f), + XMFLOAT4(-0.0573221f, -0.0034838f, -0.0008186f, 0.9983494f), + XMFLOAT4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), + XMFLOAT4(-0.0572518f, -0.0034832f, -0.0008201f, 0.9983534f), + XMFLOAT4(-0.0562628f, -0.0034771f, -0.0008388f, 0.9984096f), + XMFLOAT4(-0.0542909f, -0.0034680f, -0.0008658f, 0.9985188f), + XMFLOAT4(-0.0508358f, -0.0034575f, -0.0008972f, 0.9987006f), + XMFLOAT4(-0.0448832f, -0.0034473f, -0.0009273f, 0.9989859f), + XMFLOAT4(-0.0340697f, -0.0034409f, -0.0009455f, 0.9994131f), + XMFLOAT4(-0.0152769f, -0.0034421f, -0.0009389f, 0.9998769f), + XMFLOAT4( 0.0013275f, -0.0034423f, -0.0009401f, 0.9999927f), + XMFLOAT4( 0.0104516f, -0.0034370f, -0.0009648f, 0.9999390f), + XMFLOAT4( 0.0156699f, -0.0034296f, -0.0009984f, 0.9998708f), + XMFLOAT4( 0.0187803f, -0.0034223f, -0.0010320f, 0.9998173f), + XMFLOAT4( 0.0205858f, -0.0034160f, -0.0010604f, 0.9997817f), + XMFLOAT4( 0.0215017f, -0.0034117f, -0.0010798f, 0.9997625f), + XMFLOAT4( 0.0217697f, -0.0034101f, -0.0010870f, 0.9997566f), + XMFLOAT4( 0.0215163f, -0.0034113f, -0.0010816f, 0.9997621f), + XMFLOAT4( 0.0206586f, -0.0034145f, -0.0010670f, 0.9997802f), + XMFLOAT4( 0.0189944f, -0.0034191f, -0.0010455f, 0.9998133f), + XMFLOAT4( 0.0161988f, -0.0034246f, -0.0010197f, 0.9998624f), + XMFLOAT4( 0.0117109f, -0.0034302f, -0.0009934f, 0.9999250f), + XMFLOAT4( 0.0044984f, -0.0034346f, -0.0009716f, 0.9999835f), + XMFLOAT4(-0.0071126f, -0.0034362f, -0.0009619f, 0.9999683f), + XMFLOAT4(-0.0226609f, -0.0034351f, -0.0009642f, 0.9997368f), + XMFLOAT4(-0.0356789f, -0.0034372f, -0.0009581f, 0.9993569f), + XMFLOAT4(-0.0440886f, -0.0034440f, -0.0009380f, 0.9990212f), + XMFLOAT4(-0.0494646f, -0.0034530f, -0.0009107f, 0.9987695f), + XMFLOAT4(-0.0529636f, -0.0034627f, -0.0008819f, 0.9985901f), + XMFLOAT4(-0.0552203f, -0.0034716f, -0.0008551f, 0.9984678f), + XMFLOAT4(-0.0565983f, -0.0034789f, -0.0008332f, 0.9983907f), + XMFLOAT4(-0.0573221f, -0.0034838f, -0.0008186f, 0.9983494f), + XMFLOAT4(-0.0575390f, -0.0034855f, -0.0008133f, 0.9983369f), +}; + +const float accessor_132[] = { + 0.0, + 3.75, +}; + +const XMFLOAT3 accessor_133[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_134[] = { + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), +}; + +const XMFLOAT4 accessor_135[] = { + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), +}; + +const XMFLOAT3 accessor_136[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 
0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_137[] = { + XMFLOAT3(-0.0000000f, 0.2498149f, -0.0000000f), + XMFLOAT3(-0.0000000f, 0.2498149f, -0.0000000f), +}; + +const XMFLOAT4 accessor_138[] = { + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), +}; + +const XMFLOAT3 accessor_139[] = { + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999998f), + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999998f), +}; + +const XMFLOAT3 accessor_140[] = { + XMFLOAT3( 0.0000000f, 0.1347095f, -0.0000000f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000161f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000823f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002668f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006553f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008652f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009378f), + XMFLOAT3( 0.0000375f, 0.1128822f, -0.0009551f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009390f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008728f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006883f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002997f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000899f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000173f), + XMFLOAT3(-0.0000000f, 0.1347095f, -0.0000000f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000161f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000823f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002668f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006553f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008652f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009378f), + XMFLOAT3( 0.0000375f, 0.1128822f, -0.0009551f), + XMFLOAT3( 0.0000362f, 0.1136308f, -0.0009433f), + XMFLOAT3( 0.0000328f, 0.1156368f, -0.0008998f), + XMFLOAT3( 0.0000278f, 0.1185411f, -0.0008043f), + XMFLOAT3( 0.0000219f, 0.1219844f, -0.0006213f), + XMFLOAT3( 0.0000156f, 0.1256073f, -0.0003603f), + XMFLOAT3( 0.0000097f, 0.1290506f, -0.0001627f), + XMFLOAT3( 0.0000047f, 0.1319548f, -0.0000591f), + XMFLOAT3( 0.0000013f, 0.1339610f, -0.0000125f), + XMFLOAT3( 0.0000000f, 0.1347095f, -0.0000000f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000161f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000823f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002668f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006553f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008652f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009378f), + XMFLOAT3( 0.0000375f, 0.1128822f, -0.0009551f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009390f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008728f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006883f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002997f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000899f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000173f), + XMFLOAT3(-0.0000000f, 0.1347095f, -0.0000000f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000161f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000823f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002668f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006553f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008652f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009378f), + XMFLOAT3( 0.0000375f, 0.1128822f, -0.0009551f), + XMFLOAT3( 0.0000362f, 0.1136308f, -0.0009433f), + XMFLOAT3( 0.0000328f, 0.1156368f, -0.0008998f), + XMFLOAT3( 0.0000278f, 0.1185411f, -0.0008043f), + XMFLOAT3( 0.0000219f, 0.1219844f, -0.0006213f), + XMFLOAT3( 0.0000156f, 0.1256073f, -0.0003603f), + XMFLOAT3( 0.0000097f, 0.1290506f, -0.0001627f), + XMFLOAT3( 0.0000047f, 0.1319548f, -0.0000591f), + XMFLOAT3( 0.0000013f, 0.1339610f, -0.0000125f), + XMFLOAT3( 0.0000000f, 
0.1347095f, -0.0000000f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000161f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000823f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002668f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006553f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008652f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009378f), + XMFLOAT3( 0.0000375f, 0.1128822f, -0.0009551f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009390f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008728f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006883f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002997f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000899f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000173f), + XMFLOAT3(-0.0000000f, 0.1347095f, -0.0000000f), + XMFLOAT3( 0.0000021f, 0.1335004f, -0.0000161f), + XMFLOAT3( 0.0000074f, 0.1303822f, -0.0000823f), + XMFLOAT3( 0.0000148f, 0.1261186f, -0.0002668f), + XMFLOAT3( 0.0000228f, 0.1214731f, -0.0006553f), + XMFLOAT3( 0.0000301f, 0.1172095f, -0.0008652f), + XMFLOAT3( 0.0000354f, 0.1140913f, -0.0009378f), + XMFLOAT3( 0.0000375f, 0.1128822f, -0.0009551f), + XMFLOAT3( 0.0000362f, 0.1136308f, -0.0009433f), + XMFLOAT3( 0.0000328f, 0.1156368f, -0.0008998f), + XMFLOAT3( 0.0000278f, 0.1185411f, -0.0008043f), + XMFLOAT3( 0.0000219f, 0.1219844f, -0.0006213f), + XMFLOAT3( 0.0000156f, 0.1256073f, -0.0003603f), + XMFLOAT3( 0.0000097f, 0.1290506f, -0.0001627f), + XMFLOAT3( 0.0000047f, 0.1319548f, -0.0000591f), + XMFLOAT3( 0.0000013f, 0.1339610f, -0.0000125f), + XMFLOAT3( 0.0000000f, 0.1347095f, -0.0000000f), +}; + +const XMFLOAT4 accessor_141[] = { + XMFLOAT4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), + XMFLOAT4(-0.0463134f, -0.0008756f, 0.0013147f, 0.9989257f), + XMFLOAT4(-0.0432515f, -0.0008707f, 0.0012913f, 0.9990631f), + XMFLOAT4(-0.0385485f, -0.0008633f, 0.0012554f, 0.9992556f), + XMFLOAT4(-0.0325317f, -0.0008544f, 0.0012093f, 0.9994696f), + XMFLOAT4(-0.0255289f, -0.0008449f, 0.0011556f, 0.9996731f), + XMFLOAT4(-0.0178682f, -0.0008353f, 0.0010968f, 0.9998394f), + XMFLOAT4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), + XMFLOAT4(-0.0018872f, -0.0008184f, 0.0009740f, 0.9999974f), + XMFLOAT4( 0.0057753f, -0.0008118f, 0.0009150f, 0.9999826f), + XMFLOAT4( 0.0127808f, -0.0008065f, 0.0008610f, 0.9999176f), + XMFLOAT4( 0.0188006f, -0.0008027f, 0.0008146f, 0.9998226f), + XMFLOAT4( 0.0235065f, -0.0008001f, 0.0007783f, 0.9997230f), + XMFLOAT4( 0.0265706f, -0.0007986f, 0.0007547f, 0.9996463f), + XMFLOAT4( 0.0276649f, -0.0007981f, 0.0007462f, 0.9996167f), + XMFLOAT4( 0.0268218f, -0.0007984f, 0.0007527f, 0.9996396f), + XMFLOAT4( 0.0244393f, -0.0007996f, 0.0007711f, 0.9997007f), + XMFLOAT4( 0.0207369f, -0.0008016f, 0.0007997f, 0.9997844f), + XMFLOAT4( 0.0159344f, -0.0008044f, 0.0008367f, 0.9998724f), + XMFLOAT4( 0.0102516f, -0.0008083f, 0.0008805f, 0.9999467f), + XMFLOAT4( 0.0039085f, -0.0008133f, 0.0009293f, 0.9999916f), + XMFLOAT4(-0.0028748f, -0.0008193f, 0.0009816f, 0.9999951f), + XMFLOAT4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), + XMFLOAT4(-0.0168807f, -0.0008341f, 0.0010892f, 0.9998566f), + XMFLOAT4(-0.0236626f, -0.0008424f, 0.0011413f, 0.9997190f), + XMFLOAT4(-0.0300036f, -0.0008509f, 0.0011899f, 0.9995487f), + XMFLOAT4(-0.0356838f, -0.0008590f, 0.0012334f, 0.9993620f), + XMFLOAT4(-0.0404836f, -0.0008663f, 0.0012702f, 0.9991790f), + XMFLOAT4(-0.0441837f, -0.0008721f, 0.0012985f, 0.9990222f), + XMFLOAT4(-0.0465645f, -0.0008760f, 0.0013167f, 0.9989141f), + XMFLOAT4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), + XMFLOAT4(-0.0463134f, -0.0008756f, 0.0013147f, 0.9989257f), + 
XMFLOAT4(-0.0432515f, -0.0008707f, 0.0012913f, 0.9990631f), + XMFLOAT4(-0.0385485f, -0.0008633f, 0.0012554f, 0.9992556f), + XMFLOAT4(-0.0325317f, -0.0008544f, 0.0012093f, 0.9994696f), + XMFLOAT4(-0.0255289f, -0.0008449f, 0.0011556f, 0.9996731f), + XMFLOAT4(-0.0178682f, -0.0008353f, 0.0010968f, 0.9998394f), + XMFLOAT4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), + XMFLOAT4(-0.0018872f, -0.0008184f, 0.0009740f, 0.9999974f), + XMFLOAT4( 0.0057753f, -0.0008118f, 0.0009150f, 0.9999826f), + XMFLOAT4( 0.0127808f, -0.0008065f, 0.0008610f, 0.9999176f), + XMFLOAT4( 0.0188006f, -0.0008027f, 0.0008146f, 0.9998226f), + XMFLOAT4( 0.0235065f, -0.0008001f, 0.0007783f, 0.9997230f), + XMFLOAT4( 0.0265706f, -0.0007986f, 0.0007547f, 0.9996463f), + XMFLOAT4( 0.0276649f, -0.0007981f, 0.0007462f, 0.9996167f), + XMFLOAT4( 0.0268218f, -0.0007984f, 0.0007527f, 0.9996396f), + XMFLOAT4( 0.0244393f, -0.0007996f, 0.0007711f, 0.9997007f), + XMFLOAT4( 0.0207369f, -0.0008016f, 0.0007997f, 0.9997844f), + XMFLOAT4( 0.0159344f, -0.0008044f, 0.0008367f, 0.9998724f), + XMFLOAT4( 0.0102516f, -0.0008083f, 0.0008805f, 0.9999467f), + XMFLOAT4( 0.0039085f, -0.0008133f, 0.0009293f, 0.9999916f), + XMFLOAT4(-0.0028748f, -0.0008193f, 0.0009816f, 0.9999951f), + XMFLOAT4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), + XMFLOAT4(-0.0168807f, -0.0008341f, 0.0010892f, 0.9998566f), + XMFLOAT4(-0.0236626f, -0.0008424f, 0.0011413f, 0.9997190f), + XMFLOAT4(-0.0300036f, -0.0008509f, 0.0011899f, 0.9995487f), + XMFLOAT4(-0.0356838f, -0.0008590f, 0.0012334f, 0.9993620f), + XMFLOAT4(-0.0404836f, -0.0008663f, 0.0012702f, 0.9991790f), + XMFLOAT4(-0.0441837f, -0.0008721f, 0.0012985f, 0.9990222f), + XMFLOAT4(-0.0465645f, -0.0008760f, 0.0013167f, 0.9989141f), + XMFLOAT4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), + XMFLOAT4(-0.0463134f, -0.0008756f, 0.0013147f, 0.9989257f), + XMFLOAT4(-0.0432515f, -0.0008707f, 0.0012913f, 0.9990631f), + XMFLOAT4(-0.0385485f, -0.0008633f, 0.0012554f, 0.9992556f), + XMFLOAT4(-0.0325317f, -0.0008544f, 0.0012093f, 0.9994696f), + XMFLOAT4(-0.0255289f, -0.0008449f, 0.0011556f, 0.9996731f), + XMFLOAT4(-0.0178682f, -0.0008353f, 0.0010968f, 0.9998394f), + XMFLOAT4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), + XMFLOAT4(-0.0018872f, -0.0008184f, 0.0009740f, 0.9999974f), + XMFLOAT4( 0.0057753f, -0.0008118f, 0.0009150f, 0.9999826f), + XMFLOAT4( 0.0127808f, -0.0008065f, 0.0008610f, 0.9999176f), + XMFLOAT4( 0.0188006f, -0.0008027f, 0.0008146f, 0.9998226f), + XMFLOAT4( 0.0235065f, -0.0008001f, 0.0007783f, 0.9997230f), + XMFLOAT4( 0.0265706f, -0.0007986f, 0.0007547f, 0.9996463f), + XMFLOAT4( 0.0276649f, -0.0007981f, 0.0007462f, 0.9996167f), + XMFLOAT4( 0.0268218f, -0.0007984f, 0.0007527f, 0.9996396f), + XMFLOAT4( 0.0244393f, -0.0007996f, 0.0007711f, 0.9997007f), + XMFLOAT4( 0.0207369f, -0.0008016f, 0.0007997f, 0.9997844f), + XMFLOAT4( 0.0159344f, -0.0008044f, 0.0008367f, 0.9998724f), + XMFLOAT4( 0.0102516f, -0.0008083f, 0.0008805f, 0.9999467f), + XMFLOAT4( 0.0039085f, -0.0008133f, 0.0009293f, 0.9999916f), + XMFLOAT4(-0.0028748f, -0.0008193f, 0.0009816f, 0.9999951f), + XMFLOAT4(-0.0098780f, -0.0008263f, 0.0010354f, 0.9999503f), + XMFLOAT4(-0.0168807f, -0.0008341f, 0.0010892f, 0.9998566f), + XMFLOAT4(-0.0236626f, -0.0008424f, 0.0011413f, 0.9997190f), + XMFLOAT4(-0.0300036f, -0.0008509f, 0.0011899f, 0.9995487f), + XMFLOAT4(-0.0356838f, -0.0008590f, 0.0012334f, 0.9993620f), + XMFLOAT4(-0.0404836f, -0.0008663f, 0.0012702f, 0.9991790f), + XMFLOAT4(-0.0441837f, -0.0008721f, 0.0012985f, 0.9990222f), + 
XMFLOAT4(-0.0465645f, -0.0008760f, 0.0013167f, 0.9989141f), + XMFLOAT4(-0.0474069f, -0.0008774f, 0.0013231f, 0.9988744f), +}; + +const XMFLOAT3 accessor_142[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_143[] = { + XMFLOAT3(-0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3(-0.0000009f, 0.1099318f, -0.0000377f), + XMFLOAT3(-0.0000031f, 0.1054348f, -0.0001928f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0006252f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0015358f), + XMFLOAT3(-0.0000127f, 0.0864372f, -0.0020276f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0021978f), + XMFLOAT3(-0.0000159f, 0.0801965f, -0.0022383f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0022006f), + XMFLOAT3(-0.0000127f, 0.0864373f, -0.0020454f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0016131f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0007024f), + XMFLOAT3(-0.0000031f, 0.1054347f, -0.0002107f), + XMFLOAT3(-0.0000009f, 0.1099317f, -0.0000405f), + XMFLOAT3(-0.0000000f, 0.1116755f, -0.0000000f), + XMFLOAT3(-0.0000009f, 0.1099318f, -0.0000377f), + XMFLOAT3(-0.0000031f, 0.1054347f, -0.0001928f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0006252f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0015358f), + XMFLOAT3(-0.0000127f, 0.0864372f, -0.0020276f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0021978f), + XMFLOAT3(-0.0000159f, 0.0801965f, -0.0022382f), + XMFLOAT3(-0.0000153f, 0.0812760f, -0.0022106f), + XMFLOAT3(-0.0000139f, 0.0841692f, -0.0021086f), + XMFLOAT3(-0.0000118f, 0.0883577f, -0.0018848f), + XMFLOAT3(-0.0000092f, 0.0933236f, -0.0014561f), + XMFLOAT3(-0.0000066f, 0.0985484f, -0.0008443f), + XMFLOAT3(-0.0000041f, 0.1035143f, -0.0003812f), + XMFLOAT3(-0.0000020f, 0.1077028f, -0.0001386f), + XMFLOAT3(-0.0000005f, 0.1105960f, -0.0000294f), + XMFLOAT3(-0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3(-0.0000009f, 0.1099318f, -0.0000377f), + XMFLOAT3(-0.0000031f, 0.1054348f, -0.0001928f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0006252f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0015358f), + XMFLOAT3(-0.0000127f, 0.0864372f, -0.0020276f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0021978f), + XMFLOAT3(-0.0000159f, 0.0801965f, -0.0022383f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0022006f), + XMFLOAT3(-0.0000127f, 0.0864373f, -0.0020454f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0016131f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0007024f), + XMFLOAT3(-0.0000031f, 0.1054347f, -0.0002107f), + XMFLOAT3(-0.0000009f, 0.1099317f, -0.0000405f), + XMFLOAT3(-0.0000000f, 0.1116755f, -0.0000000f), + XMFLOAT3(-0.0000009f, 0.1099318f, -0.0000377f), + XMFLOAT3(-0.0000031f, 0.1054347f, -0.0001928f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0006252f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0015358f), + XMFLOAT3(-0.0000127f, 0.0864372f, -0.0020276f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0021978f), + XMFLOAT3(-0.0000159f, 0.0801965f, -0.0022382f), + XMFLOAT3(-0.0000153f, 0.0812760f, -0.0022106f), + XMFLOAT3(-0.0000139f, 0.0841692f, -0.0021086f), + XMFLOAT3(-0.0000118f, 0.0883577f, -0.0018848f), + XMFLOAT3(-0.0000092f, 0.0933236f, -0.0014561f), + XMFLOAT3(-0.0000066f, 0.0985484f, -0.0008443f), + XMFLOAT3(-0.0000041f, 0.1035143f, -0.0003812f), + XMFLOAT3(-0.0000020f, 0.1077028f, -0.0001386f), + XMFLOAT3(-0.0000005f, 0.1105960f, -0.0000294f), + XMFLOAT3(-0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3(-0.0000009f, 0.1099318f, -0.0000377f), + XMFLOAT3(-0.0000031f, 0.1054348f, -0.0001928f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0006252f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0015358f), + 
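// Translation keyframes; the same short cycle is baked out repeatedly across the clip. +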
XMFLOAT3(-0.0000127f, 0.0864372f, -0.0020276f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0021978f), + XMFLOAT3(-0.0000159f, 0.0801965f, -0.0022383f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0022006f), + XMFLOAT3(-0.0000127f, 0.0864373f, -0.0020454f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0016131f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0007024f), + XMFLOAT3(-0.0000031f, 0.1054347f, -0.0002107f), + XMFLOAT3(-0.0000009f, 0.1099317f, -0.0000405f), + XMFLOAT3(-0.0000000f, 0.1116755f, -0.0000000f), + XMFLOAT3(-0.0000009f, 0.1099318f, -0.0000377f), + XMFLOAT3(-0.0000031f, 0.1054347f, -0.0001928f), + XMFLOAT3(-0.0000062f, 0.0992858f, -0.0006252f), + XMFLOAT3(-0.0000096f, 0.0925862f, -0.0015358f), + XMFLOAT3(-0.0000127f, 0.0864372f, -0.0020276f), + XMFLOAT3(-0.0000150f, 0.0819402f, -0.0021978f), + XMFLOAT3(-0.0000159f, 0.0801965f, -0.0022382f), + XMFLOAT3(-0.0000153f, 0.0812760f, -0.0022106f), + XMFLOAT3(-0.0000139f, 0.0841692f, -0.0021086f), + XMFLOAT3(-0.0000118f, 0.0883577f, -0.0018848f), + XMFLOAT3(-0.0000092f, 0.0933236f, -0.0014561f), + XMFLOAT3(-0.0000066f, 0.0985484f, -0.0008443f), + XMFLOAT3(-0.0000041f, 0.1035143f, -0.0003812f), + XMFLOAT3(-0.0000020f, 0.1077028f, -0.0001386f), + XMFLOAT3(-0.0000005f, 0.1105960f, -0.0000294f), + XMFLOAT3(-0.0000000f, 0.1116755f, 0.0000000f), +}; + +const XMFLOAT4 accessor_144[] = { + XMFLOAT4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), + XMFLOAT4( 0.0814701f, 0.0021492f, 0.0019763f, 0.9966716f), + XMFLOAT4( 0.0773537f, 0.0021613f, 0.0020006f, 0.9969994f), + XMFLOAT4( 0.0710295f, 0.0021796f, 0.0020383f, 0.9974698f), + XMFLOAT4( 0.0629364f, 0.0022024f, 0.0020869f, 0.9980130f), + XMFLOAT4( 0.0535137f, 0.0022280f, 0.0021441f, 0.9985623f), + XMFLOAT4( 0.0432023f, 0.0022551f, 0.0022075f, 0.9990614f), + XMFLOAT4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), + XMFLOAT4( 0.0216820f, 0.0023081f, 0.0023421f, 0.9997596f), + XMFLOAT4( 0.0113599f, 0.0023318f, 0.0024078f, 0.9999298f), + XMFLOAT4( 0.0019214f, 0.0023525f, 0.0024685f, 0.9999924f), + XMFLOAT4(-0.0061899f, 0.0023695f, 0.0025212f, 0.9999748f), + XMFLOAT4(-0.0125312f, 0.0023824f, 0.0025627f, 0.9999154f), + XMFLOAT4(-0.0166602f, 0.0023905f, 0.0025898f, 0.9998550f), + XMFLOAT4(-0.0181348f, 0.0023933f, 0.0025995f, 0.9998293f), + XMFLOAT4(-0.0169988f, 0.0023912f, 0.0025920f, 0.9998493f), + XMFLOAT4(-0.0137882f, 0.0023849f, 0.0025709f, 0.9998988f), + XMFLOAT4(-0.0087991f, 0.0023749f, 0.0025382f, 0.9999552f), + XMFLOAT4(-0.0023278f, 0.0023615f, 0.0024961f, 0.9999914f), + XMFLOAT4( 0.0053291f, 0.0023451f, 0.0024465f, 0.9999801f), + XMFLOAT4( 0.0138749f, 0.0023261f, 0.0023917f, 0.9998982f), + XMFLOAT4( 0.0230123f, 0.0023050f, 0.0023337f, 0.9997298f), + XMFLOAT4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), + XMFLOAT4( 0.0418729f, 0.0022585f, 0.0022157f, 0.9991179f), + XMFLOAT4( 0.0510020f, 0.0022347f, 0.0021595f, 0.9986938f), + XMFLOAT4( 0.0595351f, 0.0022117f, 0.0021075f, 0.9982215f), + XMFLOAT4( 0.0671767f, 0.0021905f, 0.0020614f, 0.9977366f), + XMFLOAT4( 0.0736320f, 0.0021721f, 0.0020228f, 0.9972811f), + XMFLOAT4( 0.0786069f, 0.0021577f, 0.0019932f, 0.9969013f), + XMFLOAT4( 0.0818075f, 0.0021482f, 0.0019743f, 0.9966439f), + XMFLOAT4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), + XMFLOAT4( 0.0814701f, 0.0021492f, 0.0019763f, 0.9966716f), + XMFLOAT4( 0.0773537f, 0.0021613f, 0.0020006f, 0.9969994f), + XMFLOAT4( 0.0710295f, 0.0021796f, 0.0020383f, 0.9974698f), + XMFLOAT4( 0.0629364f, 0.0022024f, 0.0020869f, 0.9980130f), + XMFLOAT4( 0.0535137f, 0.0022280f, 0.0021441f, 0.9985623f), + XMFLOAT4( 
0.0432023f, 0.0022551f, 0.0022075f, 0.9990614f), + XMFLOAT4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), + XMFLOAT4( 0.0216820f, 0.0023081f, 0.0023421f, 0.9997596f), + XMFLOAT4( 0.0113599f, 0.0023318f, 0.0024078f, 0.9999298f), + XMFLOAT4( 0.0019214f, 0.0023525f, 0.0024685f, 0.9999924f), + XMFLOAT4(-0.0061899f, 0.0023695f, 0.0025212f, 0.9999748f), + XMFLOAT4(-0.0125312f, 0.0023824f, 0.0025627f, 0.9999154f), + XMFLOAT4(-0.0166602f, 0.0023905f, 0.0025898f, 0.9998550f), + XMFLOAT4(-0.0181348f, 0.0023933f, 0.0025995f, 0.9998293f), + XMFLOAT4(-0.0169988f, 0.0023912f, 0.0025920f, 0.9998493f), + XMFLOAT4(-0.0137882f, 0.0023849f, 0.0025709f, 0.9998988f), + XMFLOAT4(-0.0087991f, 0.0023749f, 0.0025382f, 0.9999552f), + XMFLOAT4(-0.0023278f, 0.0023615f, 0.0024961f, 0.9999914f), + XMFLOAT4( 0.0053291f, 0.0023451f, 0.0024465f, 0.9999801f), + XMFLOAT4( 0.0138749f, 0.0023261f, 0.0023917f, 0.9998982f), + XMFLOAT4( 0.0230123f, 0.0023050f, 0.0023337f, 0.9997298f), + XMFLOAT4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), + XMFLOAT4( 0.0418729f, 0.0022585f, 0.0022157f, 0.9991179f), + XMFLOAT4( 0.0510020f, 0.0022347f, 0.0021595f, 0.9986938f), + XMFLOAT4( 0.0595351f, 0.0022117f, 0.0021075f, 0.9982215f), + XMFLOAT4( 0.0671767f, 0.0021905f, 0.0020614f, 0.9977366f), + XMFLOAT4( 0.0736320f, 0.0021721f, 0.0020228f, 0.9972811f), + XMFLOAT4( 0.0786069f, 0.0021577f, 0.0019932f, 0.9969013f), + XMFLOAT4( 0.0818075f, 0.0021482f, 0.0019743f, 0.9966439f), + XMFLOAT4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), + XMFLOAT4( 0.0814701f, 0.0021492f, 0.0019763f, 0.9966716f), + XMFLOAT4( 0.0773537f, 0.0021613f, 0.0020006f, 0.9969994f), + XMFLOAT4( 0.0710295f, 0.0021796f, 0.0020383f, 0.9974698f), + XMFLOAT4( 0.0629364f, 0.0022024f, 0.0020869f, 0.9980130f), + XMFLOAT4( 0.0535137f, 0.0022280f, 0.0021441f, 0.9985623f), + XMFLOAT4( 0.0432023f, 0.0022551f, 0.0022075f, 0.9990614f), + XMFLOAT4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), + XMFLOAT4( 0.0216820f, 0.0023081f, 0.0023421f, 0.9997596f), + XMFLOAT4( 0.0113599f, 0.0023318f, 0.0024078f, 0.9999298f), + XMFLOAT4( 0.0019214f, 0.0023525f, 0.0024685f, 0.9999924f), + XMFLOAT4(-0.0061899f, 0.0023695f, 0.0025212f, 0.9999748f), + XMFLOAT4(-0.0125312f, 0.0023824f, 0.0025627f, 0.9999154f), + XMFLOAT4(-0.0166602f, 0.0023905f, 0.0025898f, 0.9998550f), + XMFLOAT4(-0.0181348f, 0.0023933f, 0.0025995f, 0.9998293f), + XMFLOAT4(-0.0169988f, 0.0023912f, 0.0025920f, 0.9998493f), + XMFLOAT4(-0.0137882f, 0.0023849f, 0.0025709f, 0.9998988f), + XMFLOAT4(-0.0087991f, 0.0023749f, 0.0025382f, 0.9999552f), + XMFLOAT4(-0.0023278f, 0.0023615f, 0.0024961f, 0.9999914f), + XMFLOAT4( 0.0053291f, 0.0023451f, 0.0024465f, 0.9999801f), + XMFLOAT4( 0.0138749f, 0.0023261f, 0.0023917f, 0.9998982f), + XMFLOAT4( 0.0230123f, 0.0023050f, 0.0023337f, 0.9997298f), + XMFLOAT4( 0.0324440f, 0.0022822f, 0.0022744f, 0.9994684f), + XMFLOAT4( 0.0418729f, 0.0022585f, 0.0022157f, 0.9991179f), + XMFLOAT4( 0.0510020f, 0.0022347f, 0.0021595f, 0.9986938f), + XMFLOAT4( 0.0595351f, 0.0022117f, 0.0021075f, 0.9982215f), + XMFLOAT4( 0.0671767f, 0.0021905f, 0.0020614f, 0.9977366f), + XMFLOAT4( 0.0736320f, 0.0021721f, 0.0020228f, 0.9972811f), + XMFLOAT4( 0.0786069f, 0.0021577f, 0.0019932f, 0.9969013f), + XMFLOAT4( 0.0818075f, 0.0021482f, 0.0019743f, 0.9966439f), + XMFLOAT4( 0.0829399f, 0.0021449f, 0.0019676f, 0.9965503f), +}; + +const XMFLOAT3 accessor_145[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9985101f, 1.0018822f), + XMFLOAT3( 0.9999998f, 0.9946679f, 1.0067360f), + XMFLOAT3( 0.9999999f, 
0.9894144f, 1.0133730f), + XMFLOAT3( 1.0000001f, 0.9836906f, 1.0206046f), + XMFLOAT3( 0.9999999f, 0.9784368f, 1.0272412f), + XMFLOAT3( 0.9999999f, 0.9745947f, 1.0320952f), + XMFLOAT3( 1.0000001f, 0.9731048f, 1.0339775f), + XMFLOAT3( 0.9999999f, 0.9745945f, 1.0320953f), + XMFLOAT3( 0.9999999f, 0.9784369f, 1.0272415f), + XMFLOAT3( 0.9999999f, 0.9836905f, 1.0206045f), + XMFLOAT3( 0.9999999f, 0.9894144f, 1.0133733f), + XMFLOAT3( 0.9999999f, 0.9946678f, 1.0067360f), + XMFLOAT3( 0.9999998f, 0.9985100f, 1.0018821f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9985101f, 1.0018821f), + XMFLOAT3( 0.9999999f, 0.9946678f, 1.0067360f), + XMFLOAT3( 0.9999998f, 0.9894144f, 1.0133730f), + XMFLOAT3( 0.9999999f, 0.9836904f, 1.0206043f), + XMFLOAT3( 0.9999999f, 0.9784369f, 1.0272416f), + XMFLOAT3( 0.9999999f, 0.9745947f, 1.0320953f), + XMFLOAT3( 0.9999999f, 0.9731049f, 1.0339775f), + XMFLOAT3( 0.9999999f, 0.9740270f, 1.0328122f), + XMFLOAT3( 1.0000001f, 0.9764991f, 1.0296897f), + XMFLOAT3( 0.9999999f, 0.9800777f, 1.0251684f), + XMFLOAT3( 1.0000001f, 0.9843205f, 1.0198087f), + XMFLOAT3( 0.9999999f, 0.9887845f, 1.0141689f), + XMFLOAT3( 0.9999999f, 0.9930271f, 1.0088089f), + XMFLOAT3( 0.9999999f, 0.9966060f, 1.0042881f), + XMFLOAT3( 0.9999999f, 0.9990774f, 1.0011650f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9985101f, 1.0018822f), + XMFLOAT3( 0.9999998f, 0.9946679f, 1.0067360f), + XMFLOAT3( 0.9999999f, 0.9894144f, 1.0133730f), + XMFLOAT3( 1.0000001f, 0.9836906f, 1.0206046f), + XMFLOAT3( 0.9999999f, 0.9784368f, 1.0272412f), + XMFLOAT3( 0.9999999f, 0.9745947f, 1.0320952f), + XMFLOAT3( 1.0000001f, 0.9731048f, 1.0339775f), + XMFLOAT3( 0.9999999f, 0.9745945f, 1.0320953f), + XMFLOAT3( 0.9999999f, 0.9784369f, 1.0272415f), + XMFLOAT3( 0.9999999f, 0.9836905f, 1.0206045f), + XMFLOAT3( 0.9999999f, 0.9894144f, 1.0133733f), + XMFLOAT3( 0.9999999f, 0.9946678f, 1.0067360f), + XMFLOAT3( 0.9999998f, 0.9985100f, 1.0018821f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9985101f, 1.0018821f), + XMFLOAT3( 0.9999999f, 0.9946678f, 1.0067360f), + XMFLOAT3( 0.9999998f, 0.9894144f, 1.0133730f), + XMFLOAT3( 0.9999999f, 0.9836904f, 1.0206043f), + XMFLOAT3( 0.9999999f, 0.9784369f, 1.0272416f), + XMFLOAT3( 0.9999999f, 0.9745947f, 1.0320953f), + XMFLOAT3( 0.9999999f, 0.9731049f, 1.0339775f), + XMFLOAT3( 0.9999999f, 0.9740270f, 1.0328122f), + XMFLOAT3( 1.0000001f, 0.9764991f, 1.0296897f), + XMFLOAT3( 0.9999999f, 0.9800777f, 1.0251684f), + XMFLOAT3( 1.0000001f, 0.9843205f, 1.0198087f), + XMFLOAT3( 0.9999999f, 0.9887845f, 1.0141689f), + XMFLOAT3( 0.9999999f, 0.9930271f, 1.0088089f), + XMFLOAT3( 0.9999999f, 0.9966060f, 1.0042881f), + XMFLOAT3( 0.9999999f, 0.9990774f, 1.0011650f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9985101f, 1.0018822f), + XMFLOAT3( 0.9999998f, 0.9946679f, 1.0067360f), + XMFLOAT3( 0.9999999f, 0.9894144f, 1.0133730f), + XMFLOAT3( 1.0000001f, 0.9836906f, 1.0206046f), + XMFLOAT3( 0.9999999f, 0.9784368f, 1.0272412f), + XMFLOAT3( 0.9999999f, 0.9745947f, 1.0320952f), + XMFLOAT3( 1.0000001f, 0.9731048f, 1.0339775f), + XMFLOAT3( 0.9999999f, 0.9745945f, 1.0320953f), + XMFLOAT3( 0.9999999f, 0.9784369f, 1.0272415f), + XMFLOAT3( 0.9999999f, 0.9836905f, 1.0206045f), + XMFLOAT3( 0.9999999f, 0.9894144f, 1.0133733f), + XMFLOAT3( 0.9999999f, 0.9946678f, 1.0067360f), + XMFLOAT3( 0.9999998f, 0.9985100f, 1.0018821f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9985101f, 1.0018821f), + 
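// Scale keyframes: y/z oscillate slightly around 1.0 (subtle squash/stretch). +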
XMFLOAT3( 0.9999999f, 0.9946678f, 1.0067360f), + XMFLOAT3( 0.9999998f, 0.9894144f, 1.0133730f), + XMFLOAT3( 0.9999999f, 0.9836904f, 1.0206043f), + XMFLOAT3( 0.9999999f, 0.9784369f, 1.0272416f), + XMFLOAT3( 0.9999999f, 0.9745947f, 1.0320953f), + XMFLOAT3( 0.9999999f, 0.9731049f, 1.0339775f), + XMFLOAT3( 0.9999999f, 0.9740270f, 1.0328122f), + XMFLOAT3( 1.0000001f, 0.9764991f, 1.0296897f), + XMFLOAT3( 0.9999999f, 0.9800777f, 1.0251684f), + XMFLOAT3( 1.0000001f, 0.9843205f, 1.0198087f), + XMFLOAT3( 0.9999999f, 0.9887845f, 1.0141689f), + XMFLOAT3( 0.9999999f, 0.9930271f, 1.0088089f), + XMFLOAT3( 0.9999999f, 0.9966060f, 1.0042881f), + XMFLOAT3( 0.9999999f, 0.9990774f, 1.0011650f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_146[] = { + XMFLOAT3(-0.0005524f, 0.0688296f, -0.0213631f), + XMFLOAT3(-0.0005524f, 0.0688296f, -0.0213631f), +}; + +const XMFLOAT4 accessor_147[] = { + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), +}; + +const XMFLOAT3 accessor_148[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_149[] = { + XMFLOAT3(-0.0000000f, 0.3082172f, 0.0000012f), + XMFLOAT3(-0.0000000f, 0.3082172f, 0.0000012f), +}; + +const XMFLOAT4 accessor_150[] = { + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), +}; + +const XMFLOAT3 accessor_151[] = { + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000001f), + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000001f), +}; + +const XMFLOAT3 accessor_152[] = { + XMFLOAT3(-0.0000000f, 0.3056023f, -0.0000003f), + XMFLOAT3(-0.0000000f, 0.3056023f, -0.0000003f), +}; + +const XMFLOAT4 accessor_153[] = { + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), +}; + +const XMFLOAT3 accessor_154[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_155[] = { + XMFLOAT3( 0.0013047f, 0.3463301f, 0.0044682f), + XMFLOAT3( 0.0012900f, 0.3433006f, 0.0044711f), + XMFLOAT3( 0.0012551f, 0.3360764f, 0.0044868f), + XMFLOAT3( 0.0012134f, 0.3274537f, 0.0045631f), + XMFLOAT3( 0.0011785f, 0.3202294f, 0.0045817f), + XMFLOAT3( 0.0011639f, 0.3171998f, 0.0045849f), + XMFLOAT3( 0.0011687f, 0.3181989f, 0.0045834f), + XMFLOAT3( 0.0011816f, 0.3208762f, 0.0045781f), + XMFLOAT3( 0.0012004f, 0.3247521f, 0.0045665f), + XMFLOAT3( 0.0012226f, 0.3293475f, 0.0045441f), + XMFLOAT3( 0.0012460f, 0.3341826f, 0.0045122f), + XMFLOAT3( 0.0012682f, 0.3387780f, 0.0044881f), + XMFLOAT3( 0.0012869f, 0.3426540f, 0.0044754f), + XMFLOAT3( 0.0012998f, 0.3453312f, 0.0044697f), + XMFLOAT3( 0.0013047f, 0.3463303f, 0.0044682f), + XMFLOAT3( 0.0012969f, 0.3447166f, 0.0044701f), + XMFLOAT3( 0.0012768f, 0.3405551f, 0.0044782f), + XMFLOAT3( 0.0012493f, 0.3348649f, 0.0045008f), + XMFLOAT3( 0.0012193f, 0.3286653f, 0.0045483f), + XMFLOAT3( 0.0011918f, 0.3229749f, 0.0045739f), + XMFLOAT3( 0.0011717f, 0.3188135f, 0.0045828f), + XMFLOAT3( 0.0011639f, 0.3171999f, 0.0045849f), + XMFLOAT3( 0.0011687f, 0.3181988f, 0.0045834f), + XMFLOAT3( 0.0011816f, 0.3208760f, 0.0045781f), + XMFLOAT3( 0.0012004f, 0.3247522f, 0.0045665f), + XMFLOAT3( 0.0012226f, 0.3293476f, 0.0045441f), + XMFLOAT3( 0.0012460f, 0.3341825f, 0.0045122f), + XMFLOAT3( 0.0012682f, 0.3387780f, 0.0044881f), + XMFLOAT3( 0.0012869f, 0.3426539f, 0.0044754f), + XMFLOAT3( 0.0012998f, 
0.3453312f, 0.0044697f), + XMFLOAT3( 0.0013047f, 0.3463301f, 0.0044682f), + XMFLOAT3( 0.0012900f, 0.3433006f, 0.0044711f), + XMFLOAT3( 0.0012551f, 0.3360764f, 0.0044868f), + XMFLOAT3( 0.0012134f, 0.3274537f, 0.0045631f), + XMFLOAT3( 0.0011785f, 0.3202294f, 0.0045817f), + XMFLOAT3( 0.0011639f, 0.3171998f, 0.0045849f), + XMFLOAT3( 0.0011687f, 0.3181989f, 0.0045834f), + XMFLOAT3( 0.0011816f, 0.3208762f, 0.0045781f), + XMFLOAT3( 0.0012004f, 0.3247521f, 0.0045665f), + XMFLOAT3( 0.0012226f, 0.3293475f, 0.0045441f), + XMFLOAT3( 0.0012460f, 0.3341826f, 0.0045122f), + XMFLOAT3( 0.0012682f, 0.3387780f, 0.0044881f), + XMFLOAT3( 0.0012869f, 0.3426540f, 0.0044754f), + XMFLOAT3( 0.0012998f, 0.3453312f, 0.0044697f), + XMFLOAT3( 0.0013047f, 0.3463303f, 0.0044682f), + XMFLOAT3( 0.0012969f, 0.3447166f, 0.0044701f), + XMFLOAT3( 0.0012768f, 0.3405551f, 0.0044782f), + XMFLOAT3( 0.0012493f, 0.3348649f, 0.0045008f), + XMFLOAT3( 0.0012193f, 0.3286653f, 0.0045483f), + XMFLOAT3( 0.0011918f, 0.3229749f, 0.0045739f), + XMFLOAT3( 0.0011717f, 0.3188135f, 0.0045828f), + XMFLOAT3( 0.0011639f, 0.3171999f, 0.0045849f), + XMFLOAT3( 0.0011687f, 0.3181988f, 0.0045834f), + XMFLOAT3( 0.0011816f, 0.3208760f, 0.0045781f), + XMFLOAT3( 0.0012004f, 0.3247522f, 0.0045665f), + XMFLOAT3( 0.0012226f, 0.3293476f, 0.0045441f), + XMFLOAT3( 0.0012460f, 0.3341825f, 0.0045122f), + XMFLOAT3( 0.0012682f, 0.3387780f, 0.0044881f), + XMFLOAT3( 0.0012869f, 0.3426539f, 0.0044754f), + XMFLOAT3( 0.0012998f, 0.3453312f, 0.0044697f), + XMFLOAT3( 0.0013047f, 0.3463301f, 0.0044682f), + XMFLOAT3( 0.0012900f, 0.3433006f, 0.0044711f), + XMFLOAT3( 0.0012551f, 0.3360764f, 0.0044868f), + XMFLOAT3( 0.0012134f, 0.3274537f, 0.0045631f), + XMFLOAT3( 0.0011785f, 0.3202294f, 0.0045817f), + XMFLOAT3( 0.0011639f, 0.3171998f, 0.0045849f), + XMFLOAT3( 0.0011687f, 0.3181989f, 0.0045834f), + XMFLOAT3( 0.0011816f, 0.3208762f, 0.0045781f), + XMFLOAT3( 0.0012004f, 0.3247521f, 0.0045665f), + XMFLOAT3( 0.0012226f, 0.3293475f, 0.0045441f), + XMFLOAT3( 0.0012460f, 0.3341826f, 0.0045122f), + XMFLOAT3( 0.0012682f, 0.3387780f, 0.0044881f), + XMFLOAT3( 0.0012869f, 0.3426540f, 0.0044754f), + XMFLOAT3( 0.0012998f, 0.3453312f, 0.0044697f), + XMFLOAT3( 0.0013047f, 0.3463303f, 0.0044682f), + XMFLOAT3( 0.0012969f, 0.3447166f, 0.0044701f), + XMFLOAT3( 0.0012768f, 0.3405551f, 0.0044782f), + XMFLOAT3( 0.0012493f, 0.3348649f, 0.0045008f), + XMFLOAT3( 0.0012193f, 0.3286653f, 0.0045483f), + XMFLOAT3( 0.0011918f, 0.3229749f, 0.0045739f), + XMFLOAT3( 0.0011717f, 0.3188135f, 0.0045828f), + XMFLOAT3( 0.0011639f, 0.3171999f, 0.0045849f), + XMFLOAT3( 0.0011687f, 0.3181988f, 0.0045834f), + XMFLOAT3( 0.0011816f, 0.3208760f, 0.0045781f), + XMFLOAT3( 0.0012004f, 0.3247522f, 0.0045665f), + XMFLOAT3( 0.0012226f, 0.3293476f, 0.0045441f), + XMFLOAT3( 0.0012460f, 0.3341825f, 0.0045122f), + XMFLOAT3( 0.0012682f, 0.3387780f, 0.0044881f), + XMFLOAT3( 0.0012869f, 0.3426539f, 0.0044754f), + XMFLOAT3( 0.0012998f, 0.3453312f, 0.0044697f), + XMFLOAT3( 0.0013047f, 0.3463301f, 0.0044682f), +}; + +const XMFLOAT4 accessor_156[] = { + XMFLOAT4( 0.0387171f, 0.0002800f, 0.0013465f, 0.9992493f), + XMFLOAT4( 0.0493966f, -0.0177392f, 0.0031159f, 0.9986169f), + XMFLOAT4( 0.0606996f, -0.0367296f, 0.0057199f, 0.9974637f), + XMFLOAT4( 0.0707855f, -0.0537815f, 0.0088475f, 0.9960014f), + XMFLOAT4( 0.0784067f, -0.0683515f, 0.0121915f, 0.9945008f), + XMFLOAT4( 0.0817480f, -0.0807198f, 0.0154127f, 0.9932593f), + XMFLOAT4( 0.0747506f, -0.0913959f, 0.0177616f, 0.9928463f), + XMFLOAT4( 0.0556845f, -0.1008034f, 0.0187529f, 0.9931698f), + 
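// Densely sampled rotation track; first and last keyframes match, so the clip loops. +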
XMFLOAT4( 0.0280073f, -0.1091611f, 0.0184893f, 0.9934574f), + XMFLOAT4(-0.0048100f, -0.1165879f, 0.0172380f, 0.9930192f), + XMFLOAT4(-0.0392765f, -0.1231512f, 0.0153883f, 0.9914910f), + XMFLOAT4(-0.0719051f, -0.1289043f, 0.0134206f, 0.9889557f), + XMFLOAT4(-0.0992459f, -0.1339051f, 0.0118810f, 0.9859404f), + XMFLOAT4(-0.1179052f, -0.1382139f, 0.0113604f, 0.9832936f), + XMFLOAT4(-0.1245264f, -0.1418656f, 0.0124765f, 0.9819428f), + XMFLOAT4(-0.1224307f, -0.1449235f, 0.0148707f, 0.9817264f), + XMFLOAT4(-0.1177773f, -0.1474829f, 0.0176792f, 0.9818680f), + XMFLOAT4(-0.1111950f, -0.1495734f, 0.0208023f, 0.9822580f), + XMFLOAT4(-0.1030942f, -0.1512196f, 0.0241640f, 0.9828125f), + XMFLOAT4(-0.0937609f, -0.1524428f, 0.0277018f, 0.9834647f), + XMFLOAT4(-0.0834059f, -0.1532622f, 0.0313610f, 0.9841599f), + XMFLOAT4(-0.0721912f, -0.1536956f, 0.0350924f, 0.9848526f), + XMFLOAT4(-0.0602471f, -0.1537601f, 0.0388498f, 0.9855043f), + XMFLOAT4(-0.0476825f, -0.1534726f, 0.0425891f, 0.9860825f), + XMFLOAT4(-0.0346149f, -0.1524973f, 0.0462851f, 0.9866124f), + XMFLOAT4(-0.0211576f, -0.1504736f, 0.0498945f, 0.9871275f), + XMFLOAT4(-0.0074050f, -0.1473921f, 0.0533421f, 0.9876110f), + XMFLOAT4( 0.0065302f, -0.1432618f, 0.0565457f, 0.9880466f), + XMFLOAT4( 0.0204811f, -0.1381156f, 0.0594127f, 0.9884204f), + XMFLOAT4( 0.0340977f, -0.1320207f, 0.0618273f, 0.9887291f), + XMFLOAT4( 0.0458801f, -0.1251444f, 0.0635367f, 0.9890384f), + XMFLOAT4( 0.0560597f, -0.1175647f, 0.0646364f, 0.9893726f), + XMFLOAT4( 0.0667953f, -0.1092420f, 0.0654558f, 0.9896061f), + XMFLOAT4( 0.0762679f, -0.1004125f, 0.0657694f, 0.9898358f), + XMFLOAT4( 0.0832366f, -0.0912865f, 0.0654775f, 0.9901773f), + XMFLOAT4( 0.0858855f, -0.0821102f, 0.0644889f, 0.9908193f), + XMFLOAT4( 0.0781465f, -0.0733800f, 0.0624743f, 0.9922729f), + XMFLOAT4( 0.0582459f, -0.0652543f, 0.0595997f, 0.9943828f), + XMFLOAT4( 0.0296094f, -0.0575410f, 0.0563712f, 0.9963105f), + XMFLOAT4(-0.0042901f, -0.0500729f, 0.0531686f, 0.9973201f), + XMFLOAT4(-0.0399325f, -0.0427180f, 0.0502538f, 0.9970232f), + XMFLOAT4(-0.0737814f, -0.0353748f, 0.0477780f, 0.9955010f), + XMFLOAT4(-0.1023270f, -0.0279613f, 0.0457860f, 0.9933031f), + XMFLOAT4(-0.1221112f, -0.0204020f, 0.0442204f, 0.9913210f), + XMFLOAT4(-0.1297143f, -0.0126208f, 0.0429242f, 0.9905415f), + XMFLOAT4(-0.1284588f, -0.0048277f, 0.0416081f, 0.9908299f), + XMFLOAT4(-0.1245467f, 0.0026717f, 0.0401076f, 0.9913992f), + XMFLOAT4(-0.1186091f, 0.0098275f, 0.0384018f, 0.9921495f), + XMFLOAT4(-0.1110591f, 0.0166051f, 0.0364919f, 0.9930048f), + XMFLOAT4(-0.1021864f, 0.0229805f, 0.0343923f, 0.9939049f), + XMFLOAT4(-0.0922060f, 0.0289375f, 0.0321254f, 0.9948008f), + XMFLOAT4(-0.0812846f, 0.0344663f, 0.0297177f, 0.9956514f), + XMFLOAT4(-0.0695569f, 0.0395620f, 0.0271980f, 0.9964221f), + XMFLOAT4(-0.0571369f, 0.0442244f, 0.0245948f, 0.9970831f), + XMFLOAT4(-0.0441252f, 0.0484563f, 0.0219360f, 0.9976090f), + XMFLOAT4(-0.0306171f, 0.0522637f, 0.0192480f, 0.9979783f), + XMFLOAT4(-0.0167114f, 0.0556548f, 0.0165563f, 0.9981729f), + XMFLOAT4(-0.0025264f, 0.0586396f, 0.0138860f, 0.9981794f), + XMFLOAT4( 0.0117657f, 0.0612287f, 0.0112215f, 0.9979913f), + XMFLOAT4( 0.0258064f, 0.0634317f, 0.0085759f, 0.9976156f), + XMFLOAT4( 0.0380740f, 0.0652552f, 0.0060878f, 0.9971234f), + XMFLOAT4( 0.0487839f, 0.0667278f, 0.0037808f, 0.9965708f), + XMFLOAT4( 0.0600906f, 0.0678784f, 0.0015353f, 0.9958812f), + XMFLOAT4( 0.0701530f, 0.0687186f, -0.0005088f, 0.9951664f), + XMFLOAT4( 0.0777177f, 0.0692727f, -0.0022518f, 0.9945633f), + XMFLOAT4( 0.0809597f, 0.0695728f, 
-0.0035576f, 0.9942799f), + XMFLOAT4( 0.0737964f, 0.0696717f, -0.0039981f, 0.9948286f), + XMFLOAT4( 0.0544659f, 0.0696648f, -0.0034622f, 0.9960765f), + XMFLOAT4( 0.0264120f, 0.0695867f, -0.0022097f, 0.9972237f), + XMFLOAT4(-0.0068905f, 0.0693881f, -0.0004990f, 0.9975659f), + XMFLOAT4(-0.0419323f, 0.0690313f, 0.0014082f, 0.9967319f), + XMFLOAT4(-0.0751965f, 0.0685022f, 0.0032511f, 0.9948077f), + XMFLOAT4(-0.1031954f, 0.0678127f, 0.0047754f, 0.9923353f), + XMFLOAT4(-0.1224918f, 0.0669932f, 0.0057394f, 0.9901893f), + XMFLOAT4(-0.1296819f, 0.0660770f, 0.0059186f, 0.9893339f), + XMFLOAT4(-0.1280892f, 0.0650392f, 0.0055533f, 0.9896122f), + XMFLOAT4(-0.1239076f, 0.0638259f, 0.0050655f, 0.9902260f), + XMFLOAT4(-0.1177644f, 0.0624143f, 0.0045012f, 0.9910680f), + XMFLOAT4(-0.1100672f, 0.0607806f, 0.0038919f, 0.9920564f), + XMFLOAT4(-0.1010998f, 0.0588983f, 0.0032619f, 0.9931260f), + XMFLOAT4(-0.0910701f, 0.0567373f, 0.0026315f, 0.9942234f), + XMFLOAT4(-0.0801377f, 0.0542614f, 0.0020192f, 0.9953038f), + XMFLOAT4(-0.0684298f, 0.0514265f, 0.0014430f, 0.9963285f), + XMFLOAT4(-0.0560529f, 0.0481771f, 0.0009217f, 0.9972644f), + XMFLOAT4(-0.0431007f, 0.0444411f, 0.0004759f, 0.9980817f), + XMFLOAT4(-0.0296616f, 0.0401208f, 0.0001293f, 0.9987545f), + XMFLOAT4(-0.0158282f, 0.0350788f, -0.0000894f, 0.9992592f), + XMFLOAT4(-0.0017130f, 0.0291084f, -0.0001432f, 0.9995748f), + XMFLOAT4( 0.0125142f, 0.0218732f, 0.0000184f, 0.9996824f), + XMFLOAT4( 0.0264973f, 0.0127488f, 0.0004716f, 0.9995675f), + XMFLOAT4( 0.0387171f, 0.0002800f, 0.0013465f, 0.9992493f), +}; + +const XMFLOAT3 accessor_157[] = { + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_158[] = { + XMFLOAT3( 0.2488541f, 0.2033312f, -0.0450287f), + XMFLOAT3( 0.2488541f, 0.2033312f, -0.0450287f), +}; + +const XMFLOAT4 accessor_159[] = { + XMFLOAT4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), + XMFLOAT4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), +}; + +const XMFLOAT3 accessor_160[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_161[] = { + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), +}; + +const XMFLOAT4 accessor_162[] = { + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), +}; + +const XMFLOAT3 accessor_163[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_164[] = { + XMFLOAT3( 0.0000000f, 0.1024574f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1024574f, 0.0000000f), +}; + +const XMFLOAT4 accessor_165[] = { + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_166[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000001f), + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000001f), +}; + +const XMFLOAT3 accessor_167[] = { + XMFLOAT3( 0.0000000f, 0.1026066f, -0.0000000f), + XMFLOAT3( 0.0000000f, 0.1026066f, -0.0000000f), +}; + +const XMFLOAT4 accessor_168[] = { + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_169[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_170[] = { + XMFLOAT3( 0.0000000f, 0.1033655f, 
-0.0000004f), + XMFLOAT3( 0.0000000f, 0.1033655f, -0.0000004f), +}; + +const XMFLOAT4 accessor_171[] = { + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_172[] = { + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000001f), + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000001f), +}; + +const XMFLOAT3 accessor_173[] = { + XMFLOAT3(-0.0000000f, 0.1012878f, -0.0000007f), + XMFLOAT3(-0.0000000f, 0.1012878f, -0.0000007f), +}; + +const XMFLOAT4 accessor_174[] = { + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), +}; + +const XMFLOAT3 accessor_175[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_176[] = { + XMFLOAT3( 0.0000000f, 0.1024326f, 0.0000002f), + XMFLOAT3( 0.0000000f, 0.1024326f, 0.0000002f), +}; + +const XMFLOAT4 accessor_177[] = { + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), +}; + +const XMFLOAT3 accessor_178[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_179[] = { + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), +}; + +const XMFLOAT4 accessor_180[] = { + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), +}; + +const XMFLOAT3 accessor_181[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_182[] = { + XMFLOAT3(-0.0000000f, 0.3082159f, -0.0000004f), + XMFLOAT3(-0.0000000f, 0.3082159f, -0.0000004f), +}; + +const XMFLOAT4 accessor_183[] = { + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), +}; + +const XMFLOAT3 accessor_184[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_185[] = { + XMFLOAT3( 0.0000000f, 0.3056036f, -0.0000006f), + XMFLOAT3( 0.0000000f, 0.3056036f, -0.0000006f), +}; + +const XMFLOAT4 accessor_186[] = { + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), +}; + +const XMFLOAT3 accessor_187[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_188[] = { + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), +}; + +const XMFLOAT4 accessor_189[] = { + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), +}; + +const XMFLOAT3 accessor_190[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_191[] = { + XMFLOAT3(-0.0000000f, 0.1024561f, 0.0000011f), + XMFLOAT3(-0.0000000f, 0.1024561f, 0.0000011f), +}; + +const XMFLOAT4 accessor_192[] = { + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_193[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_194[] = { + XMFLOAT3(-0.0000000f, 0.1025997f, 
0.0000007f), + XMFLOAT3(-0.0000000f, 0.1025997f, 0.0000007f), +}; + +const XMFLOAT4 accessor_195[] = { + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_196[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_197[] = { + XMFLOAT3( 0.0000000f, 0.1033699f, 0.0000007f), + XMFLOAT3( 0.0000000f, 0.1033699f, 0.0000007f), +}; + +const XMFLOAT4 accessor_198[] = { + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_199[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999998f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999998f), +}; + +const XMFLOAT3 accessor_200[] = { + XMFLOAT3( 0.0000001f, 0.1012830f, -0.0000006f), + XMFLOAT3( 0.0000001f, 0.1012830f, -0.0000006f), +}; + +const XMFLOAT4 accessor_201[] = { + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), +}; + +const XMFLOAT3 accessor_202[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_203[] = { + XMFLOAT3(-0.0000000f, 0.1024322f, 0.0000006f), + XMFLOAT3(-0.0000000f, 0.1024322f, 0.0000006f), +}; + +const XMFLOAT4 accessor_204[] = { + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), +}; + +const XMFLOAT3 accessor_205[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_206[] = { + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), +}; + +const XMFLOAT4 accessor_207[] = { + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), +}; + +const XMFLOAT3 accessor_208[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_209[] = { + XMFLOAT3(-0.0000000f, 0.2498153f, -0.0000003f), + XMFLOAT3(-0.0000000f, 0.2498153f, -0.0000003f), +}; + +const XMFLOAT4 accessor_210[] = { + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), +}; + +const XMFLOAT3 accessor_211[] = { + XMFLOAT3( 0.9999998f, 0.9999997f, 0.9999998f), + XMFLOAT3( 0.9999998f, 0.9999997f, 0.9999998f), +}; + +const XMFLOAT3 accessor_212[] = { + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_213[] = { + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_214[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_215[] = { + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_216[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_217[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_218[] = { + XMFLOAT3(-0.0019501f, 
0.0729339f, -0.0255577f), + XMFLOAT3(-0.0019493f, 0.0729384f, -0.0253245f), + XMFLOAT3(-0.0019473f, 0.0729535f, -0.0246470f), + XMFLOAT3(-0.0019446f, 0.0729828f, -0.0235456f), + XMFLOAT3(-0.0019421f, 0.0730318f, -0.0220117f), + XMFLOAT3(-0.0019408f, 0.0731085f, -0.0199669f), + XMFLOAT3(-0.0019434f, 0.0732235f, -0.0171513f), + XMFLOAT3(-0.0019529f, 0.0733484f, -0.0132013f), + XMFLOAT3(-0.0019604f, 0.0733796f, -0.0095440f), + XMFLOAT3(-0.0019613f, 0.0733652f, -0.0069526f), + XMFLOAT3(-0.0019592f, 0.0733468f, -0.0050041f), + XMFLOAT3(-0.0019561f, 0.0733323f, -0.0035149f), + XMFLOAT3(-0.0019531f, 0.0733228f, -0.0024349f), + XMFLOAT3(-0.0019509f, 0.0733176f, -0.0017666f), + XMFLOAT3(-0.0019501f, 0.0733160f, -0.0015359f), + XMFLOAT3(-0.0019507f, 0.0733175f, -0.0017199f), + XMFLOAT3(-0.0019522f, 0.0733223f, -0.0022591f), + XMFLOAT3(-0.0019544f, 0.0733312f, -0.0031417f), + XMFLOAT3(-0.0019566f, 0.0733445f, -0.0043721f), + XMFLOAT3(-0.0019581f, 0.0733614f, -0.0059870f), + XMFLOAT3(-0.0019577f, 0.0733771f, -0.0080864f), + XMFLOAT3(-0.0019536f, 0.0733730f, -0.0108565f), + XMFLOAT3(-0.0019461f, 0.0733107f, -0.0141954f), + XMFLOAT3(-0.0019409f, 0.0732084f, -0.0171920f), + XMFLOAT3(-0.0019398f, 0.0731180f, -0.0195282f), + XMFLOAT3(-0.0019409f, 0.0730503f, -0.0213779f), + XMFLOAT3(-0.0019431f, 0.0730020f, -0.0228579f), + XMFLOAT3(-0.0019456f, 0.0729691f, -0.0240146f), + XMFLOAT3(-0.0019479f, 0.0729484f, -0.0248579f), + XMFLOAT3(-0.0019495f, 0.0729373f, -0.0253787f), + XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), + XMFLOAT3(-0.0019493f, 0.0729384f, -0.0253245f), + XMFLOAT3(-0.0019473f, 0.0729535f, -0.0246470f), + XMFLOAT3(-0.0019446f, 0.0729828f, -0.0235456f), + XMFLOAT3(-0.0019421f, 0.0730318f, -0.0220117f), + XMFLOAT3(-0.0019408f, 0.0731085f, -0.0199669f), + XMFLOAT3(-0.0019434f, 0.0732235f, -0.0171513f), + XMFLOAT3(-0.0019529f, 0.0733484f, -0.0132013f), + XMFLOAT3(-0.0019604f, 0.0733796f, -0.0095440f), + XMFLOAT3(-0.0019613f, 0.0733652f, -0.0069526f), + XMFLOAT3(-0.0019592f, 0.0733468f, -0.0050041f), + XMFLOAT3(-0.0019561f, 0.0733323f, -0.0035149f), + XMFLOAT3(-0.0019531f, 0.0733228f, -0.0024349f), + XMFLOAT3(-0.0019509f, 0.0733176f, -0.0017666f), + XMFLOAT3(-0.0019501f, 0.0733160f, -0.0015359f), + XMFLOAT3(-0.0019507f, 0.0733175f, -0.0017199f), + XMFLOAT3(-0.0019522f, 0.0733223f, -0.0022591f), + XMFLOAT3(-0.0019544f, 0.0733312f, -0.0031417f), + XMFLOAT3(-0.0019566f, 0.0733445f, -0.0043721f), + XMFLOAT3(-0.0019581f, 0.0733614f, -0.0059870f), + XMFLOAT3(-0.0019577f, 0.0733771f, -0.0080864f), + XMFLOAT3(-0.0019536f, 0.0733730f, -0.0108565f), + XMFLOAT3(-0.0019461f, 0.0733107f, -0.0141954f), + XMFLOAT3(-0.0019409f, 0.0732084f, -0.0171920f), + XMFLOAT3(-0.0019398f, 0.0731180f, -0.0195282f), + XMFLOAT3(-0.0019409f, 0.0730503f, -0.0213779f), + XMFLOAT3(-0.0019431f, 0.0730020f, -0.0228579f), + XMFLOAT3(-0.0019456f, 0.0729691f, -0.0240146f), + XMFLOAT3(-0.0019479f, 0.0729484f, -0.0248579f), + XMFLOAT3(-0.0019495f, 0.0729373f, -0.0253787f), + XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), + XMFLOAT3(-0.0019493f, 0.0729384f, -0.0253245f), + XMFLOAT3(-0.0019473f, 0.0729535f, -0.0246470f), + XMFLOAT3(-0.0019446f, 0.0729828f, -0.0235456f), + XMFLOAT3(-0.0019421f, 0.0730318f, -0.0220117f), + XMFLOAT3(-0.0019408f, 0.0731085f, -0.0199669f), + XMFLOAT3(-0.0019434f, 0.0732235f, -0.0171513f), + XMFLOAT3(-0.0019529f, 0.0733484f, -0.0132013f), + XMFLOAT3(-0.0019604f, 0.0733796f, -0.0095440f), + XMFLOAT3(-0.0019613f, 0.0733652f, -0.0069526f), + XMFLOAT3(-0.0019592f, 0.0733468f, -0.0050041f), + XMFLOAT3(-0.0019561f, 
0.0733323f, -0.0035149f), + XMFLOAT3(-0.0019531f, 0.0733228f, -0.0024349f), + XMFLOAT3(-0.0019509f, 0.0733176f, -0.0017666f), + XMFLOAT3(-0.0019501f, 0.0733160f, -0.0015359f), + XMFLOAT3(-0.0019507f, 0.0733175f, -0.0017199f), + XMFLOAT3(-0.0019522f, 0.0733223f, -0.0022591f), + XMFLOAT3(-0.0019544f, 0.0733312f, -0.0031417f), + XMFLOAT3(-0.0019566f, 0.0733445f, -0.0043721f), + XMFLOAT3(-0.0019581f, 0.0733614f, -0.0059870f), + XMFLOAT3(-0.0019577f, 0.0733771f, -0.0080864f), + XMFLOAT3(-0.0019536f, 0.0733730f, -0.0108565f), + XMFLOAT3(-0.0019461f, 0.0733107f, -0.0141954f), + XMFLOAT3(-0.0019409f, 0.0732084f, -0.0171920f), + XMFLOAT3(-0.0019398f, 0.0731180f, -0.0195282f), + XMFLOAT3(-0.0019409f, 0.0730503f, -0.0213779f), + XMFLOAT3(-0.0019431f, 0.0730020f, -0.0228579f), + XMFLOAT3(-0.0019456f, 0.0729691f, -0.0240146f), + XMFLOAT3(-0.0019479f, 0.0729484f, -0.0248579f), + XMFLOAT3(-0.0019495f, 0.0729373f, -0.0253787f), + XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), +}; + +const XMFLOAT4 accessor_219[] = { + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_220[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_221[] = { + XMFLOAT3(-0.0000000f, 0.0919257f, -0.0000006f), + XMFLOAT3(-0.0000000f, 0.0919257f, -0.0000006f), +}; + +const XMFLOAT4 accessor_222[] = { + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_223[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_224[] = { + XMFLOAT3( 0.0000006f, 0.1196968f, 0.0000000f), + XMFLOAT3( 0.0000006f, 0.1196968f, 0.0000000f), +}; + +const XMFLOAT4 accessor_225[] = { + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_226[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_227[] = { + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_228[] = { + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_229[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_230[] = { + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_231[] = { + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_232[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_233[] = { + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_234[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_235[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_236[] = { + 
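// Matches accessor_218 above to within rounding; likely a mirrored counterpart node. +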
XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), + XMFLOAT3(-0.0019493f, 0.0729384f, -0.0253245f), + XMFLOAT3(-0.0019473f, 0.0729535f, -0.0246470f), + XMFLOAT3(-0.0019446f, 0.0729828f, -0.0235456f), + XMFLOAT3(-0.0019421f, 0.0730318f, -0.0220117f), + XMFLOAT3(-0.0019408f, 0.0731085f, -0.0199669f), + XMFLOAT3(-0.0019434f, 0.0732235f, -0.0171513f), + XMFLOAT3(-0.0019529f, 0.0733484f, -0.0132013f), + XMFLOAT3(-0.0019604f, 0.0733796f, -0.0095440f), + XMFLOAT3(-0.0019613f, 0.0733652f, -0.0069526f), + XMFLOAT3(-0.0019592f, 0.0733468f, -0.0050041f), + XMFLOAT3(-0.0019561f, 0.0733323f, -0.0035149f), + XMFLOAT3(-0.0019531f, 0.0733228f, -0.0024349f), + XMFLOAT3(-0.0019509f, 0.0733176f, -0.0017666f), + XMFLOAT3(-0.0019501f, 0.0733160f, -0.0015359f), + XMFLOAT3(-0.0019507f, 0.0733175f, -0.0017199f), + XMFLOAT3(-0.0019523f, 0.0733223f, -0.0022591f), + XMFLOAT3(-0.0019544f, 0.0733313f, -0.0031417f), + XMFLOAT3(-0.0019566f, 0.0733445f, -0.0043721f), + XMFLOAT3(-0.0019581f, 0.0733614f, -0.0059870f), + XMFLOAT3(-0.0019577f, 0.0733771f, -0.0080864f), + XMFLOAT3(-0.0019536f, 0.0733731f, -0.0108565f), + XMFLOAT3(-0.0019461f, 0.0733107f, -0.0141954f), + XMFLOAT3(-0.0019409f, 0.0732084f, -0.0171920f), + XMFLOAT3(-0.0019398f, 0.0731180f, -0.0195282f), + XMFLOAT3(-0.0019409f, 0.0730503f, -0.0213779f), + XMFLOAT3(-0.0019431f, 0.0730020f, -0.0228579f), + XMFLOAT3(-0.0019456f, 0.0729691f, -0.0240146f), + XMFLOAT3(-0.0019479f, 0.0729484f, -0.0248579f), + XMFLOAT3(-0.0019495f, 0.0729373f, -0.0253787f), + XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), + XMFLOAT3(-0.0019493f, 0.0729384f, -0.0253245f), + XMFLOAT3(-0.0019473f, 0.0729535f, -0.0246470f), + XMFLOAT3(-0.0019446f, 0.0729828f, -0.0235456f), + XMFLOAT3(-0.0019421f, 0.0730318f, -0.0220117f), + XMFLOAT3(-0.0019408f, 0.0731085f, -0.0199669f), + XMFLOAT3(-0.0019434f, 0.0732235f, -0.0171513f), + XMFLOAT3(-0.0019529f, 0.0733484f, -0.0132013f), + XMFLOAT3(-0.0019604f, 0.0733796f, -0.0095440f), + XMFLOAT3(-0.0019613f, 0.0733652f, -0.0069526f), + XMFLOAT3(-0.0019592f, 0.0733468f, -0.0050041f), + XMFLOAT3(-0.0019561f, 0.0733323f, -0.0035149f), + XMFLOAT3(-0.0019531f, 0.0733228f, -0.0024349f), + XMFLOAT3(-0.0019509f, 0.0733176f, -0.0017666f), + XMFLOAT3(-0.0019501f, 0.0733160f, -0.0015359f), + XMFLOAT3(-0.0019507f, 0.0733175f, -0.0017199f), + XMFLOAT3(-0.0019523f, 0.0733223f, -0.0022591f), + XMFLOAT3(-0.0019544f, 0.0733313f, -0.0031417f), + XMFLOAT3(-0.0019566f, 0.0733445f, -0.0043721f), + XMFLOAT3(-0.0019581f, 0.0733614f, -0.0059870f), + XMFLOAT3(-0.0019577f, 0.0733771f, -0.0080864f), + XMFLOAT3(-0.0019536f, 0.0733731f, -0.0108565f), + XMFLOAT3(-0.0019461f, 0.0733107f, -0.0141954f), + XMFLOAT3(-0.0019409f, 0.0732084f, -0.0171920f), + XMFLOAT3(-0.0019398f, 0.0731180f, -0.0195282f), + XMFLOAT3(-0.0019409f, 0.0730503f, -0.0213779f), + XMFLOAT3(-0.0019431f, 0.0730020f, -0.0228579f), + XMFLOAT3(-0.0019456f, 0.0729691f, -0.0240146f), + XMFLOAT3(-0.0019479f, 0.0729484f, -0.0248579f), + XMFLOAT3(-0.0019495f, 0.0729373f, -0.0253787f), + XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), + XMFLOAT3(-0.0019493f, 0.0729384f, -0.0253245f), + XMFLOAT3(-0.0019473f, 0.0729535f, -0.0246470f), + XMFLOAT3(-0.0019446f, 0.0729828f, -0.0235456f), + XMFLOAT3(-0.0019421f, 0.0730318f, -0.0220117f), + XMFLOAT3(-0.0019408f, 0.0731085f, -0.0199669f), + XMFLOAT3(-0.0019434f, 0.0732235f, -0.0171513f), + XMFLOAT3(-0.0019529f, 0.0733484f, -0.0132013f), + XMFLOAT3(-0.0019604f, 0.0733796f, -0.0095440f), + XMFLOAT3(-0.0019613f, 0.0733652f, -0.0069526f), + XMFLOAT3(-0.0019592f, 0.0733468f, -0.0050041f), + 
XMFLOAT3(-0.0019561f, 0.0733323f, -0.0035149f), + XMFLOAT3(-0.0019531f, 0.0733228f, -0.0024349f), + XMFLOAT3(-0.0019509f, 0.0733176f, -0.0017666f), + XMFLOAT3(-0.0019501f, 0.0733160f, -0.0015359f), + XMFLOAT3(-0.0019507f, 0.0733175f, -0.0017199f), + XMFLOAT3(-0.0019523f, 0.0733223f, -0.0022591f), + XMFLOAT3(-0.0019544f, 0.0733313f, -0.0031417f), + XMFLOAT3(-0.0019566f, 0.0733445f, -0.0043721f), + XMFLOAT3(-0.0019581f, 0.0733614f, -0.0059870f), + XMFLOAT3(-0.0019577f, 0.0733771f, -0.0080864f), + XMFLOAT3(-0.0019536f, 0.0733731f, -0.0108565f), + XMFLOAT3(-0.0019461f, 0.0733107f, -0.0141954f), + XMFLOAT3(-0.0019409f, 0.0732084f, -0.0171920f), + XMFLOAT3(-0.0019398f, 0.0731180f, -0.0195282f), + XMFLOAT3(-0.0019409f, 0.0730503f, -0.0213779f), + XMFLOAT3(-0.0019431f, 0.0730020f, -0.0228579f), + XMFLOAT3(-0.0019456f, 0.0729691f, -0.0240146f), + XMFLOAT3(-0.0019479f, 0.0729484f, -0.0248579f), + XMFLOAT3(-0.0019495f, 0.0729373f, -0.0253787f), + XMFLOAT3(-0.0019501f, 0.0729339f, -0.0255577f), +}; + +const XMFLOAT4 accessor_237[] = { + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_238[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_239[] = { + XMFLOAT3(-0.0000000f, 0.0919261f, -0.0000001f), + XMFLOAT3(-0.0000000f, 0.0919261f, -0.0000001f), +}; + +const XMFLOAT4 accessor_240[] = { + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_241[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_242[] = { + XMFLOAT3(-0.0000000f, 0.1196964f, 0.0000000f), + XMFLOAT3(-0.0000000f, 0.1196964f, 0.0000000f), +}; + +const XMFLOAT4 accessor_243[] = { + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_244[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_245[] = { + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_246[] = { + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_247[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const float accessor_248[] = { + 0.0, + 0.0416666679084301, + 0.0833333358168602, + 0.125, + 0.1666666716337204, + 0.2083333283662796, + 0.25, + 0.2916666567325592, + 0.3333333432674408, + 0.375, + 0.4166666567325592, + 0.4583333432674408, + 0.5, + 0.5416666865348816, + 0.5833333134651184, + 0.625, + 0.6666666865348816, + 0.7083333134651184, + 0.75, + 0.7916666865348816, + 0.8333333134651184, + 0.875, + 0.9166666865348816, + 0.9583333134651184, + 1.0, + 1.0416666269302368, + 1.0833333730697632, + 1.125, + 1.1666666269302368, + 1.2083333730697632, + 1.25, + 1.2916666269302368, + 1.3333333730697632, + 1.375, + 1.4166666269302368, + 1.4583333730697632, + 1.5, + 1.5416666269302368, + 1.5833333730697632, + 1.625, + 1.6666666269302368, + 1.7083333730697632, + 1.75, + 1.7916666269302368, + 1.8333333730697632, + 1.875, + 1.9166666269302368, + 1.9583333730697632, + 2.0, + 2.0416667461395264, + 
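// Keyframe times in seconds at 1/24 s steps (0.0416667 = 1/24), spanning 0.0 to 2.5833. +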
2.0833332538604736, + 2.125, + 2.1666667461395264, + 2.2083332538604736, + 2.25, + 2.2916667461395264, + 2.3333332538604736, + 2.375, + 2.4166667461395264, + 2.4583332538604736, + 2.5, + 2.5416667461395264, + 2.5833332538604736, +}; + +const XMFLOAT3 accessor_249[] = { + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0062762f, -0.0043773f), + XMFLOAT3(-0.0020865f, 0.0070791f, -0.0040864f), + XMFLOAT3(-0.0020865f, 0.0085242f, -0.0035628f), + XMFLOAT3(-0.0020865f, 0.0107303f, -0.0027635f), + XMFLOAT3(-0.0020865f, 0.0138577f, -0.0016304f), + XMFLOAT3(-0.0020865f, 0.0181335f, -0.0000812f), + XMFLOAT3(-0.0020865f, 0.0238962f, 0.0020067f), + XMFLOAT3(-0.0020865f, 0.0316917f, 0.0048312f), + XMFLOAT3(-0.0020865f, 0.0425045f, 0.0087489f), + XMFLOAT3(-0.0020865f, 0.0584514f, 0.0145268f), + XMFLOAT3(-0.0020865f, 0.0853066f, 0.0242569f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1092089f, 0.0329172f), + XMFLOAT3(-0.0020865f, 0.1077885f, 0.0324025f), + XMFLOAT3(-0.0020865f, 0.1034038f, 0.0308139f), + XMFLOAT3(-0.0020865f, 0.0960671f, 0.0281556f), + XMFLOAT3(-0.0020865f, 0.0861663f, 0.0245684f), + XMFLOAT3(-0.0020865f, 0.0745076f, 0.0203442f), + XMFLOAT3(-0.0020865f, 0.0621546f, 0.0158685f), + XMFLOAT3(-0.0020865f, 0.0501323f, 0.0115126f), + XMFLOAT3(-0.0020865f, 0.0391940f, 0.0075494f), + XMFLOAT3(-0.0020865f, 0.0297636f, 0.0041326f), + XMFLOAT3(-0.0020865f, 0.0220025f, 0.0013206f), + XMFLOAT3(-0.0020865f, 0.0159076f, -0.0008877f), + XMFLOAT3(-0.0020865f, 0.0113903f, -0.0025244f), + XMFLOAT3(-0.0020865f, 0.0083256f, -0.0036348f), + XMFLOAT3(-0.0020865f, 0.0065801f, -0.0042672f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), +}; + +const XMFLOAT4 accessor_250[] = { + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0021890f, 0.0000000f, 0.0000000f, 0.9999976f), + 
XMFLOAT4(-0.0087159f, 0.0000000f, 0.0000000f, 0.9999620f), + XMFLOAT4(-0.0190081f, 0.0000000f, 0.0000000f, 0.9998193f), + XMFLOAT4(-0.0317616f, 0.0000000f, 0.0000000f, 0.9994955f), + XMFLOAT4(-0.0452434f, 0.0000000f, 0.0000000f, 0.9989760f), + XMFLOAT4(-0.0578583f, 0.0000000f, 0.0000000f, 0.9983248f), + XMFLOAT4(-0.0685415f, 0.0000000f, 0.0000000f, 0.9976483f), + XMFLOAT4(-0.0767916f, 0.0000000f, 0.0000000f, 0.9970472f), + XMFLOAT4(-0.0825017f, 0.0000000f, 0.0000000f, 0.9965910f), + XMFLOAT4(-0.0857791f, 0.0000000f, 0.0000000f, 0.9963142f), + XMFLOAT4(-0.0868221f, 0.0000000f, -0.0000000f, 0.9962238f), + XMFLOAT4(-0.0850719f, 0.0000000f, -0.0000000f, 0.9963748f), + XMFLOAT4(-0.0794759f, 0.0000000f, -0.0000000f, 0.9968368f), + XMFLOAT4(-0.0694075f, -0.0000000f, -0.0000000f, 0.9975884f), + XMFLOAT4(-0.0540456f, -0.0000000f, -0.0000000f, 0.9985385f), + XMFLOAT4(-0.0322847f, -0.0000000f, -0.0000000f, 0.9994787f), + XMFLOAT4(-0.0025831f, -0.0000000f, -0.0000000f, 0.9999967f), + XMFLOAT4( 0.0373075f, -0.0000000f, -0.0000000f, 0.9993039f), + XMFLOAT4( 0.0908692f, -0.0000000f, -0.0000000f, 0.9958628f), + XMFLOAT4( 0.1639615f, -0.0000000f, -0.0000000f, 0.9864668f), + XMFLOAT4( 0.2675723f, -0.0000000f, -0.0000000f, 0.9635378f), + XMFLOAT4( 0.4205562f, -0.0000000f, -0.0000000f, 0.9072665f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5290549f, -0.0000000f, -0.0000000f, 0.8485876f), + XMFLOAT4( 0.5225267f, -0.0000000f, -0.0000000f, 0.8526229f), + XMFLOAT4( 0.5021838f, -0.0000000f, -0.0000000f, 0.8647609f), + XMFLOAT4( 0.4675197f, -0.0000000f, -0.0000000f, 0.8839827f), + XMFLOAT4( 0.4195871f, -0.0000000f, -0.0000000f, 0.9077151f), + XMFLOAT4( 0.3616153f, -0.0000000f, -0.0000000f, 0.9323274f), + XMFLOAT4( 0.2986318f, -0.0000000f, -0.0000000f, 0.9543684f), + XMFLOAT4( 0.2360551f, -0.0000000f, -0.0000000f, 0.9717396f), + XMFLOAT4( 0.1782483f, -0.0000000f, -0.0000000f, 0.9839855f), + XMFLOAT4( 0.1279036f, -0.0000000f, -0.0000000f, 0.9917866f), + XMFLOAT4( 0.0862154f, -0.0000000f, -0.0000000f, 0.9962766f), + XMFLOAT4( 0.0533678f, -0.0000000f, -0.0000000f, 0.9985749f), + XMFLOAT4( 0.0289823f, -0.0000000f, -0.0000000f, 0.9995800f), + XMFLOAT4( 0.0124282f, -0.0000000f, -0.0000000f, 0.9999228f), + XMFLOAT4( 0.0029981f, -0.0000000f, -0.0000000f, 0.9999955f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 
1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), +}; + +const float accessor_251[] = { + 0.0, + 2.5833332538604736, +}; + +const XMFLOAT3 accessor_252[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_253[] = { + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), +}; + +const XMFLOAT4 accessor_254[] = { + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), +}; + +const XMFLOAT3 accessor_255[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_256[] = { + XMFLOAT3( 0.0000000f, 0.2498153f, 0.0000001f), + XMFLOAT3( 0.0000000f, 0.2498153f, 0.0000001f), +}; + +const XMFLOAT4 accessor_257[] = { + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), +}; + +const XMFLOAT3 accessor_258[] = { + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_259[] = { + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), +}; + +const XMFLOAT4 accessor_260[] = { + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0009257f, 0.0000000f, 0.0000000f, 0.9999996f), + XMFLOAT4(-0.0036345f, 0.0000000f, 0.0000000f, 0.9999934f), + XMFLOAT4(-0.0078942f, 0.0000000f, 0.0000000f, 0.9999688f), + XMFLOAT4(-0.0132994f, 0.0000000f, 0.0000000f, 0.9999115f), + XMFLOAT4(-0.0193194f, 0.0000000f, 0.0000000f, 0.9998134f), + XMFLOAT4(-0.0253953f, 0.0000000f, 0.0000000f, 0.9996775f), + XMFLOAT4(-0.0310393f, 0.0000000f, 0.0000000f, 0.9995182f), + XMFLOAT4(-0.0358950f, 0.0000000f, 0.0000000f, 0.9993556f), + XMFLOAT4(-0.0397460f, 0.0000000f, 0.0000000f, 0.9992098f), + XMFLOAT4(-0.0424918f, 0.0000000f, 0.0000000f, 0.9990968f), + XMFLOAT4(-0.0441135f, -0.0000000f, -0.0000000f, 0.9990265f), + XMFLOAT4(-0.0446421f, 0.0000000f, -0.0000000f, 0.9990031f), + XMFLOAT4(-0.0440583f, 0.0000000f, 0.0000000f, 0.9990289f), + XMFLOAT4(-0.0421923f, -0.0000000f, 0.0000000f, 0.9991095f), + XMFLOAT4(-0.0388366f, 0.0000000f, 0.0000000f, 0.9992456f), + XMFLOAT4(-0.0337202f, 0.0000000f, 0.0000000f, 0.9994313f), + XMFLOAT4(-0.0264780f, -0.0000000f, 0.0000000f, 0.9996494f), + XMFLOAT4(-0.0165986f, 0.0000000f, 0.0000000f, 0.9998623f), + XMFLOAT4(-0.0033273f, -0.0000000f, -0.0000000f, 0.9999945f), + XMFLOAT4( 0.0145278f, -0.0000000f, 0.0000000f, 0.9998945f), + XMFLOAT4( 0.0390374f, 0.0000000f, 0.0000000f, 0.9992377f), + XMFLOAT4( 0.0742943f, 0.0000000f, 0.0000000f, 0.9972364f), + XMFLOAT4( 0.1282902f, 0.0000000f, 0.0000000f, 0.9917367f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + 
XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1688885f, 0.0000000f, 0.0000000f, 0.9856352f), + XMFLOAT4( 0.1752275f, -0.0000000f, -0.0000000f, 0.9845279f), + XMFLOAT4( 0.1891056f, 0.0000000f, -0.0000000f, 0.9819568f), + XMFLOAT4( 0.2033245f, 0.0000000f, 0.0000000f, 0.9791114f), + XMFLOAT4( 0.2105833f, -0.0000000f, 0.0000000f, 0.9775760f), + XMFLOAT4( 0.2022800f, 0.0000000f, 0.0000000f, 0.9793277f), + XMFLOAT4( 0.1771124f, -0.0000000f, 0.0000000f, 0.9841906f), + XMFLOAT4( 0.1446740f, 0.0000000f, 0.0000000f, 0.9894794f), + XMFLOAT4( 0.1100498f, -0.0000000f, 0.0000000f, 0.9939261f), + XMFLOAT4( 0.0769736f, 0.0000000f, 0.0000000f, 0.9970331f), + XMFLOAT4( 0.0477586f, 0.0000000f, 0.0000000f, 0.9988589f), + XMFLOAT4( 0.0236126f, -0.0000000f, 0.0000000f, 0.9997212f), + XMFLOAT4( 0.0050150f, 0.0000000f, 0.0000000f, 0.9999874f), + XMFLOAT4(-0.0079844f, 0.0000000f, -0.0000000f, 0.9999681f), + XMFLOAT4(-0.0155661f, 0.0000000f, 0.0000000f, 0.9998789f), + XMFLOAT4(-0.0180204f, 0.0000000f, -0.0000000f, 0.9998376f), + XMFLOAT4(-0.0171851f, -0.0000000f, -0.0000000f, 0.9998524f), + XMFLOAT4(-0.0074814f, -0.0000000f, -0.0000000f, 0.9999720f), + XMFLOAT4(-0.0016779f, -0.0000000f, -0.0000000f, 0.9999986f), + XMFLOAT4(-0.0003088f, -0.0000000f, -0.0000000f, 0.9999999f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_261[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_262[] = { + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), +}; + +const XMFLOAT4 accessor_263[] = { + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), + XMFLOAT4(-0.0007234f, -0.0000008f, -0.0011286f, 0.9999991f), + XMFLOAT4(-0.0028400f, -0.0000032f, -0.0011285f, 0.9999954f), + XMFLOAT4(-0.0061685f, -0.0000069f, -0.0011282f, 0.9999803f), + XMFLOAT4(-0.0103921f, -0.0000117f, -0.0011280f, 0.9999454f), + XMFLOAT4(-0.0150963f, -0.0000170f, -0.0011279f, 0.9998854f), + XMFLOAT4(-0.0198443f, -0.0000224f, -0.0011278f, 0.9998025f), + XMFLOAT4(-0.0242551f, -0.0000274f, -0.0011278f, 0.9997052f), + XMFLOAT4(-0.0280501f, -0.0000316f, -0.0011278f, 0.9996060f), + XMFLOAT4(-0.0310600f, -0.0000350f, -0.0011279f, 0.9995170f), + XMFLOAT4(-0.0332062f, -0.0000375f, -0.0011279f, 0.9994479f), + XMFLOAT4(-0.0344739f, -0.0000389f, -0.0011280f, 0.9994050f), + XMFLOAT4(-0.0348870f, -0.0000394f, -0.0011280f, 0.9993907f), + XMFLOAT4(-0.0343703f, -0.0000388f, -0.0011278f, 0.9994086f), + XMFLOAT4(-0.0327188f, -0.0000370f, -0.0011272f, 0.9994640f), + XMFLOAT4(-0.0297488f, -0.0000338f, -0.0011260f, 0.9995568f), + XMFLOAT4(-0.0252208f, -0.0000288f, -0.0011244f, 0.9996813f), + XMFLOAT4(-0.0188120f, -0.0000218f, -0.0011222f, 0.9998224f), + XMFLOAT4(-0.0100701f, -0.0000121f, -0.0011194f, 0.9999486f), + XMFLOAT4( 0.0016721f, 0.0000009f, -0.0011161f, 0.9999980f), + XMFLOAT4( 0.0174689f, 0.0000185f, -0.0011124f, 0.9998468f), + XMFLOAT4( 0.0391528f, 0.0000429f, -0.0011086f, 0.9992326f), + 
XMFLOAT4( 0.0703498f, 0.0000783f, -0.0011059f, 0.9975218f), + XMFLOAT4( 0.1181570f, 0.0001329f, -0.0011082f, 0.9929943f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1541417f, 0.0001740f, -0.0011152f, 0.9880481f), + XMFLOAT4( 0.1722621f, 0.0001928f, -0.0011071f, 0.9850505f), + XMFLOAT4( 0.2138882f, 0.0002375f, -0.0010919f, 0.9768575f), + XMFLOAT4( 0.2613858f, 0.0002906f, -0.0010801f, 0.9652337f), + XMFLOAT4( 0.2975744f, 0.0003326f, -0.0010731f, 0.9546980f), + XMFLOAT4( 0.3032275f, 0.0003390f, -0.0010639f, 0.9529175f), + XMFLOAT4( 0.2764442f, 0.0003066f, -0.0010510f, 0.9610294f), + XMFLOAT4( 0.2377807f, 0.0002601f, -0.0010413f, 0.9713183f), + XMFLOAT4( 0.1941246f, 0.0002082f, -0.0010385f, 0.9809763f), + XMFLOAT4( 0.1497597f, 0.0001566f, -0.0010432f, 0.9887218f), + XMFLOAT4( 0.1075356f, 0.0001089f, -0.0010544f, 0.9942007f), + XMFLOAT4( 0.0694092f, 0.0000673f, -0.0010698f, 0.9975877f), + XMFLOAT4( 0.0367376f, 0.0000330f, -0.0010869f, 0.9993244f), + XMFLOAT4( 0.0104604f, 0.0000063f, -0.0011032f, 0.9999447f), + XMFLOAT4(-0.0087762f, -0.0000126f, -0.0011165f, 0.9999608f), + XMFLOAT4(-0.0205302f, -0.0000239f, -0.0011253f, 0.9997886f), + XMFLOAT4(-0.0244987f, -0.0000276f, -0.0011283f, 0.9996992f), + XMFLOAT4(-0.0233632f, -0.0000264f, -0.0011283f, 0.9997264f), + XMFLOAT4(-0.0101714f, -0.0000115f, -0.0011283f, 0.9999477f), + XMFLOAT4(-0.0022814f, -0.0000026f, -0.0011285f, 0.9999968f), + XMFLOAT4(-0.0004200f, -0.0000005f, -0.0011286f, 0.9999993f), + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), +}; + +const XMFLOAT3 accessor_264[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_265[] = { + XMFLOAT3(-0.0005524f, 0.0688295f, -0.0213631f), + XMFLOAT3(-0.0005524f, 0.0688295f, -0.0213631f), +}; + +const XMFLOAT4 accessor_266[] = { + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), +}; + +const XMFLOAT3 accessor_267[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_268[] = { + XMFLOAT3( 0.0000000f, 0.3082207f, -0.0000010f), + XMFLOAT3( 0.0000000f, 0.3082207f, -0.0000010f), +}; + +const XMFLOAT4 accessor_269[] = { + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), +}; + +const XMFLOAT3 accessor_270[] = { + XMFLOAT3( 1.0000001f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000001f, 
0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_271[] = { + XMFLOAT3( 0.0000000f, 0.3056044f, 0.0000002f), + XMFLOAT3( 0.0000000f, 0.3056044f, 0.0000002f), +}; + +const XMFLOAT4 accessor_272[] = { + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), +}; + +const XMFLOAT3 accessor_273[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_274[] = { + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), +}; + +const XMFLOAT4 accessor_275[] = { + XMFLOAT4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), + XMFLOAT4( 0.0052462f, 0.0000059f, 0.0011287f, 0.9999856f), + XMFLOAT4( 0.0174043f, 0.0000196f, 0.0011285f, 0.9998479f), + XMFLOAT4( 0.0328632f, 0.0000370f, 0.0011281f, 0.9994593f), + XMFLOAT4( 0.0487975f, 0.0000550f, 0.0011273f, 0.9988081f), + XMFLOAT4( 0.0616544f, 0.0041983f, 0.0008714f, 0.9980884f), + XMFLOAT4( 0.0631043f, 0.0148462f, 0.0001917f, 0.9978965f), + XMFLOAT4( 0.0437547f, 0.0294095f, -0.0001603f, 0.9986094f), + XMFLOAT4( 0.0153181f, 0.0452828f, 0.0004309f, 0.9988567f), + XMFLOAT4(-0.0144889f, 0.0598154f, 0.0019910f, 0.9981024f), + XMFLOAT4(-0.0405501f, 0.0703799f, 0.0039846f, 0.9966878f), + XMFLOAT4(-0.0601135f, 0.0744065f, 0.0056148f, 0.9953987f), + XMFLOAT4(-0.0720231f, 0.0704410f, 0.0062225f, 0.9948930f), + XMFLOAT4(-0.0760304f, 0.0509407f, 0.0050169f, 0.9957908f), + XMFLOAT4(-0.0759624f, -0.0523691f, -0.0028679f, 0.9957303f), + XMFLOAT4(-0.0760197f, -0.0717574f, -0.0043619f, 0.9945114f), + XMFLOAT4(-0.0763799f, -0.0635159f, -0.0037504f, 0.9950466f), + XMFLOAT4(-0.0770289f, 0.0526805f, 0.0052030f, 0.9956225f), + XMFLOAT4(-0.0776180f, 0.0727178f, 0.0067996f, 0.9943045f), + XMFLOAT4(-0.0785292f, 0.0715604f, 0.0067751f, 0.9943171f), + XMFLOAT4(-0.0797666f, 0.0669097f, 0.0064910f, 0.9945443f), + XMFLOAT4(-0.0814541f, 0.0538083f, 0.0055315f, 0.9952081f), + XMFLOAT4(-0.0837723f, 0.0091012f, 0.0018977f, 0.9964416f), + XMFLOAT4(-0.0870097f, -0.0002338f, 0.0011125f, 0.9962068f), + XMFLOAT4(-0.0919945f, -0.0002393f, 0.0011114f, 0.9957589f), + XMFLOAT4(-0.0957634f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0957633f, -0.0002435f, 0.0011104f, 0.9954035f), + XMFLOAT4(-0.0940729f, -0.0002394f, 0.0011110f, 0.9955647f), + XMFLOAT4(-0.0896200f, -0.0002281f, 0.0011126f, 0.9959754f), + XMFLOAT4(-0.0833320f, -0.0002114f, 0.0011147f, 0.9965212f), + XMFLOAT4(-0.0761369f, -0.0001909f, 0.0011170f, 0.9970967f), + XMFLOAT4(-0.0689652f, -0.0001680f, 0.0011194f, 0.9976184f), + XMFLOAT4(-0.0625721f, -0.0001459f, 0.0011214f, 0.9980398f), + 
XMFLOAT4(-0.0570058f, -0.0001265f, 0.0011229f, 0.9983733f), + XMFLOAT4(-0.0521370f, -0.0001094f, 0.0011242f, 0.9986393f), + XMFLOAT4(-0.0478361f, -0.0000942f, 0.0011252f, 0.9988546f), + XMFLOAT4(-0.0439732f, -0.0000807f, 0.0011260f, 0.9990321f), + XMFLOAT4(-0.0404184f, -0.0000687f, 0.0011266f, 0.9991822f), + XMFLOAT4(-0.0370413f, -0.0000580f, 0.0011272f, 0.9993132f), + XMFLOAT4(-0.0337119f, -0.0000486f, 0.0011276f, 0.9994310f), + XMFLOAT4(-0.0302997f, -0.0000402f, 0.0011279f, 0.9995403f), + XMFLOAT4(-0.0266745f, -0.0000328f, 0.0011282f, 0.9996436f), + XMFLOAT4(-0.0227057f, -0.0000263f, 0.0011284f, 0.9997416f), + XMFLOAT4(-0.0182630f, -0.0000206f, 0.0011285f, 0.9998326f), + XMFLOAT4(-0.0133408f, -0.0000151f, 0.0011286f, 0.9999104f), + XMFLOAT4(-0.0084337f, -0.0000095f, 0.0011286f, 0.9999638f), + XMFLOAT4(-0.0041614f, -0.0000047f, 0.0011287f, 0.9999907f), + XMFLOAT4(-0.0011437f, -0.0000013f, 0.0011287f, 0.9999987f), + XMFLOAT4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), +}; + +const XMFLOAT3 accessor_276[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999998f), + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999998f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0624999f, 1.0624999f, 0.9687501f), + XMFLOAT3( 1.1999999f, 1.1999999f, 0.9000000f), + XMFLOAT3( 1.3375000f, 1.3375002f, 0.8312501f), + XMFLOAT3( 1.4000000f, 1.4000000f, 0.8000001f), + XMFLOAT3( 1.2962962f, 1.2962964f, 0.8518518f), + XMFLOAT3( 1.1037036f, 1.1037037f, 0.9481483f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000000f), + XMFLOAT3( 0.9999999f, 
1.0000001f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_277[] = { + XMFLOAT3( 0.2488541f, 0.2033313f, -0.0450287f), + XMFLOAT3( 0.2488541f, 0.2033313f, -0.0450287f), +}; + +const XMFLOAT4 accessor_278[] = { + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), +}; + +const XMFLOAT3 accessor_279[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_280[] = { + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), +}; + +const XMFLOAT4 accessor_281[] = { + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), +}; + +const XMFLOAT3 accessor_282[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_283[] = { + XMFLOAT3(-0.0000000f, 0.1024613f, -0.0000003f), + XMFLOAT3(-0.0000000f, 0.1024613f, -0.0000003f), +}; + +const XMFLOAT4 accessor_284[] = { + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_285[] = { + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000000f), +}; + +const XMFLOAT3 accessor_286[] = { + XMFLOAT3(-0.0000000f, 0.1026039f, 0.0000002f), + XMFLOAT3(-0.0000000f, 0.1026039f, 0.0000002f), +}; + +const XMFLOAT4 accessor_287[] = { + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_288[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_289[] = { + XMFLOAT3(-0.0000000f, 0.1033674f, -0.0000001f), + XMFLOAT3(-0.0000000f, 0.1033674f, -0.0000001f), +}; + +const XMFLOAT4 accessor_290[] = { + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_291[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_292[] = { + XMFLOAT3(-0.0000000f, 0.1012845f, -0.0000001f), + XMFLOAT3(-0.0000000f, 0.1012845f, -0.0000001f), +}; + +const XMFLOAT4 accessor_293[] = { + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), +}; + +const XMFLOAT3 accessor_294[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_295[] = { + XMFLOAT3(-0.0000000f, 0.1024317f, -0.0000007f), + XMFLOAT3(-0.0000000f, 0.1024317f, -0.0000007f), +}; + +const XMFLOAT4 accessor_296[] = { + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), +}; + +const XMFLOAT3 
accessor_297[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_298[] = { + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), +}; + +const XMFLOAT4 accessor_299[] = { + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), +}; + +const XMFLOAT3 accessor_300[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_301[] = { + XMFLOAT3(-0.0000000f, 0.3082130f, -0.0000004f), + XMFLOAT3(-0.0000000f, 0.3082130f, -0.0000004f), +}; + +const XMFLOAT4 accessor_302[] = { + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), +}; + +const XMFLOAT3 accessor_303[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), +}; + +const XMFLOAT3 accessor_304[] = { + XMFLOAT3(-0.0000000f, 0.3055982f, -0.0000002f), + XMFLOAT3(-0.0000000f, 0.3055982f, -0.0000002f), +}; + +const XMFLOAT4 accessor_305[] = { + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), +}; + +const XMFLOAT3 accessor_306[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), +}; + +const XMFLOAT3 accessor_307[] = { + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), +}; + +const XMFLOAT4 accessor_308[] = { + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), +}; + +const XMFLOAT3 accessor_309[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_310[] = { + XMFLOAT3( 0.0000000f, 0.1024598f, 0.0000012f), + XMFLOAT3( 0.0000000f, 0.1024598f, 0.0000012f), +}; + +const XMFLOAT4 accessor_311[] = { + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_312[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_313[] = { + XMFLOAT3(-0.0000000f, 0.1026044f, 0.0000000f), + XMFLOAT3(-0.0000000f, 0.1026044f, 0.0000000f), +}; + +const XMFLOAT4 accessor_314[] = { + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_315[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_316[] = { + XMFLOAT3( 0.0000000f, 0.1033676f, 0.0000003f), + XMFLOAT3( 0.0000000f, 0.1033676f, 0.0000003f), +}; + +const XMFLOAT4 accessor_317[] = { + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_318[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_319[] = { + XMFLOAT3(-0.0000000f, 0.1012811f, -0.0000002f), + XMFLOAT3(-0.0000000f, 0.1012811f, -0.0000002f), +}; + +const XMFLOAT4 accessor_320[] = { + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), +}; + +const 
XMFLOAT3 accessor_321[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_322[] = { + XMFLOAT3( 0.0000000f, 0.1024289f, -0.0000001f), + XMFLOAT3( 0.0000000f, 0.1024289f, -0.0000001f), +}; + +const XMFLOAT4 accessor_323[] = { + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), +}; + +const XMFLOAT3 accessor_324[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_325[] = { + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), +}; + +const XMFLOAT4 accessor_326[] = { + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), +}; + +const XMFLOAT3 accessor_327[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_328[] = { + XMFLOAT3(-0.0000000f, 0.2498146f, 0.0000000f), + XMFLOAT3(-0.0000000f, 0.2498146f, 0.0000000f), +}; + +const XMFLOAT4 accessor_329[] = { + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), +}; + +const XMFLOAT3 accessor_330[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_331[] = { + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_332[] = { + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_333[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_334[] = { + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_335[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_336[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_337[] = { + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733793f, -0.0047631f), + XMFLOAT3(-0.0020865f, 0.0733698f, -0.0056423f), + XMFLOAT3(-0.0020865f, 0.0733313f, -0.0070283f), + XMFLOAT3(-0.0020865f, 0.0732441f, -0.0087447f), + XMFLOAT3(-0.0020865f, 0.0731042f, -0.0105567f), + XMFLOAT3(-0.0020865f, 0.0729290f, -0.0122492f), + XMFLOAT3(-0.0020865f, 0.0727471f, -0.0136797f), + XMFLOAT3(-0.0020865f, 0.0725856f, -0.0147822f), + XMFLOAT3(-0.0020865f, 0.0724631f, -0.0155441f), + XMFLOAT3(-0.0020865f, 0.0723888f, -0.0159809f), + XMFLOAT3(-0.0020865f, 0.0723645f, -0.0161198f), + XMFLOAT3(-0.0020865f, 0.0726561f, -0.0157957f), + XMFLOAT3(-0.0020865f, 0.0735829f, -0.0147587f), + XMFLOAT3(-0.0020865f, 0.0752300f, -0.0128901f), + XMFLOAT3(-0.0020865f, 0.0776915f, -0.0100333f), + XMFLOAT3(-0.0020865f, 0.0810721f, -0.0059772f), + XMFLOAT3(-0.0020865f, 0.0854873f, -0.0004292f), + XMFLOAT3(-0.0020865f, 0.0910634f, 0.0070289f), + XMFLOAT3(-0.0020865f, 0.0979341f, 0.0170215f), + XMFLOAT3(-0.0020865f, 0.1062378f, 0.0305371f), + XMFLOAT3(-0.0020865f, 0.1161616f, 0.0492570f), + XMFLOAT3(-0.0020865f, 0.1288357f, 0.0756562f), + 
XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1383629f, 0.0924180f), + XMFLOAT3(-0.0020865f, 0.1367864f, 0.0893140f), + XMFLOAT3(-0.0020865f, 0.1339777f, 0.0838283f), + XMFLOAT3(-0.0020865f, 0.1298050f, 0.0758746f), + XMFLOAT3(-0.0020865f, 0.1242470f, 0.0657607f), + XMFLOAT3(-0.0020865f, 0.1174958f, 0.0542613f), + XMFLOAT3(-0.0020865f, 0.1099808f, 0.0424128f), + XMFLOAT3(-0.0020865f, 0.1022687f, 0.0311766f), + XMFLOAT3(-0.0020865f, 0.0949146f, 0.0212209f), + XMFLOAT3(-0.0020865f, 0.0883559f, 0.0128914f), + XMFLOAT3(-0.0020865f, 0.0828787f, 0.0062912f), + XMFLOAT3(-0.0020865f, 0.0786319f, 0.0013782f), + XMFLOAT3(-0.0020865f, 0.0756596f, -0.0019607f), + XMFLOAT3(-0.0020865f, 0.0739336f, -0.0038633f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), +}; + +const XMFLOAT4 accessor_338[] = { + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_339[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_340[] = { + XMFLOAT3(-0.0000000f, 0.0919256f, -0.0000004f), + XMFLOAT3(-0.0000000f, 0.0919256f, -0.0000004f), +}; + +const XMFLOAT4 accessor_341[] = { + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_342[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_343[] = { + XMFLOAT3(-0.0000001f, 0.1196961f, 0.0000000f), + XMFLOAT3(-0.0000001f, 0.1196961f, 0.0000000f), +}; + +const XMFLOAT4 accessor_344[] = { + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_345[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_346[] = { + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_347[] = { + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_348[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + 
XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_349[] = { + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_350[] = { + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_351[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_352[] = { + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_353[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_354[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_355[] = { + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733793f, -0.0047631f), + XMFLOAT3(-0.0020865f, 0.0733698f, -0.0056423f), + XMFLOAT3(-0.0020865f, 0.0733313f, -0.0070283f), + XMFLOAT3(-0.0020865f, 0.0732441f, -0.0087447f), + XMFLOAT3(-0.0020865f, 0.0731043f, -0.0105567f), + XMFLOAT3(-0.0020865f, 0.0729290f, -0.0122492f), + XMFLOAT3(-0.0020865f, 0.0727471f, -0.0136797f), + XMFLOAT3(-0.0020865f, 0.0725856f, -0.0147822f), + XMFLOAT3(-0.0020865f, 0.0724631f, -0.0155441f), + XMFLOAT3(-0.0020865f, 0.0723888f, -0.0159809f), + XMFLOAT3(-0.0020865f, 0.0723645f, -0.0161198f), + XMFLOAT3(-0.0020865f, 0.0726561f, -0.0157957f), + XMFLOAT3(-0.0020865f, 0.0735829f, -0.0147587f), + XMFLOAT3(-0.0020865f, 0.0752300f, -0.0128901f), + XMFLOAT3(-0.0020865f, 0.0776915f, -0.0100333f), + XMFLOAT3(-0.0020865f, 0.0810721f, -0.0059772f), + XMFLOAT3(-0.0020865f, 0.0854873f, -0.0004292f), + XMFLOAT3(-0.0020865f, 0.0910634f, 0.0070289f), + XMFLOAT3(-0.0020865f, 0.0979341f, 0.0170215f), + XMFLOAT3(-0.0020865f, 0.1062378f, 0.0305371f), + XMFLOAT3(-0.0020865f, 0.1161616f, 0.0492570f), + XMFLOAT3(-0.0020865f, 0.1288357f, 0.0756562f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1388586f, 0.0933949f), + XMFLOAT3(-0.0020865f, 0.1383629f, 0.0924180f), + XMFLOAT3(-0.0020865f, 0.1367864f, 0.0893140f), + XMFLOAT3(-0.0020865f, 0.1339777f, 0.0838283f), + XMFLOAT3(-0.0020865f, 0.1298050f, 0.0758746f), + XMFLOAT3(-0.0020865f, 0.1242470f, 0.0657607f), + XMFLOAT3(-0.0020865f, 0.1174958f, 0.0542613f), + XMFLOAT3(-0.0020865f, 0.1099808f, 0.0424128f), + XMFLOAT3(-0.0020865f, 0.1022687f, 0.0311766f), + XMFLOAT3(-0.0020865f, 0.0949146f, 0.0212209f), + XMFLOAT3(-0.0020865f, 0.0883559f, 0.0128914f), + XMFLOAT3(-0.0020865f, 0.0828787f, 0.0062912f), + 
XMFLOAT3(-0.0020865f, 0.0786319f, 0.0013782f), + XMFLOAT3(-0.0020865f, 0.0756596f, -0.0019607f), + XMFLOAT3(-0.0020865f, 0.0739336f, -0.0038633f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), +}; + +const XMFLOAT4 accessor_356[] = { + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_357[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_358[] = { + XMFLOAT3(-0.0000000f, 0.0919253f, 0.0000002f), + XMFLOAT3(-0.0000000f, 0.0919253f, 0.0000002f), +}; + +const XMFLOAT4 accessor_359[] = { + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_360[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_361[] = { + XMFLOAT3( 0.0000001f, 0.1196966f, 0.0000001f), + XMFLOAT3( 0.0000001f, 0.1196966f, 0.0000001f), +}; + +const XMFLOAT4 accessor_362[] = { + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_363[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_364[] = { + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_365[] = { + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_366[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const float accessor_367[] = { + 0.0, + 1.4583333730697632, +}; + +const XMFLOAT3 accessor_368[] = { + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), +}; + +const float accessor_369[] = { + 0.0, + 0.0416666679084301, + 0.0833333358168602, + 0.125, + 0.1666666716337204, + 0.2083333283662796, + 0.25, + 0.2916666567325592, + 0.3333333432674408, + 0.375, + 0.4166666567325592, + 0.4583333432674408, + 0.5, + 0.5416666865348816, + 0.5833333134651184, + 0.625, + 0.6666666865348816, + 0.7083333134651184, + 0.75, + 0.7916666865348816, + 0.8333333134651184, + 0.875, + 0.9166666865348816, + 0.9583333134651184, + 1.0, + 1.0416666269302368, + 1.0833333730697632, + 1.125, + 1.1666666269302368, + 1.2083333730697632, + 1.25, + 1.2916666269302368, + 1.3333333730697632, + 1.375, + 1.4166666269302368, + 1.4583333730697632, +}; + +const XMFLOAT4 accessor_370[] = { + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0004076f, 0.0000000f, 0.0000000f, 0.9999999f), + XMFLOAT4(-0.0016908f, 0.0000000f, 0.0000000f, 0.9999986f), + XMFLOAT4(-0.0039414f, 0.0000000f, 0.0000000f, 0.9999923f), + XMFLOAT4(-0.0072462f, 0.0000000f, 0.0000000f, 0.9999738f), + XMFLOAT4(-0.0116751f, 0.0000000f, 0.0000000f, 0.9999319f), + XMFLOAT4(-0.0172633f, 0.0000000f, 0.0000000f, 0.9998510f), + XMFLOAT4(-0.0239899f, 
0.0000000f, 0.0000000f, 0.9997122f), + XMFLOAT4(-0.0317582f, 0.0000000f, 0.0000000f, 0.9994956f), + XMFLOAT4(-0.0403872f, 0.0000000f, 0.0000000f, 0.9991841f), + XMFLOAT4(-0.0496231f, 0.0000000f, 0.0000000f, 0.9987680f), + XMFLOAT4(-0.0591723f, 0.0000000f, 0.0000000f, 0.9982477f), + XMFLOAT4(-0.0687446f, 0.0000000f, 0.0000000f, 0.9976343f), + XMFLOAT4(-0.0780917f, 0.0000000f, 0.0000000f, 0.9969462f), + XMFLOAT4(-0.0870264f, 0.0000000f, 0.0000000f, 0.9962060f), + XMFLOAT4(-0.0954261f, 0.0000000f, 0.0000000f, 0.9954365f), + XMFLOAT4(-0.1032224f, 0.0000000f, 0.0000000f, 0.9946584f), + XMFLOAT4(-0.1103887f, 0.0000000f, 0.0000000f, 0.9938886f), + XMFLOAT4(-0.1169263f, 0.0000000f, 0.0000000f, 0.9931406f), + XMFLOAT4(-0.1228541f, 0.0000000f, 0.0000000f, 0.9924248f), + XMFLOAT4(-0.1282012f, 0.0000000f, 0.0000000f, 0.9917482f), + XMFLOAT4(-0.1330013f, 0.0000000f, 0.0000000f, 0.9911159f), + XMFLOAT4(-0.1372900f, 0.0000000f, 0.0000000f, 0.9905309f), + XMFLOAT4(-0.1411023f, 0.0000000f, 0.0000000f, 0.9899950f), + XMFLOAT4(-0.1444717f, 0.0000000f, 0.0000000f, 0.9895090f), + XMFLOAT4(-0.1474298f, 0.0000000f, 0.0000000f, 0.9890725f), + XMFLOAT4(-0.1500057f, 0.0000000f, 0.0000000f, 0.9886851f), + XMFLOAT4(-0.1522263f, 0.0000000f, 0.0000000f, 0.9883457f), + XMFLOAT4(-0.1541162f, 0.0000000f, 0.0000000f, 0.9880528f), + XMFLOAT4(-0.1556976f, 0.0000000f, 0.0000000f, 0.9878048f), + XMFLOAT4(-0.1569911f, 0.0000000f, 0.0000000f, 0.9876000f), + XMFLOAT4(-0.1580151f, 0.0000000f, 0.0000000f, 0.9874367f), + XMFLOAT4(-0.1587865f, 0.0000000f, 0.0000000f, 0.9873130f), + XMFLOAT4(-0.1593208f, 0.0000000f, 0.0000000f, 0.9872268f), + XMFLOAT4(-0.1596321f, 0.0000000f, 0.0000000f, 0.9871766f), + XMFLOAT4(-0.1597332f, 0.0000000f, 0.0000000f, 0.9871602f), +}; + +const XMFLOAT3 accessor_371[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9996425f, 0.9999951f), + XMFLOAT3( 1.0000000f, 0.9985171f, 0.9999799f), + XMFLOAT3( 1.0000000f, 0.9965431f, 0.9999531f), + XMFLOAT3( 1.0000000f, 0.9936445f, 0.9999138f), + XMFLOAT3( 1.0000000f, 0.9897598f, 0.9998610f), + XMFLOAT3( 1.0000000f, 0.9848580f, 0.9997945f), + XMFLOAT3( 1.0000000f, 0.9789570f, 0.9997146f), + XMFLOAT3( 1.0000000f, 0.9721409f, 0.9996219f), + XMFLOAT3( 1.0000000f, 0.9645676f, 0.9995192f), + XMFLOAT3( 1.0000000f, 0.9564588f, 0.9994091f), + XMFLOAT3( 1.0000000f, 0.9480710f, 0.9992954f), + XMFLOAT3( 1.0000000f, 0.9396580f, 0.9991812f), + XMFLOAT3( 1.0000000f, 0.9314376f, 0.9990696f), + XMFLOAT3( 1.0000000f, 0.9235743f, 0.9989629f), + XMFLOAT3( 1.0000000f, 0.9161762f, 0.9988625f), + XMFLOAT3( 1.0000000f, 0.9093043f, 0.9987693f), + XMFLOAT3( 1.0000000f, 0.9029827f, 0.9986835f), + XMFLOAT3( 1.0000000f, 0.8972114f, 0.9986051f), + XMFLOAT3( 1.0000000f, 0.8919743f, 0.9985340f), + XMFLOAT3( 1.0000000f, 0.8872471f, 0.9984699f), + XMFLOAT3( 1.0000000f, 0.8830007f, 0.9984124f), + XMFLOAT3( 1.0000000f, 0.8792043f, 0.9983608f), + XMFLOAT3( 1.0000000f, 0.8758278f, 0.9983150f), + XMFLOAT3( 1.0000000f, 0.8728420f, 0.9982744f), + XMFLOAT3( 1.0000000f, 0.8702194f, 0.9982388f), + XMFLOAT3( 1.0000000f, 0.8679347f, 0.9982079f), + XMFLOAT3( 1.0000000f, 0.8659645f, 0.9981811f), + XMFLOAT3( 1.0000000f, 0.8642871f, 0.9981584f), + XMFLOAT3( 1.0000000f, 0.8628830f, 0.9981393f), + XMFLOAT3( 1.0000000f, 0.8617345f, 0.9981238f), + XMFLOAT3( 1.0000000f, 0.8608249f, 0.9981114f), + XMFLOAT3( 1.0000000f, 0.8601398f, 0.9981021f), + XMFLOAT3( 1.0000000f, 0.8596650f, 0.9980956f), + XMFLOAT3( 1.0000000f, 0.8593884f, 0.9980919f), + XMFLOAT3( 1.0000000f, 0.8592988f, 0.9980907f), +}; 
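+// Illustrative note (not generator output): each animation channel in this
+// file pairs a keyframe-time accessor (e.g. the float array accessor_369,
+// times in seconds) with a value accessor of equal length -- XMFLOAT3 for
+// translation/scale keys, XMFLOAT4 for rotation quaternions such as
+// accessor_370. A hypothetical sampler could interpolate between adjacent
+// rotation keys i and i+1 with DirectXMath, where t is the normalized time
+// between the two keys:
+//
+//   XMVECTOR q0 = XMLoadFloat4(&accessor_370[i]);
+//   XMVECTOR q1 = XMLoadFloat4(&accessor_370[i + 1]);
+//   XMVECTOR q  = XMQuaternionNormalize(XMQuaternionSlerp(q0, q1, t));
+//
+// Translation and scale keys would use XMVectorLerp on values loaded with
+// XMLoadFloat3. This sketch only documents the data layout; it is not part
+// of the generated model.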
+ +const XMFLOAT3 accessor_372[] = { + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), +}; + +const XMFLOAT4 accessor_373[] = { + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), +}; + +const XMFLOAT3 accessor_374[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_375[] = { + XMFLOAT3( 0.0000000f, 0.2498153f, 0.0000001f), + XMFLOAT3(-0.0000000f, 0.2498147f, 0.0000002f), +}; + +const XMFLOAT4 accessor_376[] = { + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), +}; + +const XMFLOAT3 accessor_377[] = { + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999998f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_378[] = { + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1346838f, -0.0001081f), + XMFLOAT3(-0.0000000f, 0.1346030f, -0.0004485f), + XMFLOAT3(-0.0000000f, 0.1344613f, -0.0010455f), + XMFLOAT3( 0.0000000f, 0.1342532f, -0.0019221f), + XMFLOAT3( 0.0000000f, 0.1339744f, -0.0030969f), + XMFLOAT3( 0.0000000f, 0.1336226f, -0.0045794f), + XMFLOAT3( 0.0000000f, 0.1331989f, -0.0063640f), + XMFLOAT3( 0.0000000f, 0.1327097f, -0.0084254f), + XMFLOAT3(-0.0000000f, 0.1321661f, -0.0107157f), + XMFLOAT3( 0.0000000f, 0.1315840f, -0.0131681f), + XMFLOAT3( 0.0000000f, 0.1309819f, -0.0157048f), + XMFLOAT3( 0.0000000f, 0.1303780f, -0.0182491f), + XMFLOAT3( 0.0000000f, 0.1297879f, -0.0207352f), + XMFLOAT3( 0.0000000f, 0.1292235f, -0.0231133f), + XMFLOAT3( 0.0000000f, 0.1286924f, -0.0253506f), + XMFLOAT3( 0.0000000f, 0.1281991f, -0.0274289f), + XMFLOAT3( 0.0000000f, 0.1277453f, -0.0293407f), + XMFLOAT3( 0.0000000f, 0.1273311f, -0.0310862f), + XMFLOAT3( 0.0000000f, 0.1269551f, -0.0326700f), + XMFLOAT3( 0.0000000f, 0.1266158f, -0.0340996f), + XMFLOAT3( 0.0000000f, 0.1263110f, -0.0353839f), + XMFLOAT3( 0.0000000f, 0.1260385f, -0.0365320f), + XMFLOAT3( 0.0000000f, 0.1257961f, -0.0375531f), + XMFLOAT3( 0.0000000f, 0.1255818f, -0.0384562f), + XMFLOAT3( 0.0000000f, 0.1253936f, -0.0392493f), + XMFLOAT3( 0.0000000f, 0.1252295f, -0.0399402f), + XMFLOAT3( 0.0000000f, 0.1250881f, -0.0405361f), + XMFLOAT3( 0.0000000f, 0.1249677f, -0.0410434f), + XMFLOAT3( 0.0000000f, 0.1248669f, -0.0414680f), + XMFLOAT3( 0.0000000f, 0.1247845f, -0.0418154f), + XMFLOAT3( 0.0000000f, 0.1247192f, -0.0420904f), + XMFLOAT3( 0.0000000f, 0.1246700f, -0.0422977f), + XMFLOAT3( 0.0000000f, 0.1246359f, -0.0424412f), + XMFLOAT3( 0.0000000f, 0.1246161f, -0.0425249f), + XMFLOAT3( 0.0000000f, 0.1246096f, -0.0425520f), +}; + +const XMFLOAT4 accessor_379[] = { + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0003120f, 0.0000000f, 0.0000000f, 0.9999999f), + XMFLOAT4(-0.0012946f, 0.0000000f, 0.0000000f, 0.9999992f), + XMFLOAT4(-0.0030179f, 0.0000000f, 0.0000000f, 0.9999955f), + XMFLOAT4(-0.0055485f, 0.0000000f, 0.0000000f, 0.9999846f), + XMFLOAT4(-0.0089399f, 0.0000000f, 0.0000000f, 0.9999601f), + XMFLOAT4(-0.0132191f, 0.0000000f, 0.0000000f, 0.9999126f), + XMFLOAT4(-0.0183703f, 0.0000000f, 0.0000000f, 0.9998313f), + XMFLOAT4(-0.0243197f, 0.0000000f, 0.0000000f, 0.9997042f), + XMFLOAT4(-0.0309290f, 0.0000000f, 0.0000000f, 0.9995216f), + XMFLOAT4(-0.0380041f, 0.0000000f, 0.0000000f, 0.9992776f), + XMFLOAT4(-0.0453207f, 0.0000000f, 0.0000000f, 0.9989725f), + XMFLOAT4(-0.0526567f, 0.0000000f, 0.0000000f, 0.9986127f), 
+ XMFLOAT4(-0.0598221f, 0.0000000f, 0.0000000f, 0.9982091f), + XMFLOAT4(-0.0666734f, 0.0000000f, 0.0000000f, 0.9977748f), + XMFLOAT4(-0.0731163f, 0.0000000f, 0.0000000f, 0.9973235f), + XMFLOAT4(-0.0790985f, 0.0000000f, 0.0000000f, 0.9968668f), + XMFLOAT4(-0.0845989f, 0.0000000f, 0.0000000f, 0.9964151f), + XMFLOAT4(-0.0896185f, 0.0000000f, 0.0000000f, 0.9959762f), + XMFLOAT4(-0.0941712f, 0.0000000f, 0.0000000f, 0.9955560f), + XMFLOAT4(-0.0982792f, 0.0000000f, 0.0000000f, 0.9951589f), + XMFLOAT4(-0.1019679f, 0.0000000f, 0.0000000f, 0.9947877f), + XMFLOAT4(-0.1052644f, 0.0000000f, 0.0000000f, 0.9944443f), + XMFLOAT4(-0.1081954f, 0.0000000f, 0.0000000f, 0.9941297f), + XMFLOAT4(-0.1107865f, 0.0000000f, 0.0000000f, 0.9938442f), + XMFLOAT4(-0.1130617f, 0.0000000f, 0.0000000f, 0.9935880f), + XMFLOAT4(-0.1150433f, 0.0000000f, 0.0000000f, 0.9933605f), + XMFLOAT4(-0.1167519f, 0.0000000f, 0.0000000f, 0.9931611f), + XMFLOAT4(-0.1182062f, 0.0000000f, 0.0000000f, 0.9929891f), + XMFLOAT4(-0.1194233f, 0.0000000f, 0.0000000f, 0.9928434f), + XMFLOAT4(-0.1204187f, 0.0000000f, 0.0000000f, 0.9927232f), + XMFLOAT4(-0.1212069f, 0.0000000f, 0.0000000f, 0.9926273f), + XMFLOAT4(-0.1218008f, 0.0000000f, 0.0000000f, 0.9925546f), + XMFLOAT4(-0.1222121f, 0.0000000f, 0.0000000f, 0.9925041f), + XMFLOAT4(-0.1224517f, 0.0000000f, 0.0000000f, 0.9924745f), + XMFLOAT4(-0.1225295f, 0.0000000f, 0.0000000f, 0.9924649f), +}; + +const XMFLOAT3 accessor_380[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_381[] = { + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1115194f, -0.0001042f), + XMFLOAT3( 0.0000000f, 0.1110281f, -0.0004322f), + XMFLOAT3(-0.0000000f, 0.1101664f, -0.0010075f), + XMFLOAT3( 0.0000000f, 0.1089009f, -0.0018524f), + XMFLOAT3(-0.0000000f, 0.1072050f, -0.0029846f), + XMFLOAT3( 0.0000000f, 0.1050651f, -0.0044133f), + XMFLOAT3(-0.0000000f, 0.1024889f, -0.0061332f), + XMFLOAT3(-0.0000000f, 0.0995133f, -0.0081198f), + XMFLOAT3(-0.0000000f, 0.0962070f, -0.0103272f), + XMFLOAT3(-0.0000000f, 0.0926670f, -0.0126906f), + XMFLOAT3(-0.0000000f, 0.0890052f, -0.0151353f), + XMFLOAT3(-0.0000000f, 0.0853324f, -0.0175874f), + XMFLOAT3(-0.0000000f, 0.0817437f, -0.0199833f), + XMFLOAT3(-0.0000000f, 0.0783109f, -0.0222751f), + XMFLOAT3(-0.0000000f, 0.0750812f, -0.0244313f), + XMFLOAT3( 0.0000000f, 0.0720812f, -0.0264343f), + XMFLOAT3(-0.0000000f, 0.0693214f, -0.0282768f), + XMFLOAT3(-0.0000000f, 0.0668018f, -0.0299589f), + XMFLOAT3(-0.0000000f, 0.0645156f, -0.0314853f), + XMFLOAT3( 0.0000000f, 0.0624519f, -0.0328631f), + XMFLOAT3(-0.0000000f, 0.0605980f, -0.0341008f), + XMFLOAT3(-0.0000000f, 0.0589407f, -0.0352072f), + XMFLOAT3(-0.0000000f, 0.0574667f, -0.0361914f), + XMFLOAT3(-0.0000000f, 0.0561631f, -0.0370616f), + XMFLOAT3(-0.0000000f, 0.0550182f, -0.0378260f), + XMFLOAT3(-0.0000000f, 0.0540208f, -0.0384919f), + XMFLOAT3( 0.0000000f, 0.0531607f, -0.0390662f), + XMFLOAT3(-0.0000000f, 0.0524284f, -0.0395551f), + XMFLOAT3( 0.0000000f, 0.0518155f, -0.0399642f), + XMFLOAT3( 0.0000000f, 0.0513140f, -0.0402990f), + XMFLOAT3( 0.0000000f, 0.0509170f, -0.0405641f), + XMFLOAT3(-0.0000000f, 0.0506178f, -0.0407638f), + XMFLOAT3(-0.0000000f, 0.0504106f, -0.0409022f), + XMFLOAT3(-0.0000000f, 0.0502899f, -0.0409828f), + XMFLOAT3( 0.0000000f, 0.0502506f, -0.0410090f), +}; + +const XMFLOAT4 accessor_382[] = { + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), + XMFLOAT4( 0.0004308f, -0.0000004f, -0.0008704f, 0.9999995f), + 
XMFLOAT4( 0.0017872f, -0.0000002f, -0.0001396f, 0.9999984f), + XMFLOAT4( 0.0041660f, 0.0000042f, 0.0010036f, 0.9999908f), + XMFLOAT4( 0.0076592f, 0.0000192f, 0.0025053f, 0.9999676f), + XMFLOAT4( 0.0123404f, 0.0000533f, 0.0043171f, 0.9999145f), + XMFLOAT4( 0.0182468f, 0.0001167f, 0.0063941f, 0.9998131f), + XMFLOAT4( 0.0253094f, 0.0055413f, 0.0085593f, 0.9996276f), + XMFLOAT4( 0.0333918f, 0.0155730f, 0.0106662f, 0.9992641f), + XMFLOAT4( 0.0423915f, 0.0210981f, 0.0129269f, 0.9987947f), + XMFLOAT4( 0.0524793f, -0.0013882f, 0.0166593f, 0.9984821f), + XMFLOAT4( 0.0629941f, -0.0237829f, 0.0209049f, 0.9975115f), + XMFLOAT4( 0.0725314f, 0.0063933f, 0.0218316f, 0.9971067f), + XMFLOAT4( 0.0815918f, 0.0365677f, 0.0221360f, 0.9957488f), + XMFLOAT4( 0.0919024f, 0.0038264f, 0.0276729f, 0.9953761f), + XMFLOAT4( 0.1017266f, -0.0288304f, 0.0337013f, 0.9938233f), + XMFLOAT4( 0.1087924f, 0.0104874f, 0.0322857f, 0.9934847f), + XMFLOAT4( 0.1148307f, 0.0497130f, 0.0300239f, 0.9916860f), + XMFLOAT4( 0.1235279f, 0.0031324f, 0.0377347f, 0.9916185f), + XMFLOAT4( 0.1314894f, -0.0434065f, 0.0456742f, 0.9893130f), + XMFLOAT4( 0.1351532f, 0.0103212f, 0.0401490f, 0.9899572f), + XMFLOAT4( 0.1377223f, 0.0638586f, 0.0334506f, 0.9878439f), + XMFLOAT4( 0.1446365f, 0.0129983f, 0.0410873f, 0.9885460f), + XMFLOAT4( 0.1506421f, -0.0380793f, 0.0475474f, 0.9867098f), + XMFLOAT4( 0.1523061f, 0.0121889f, 0.0336960f, 0.9876836f), + XMFLOAT4( 0.1541551f, 0.0613527f, 0.0131141f, 0.9860528f), + XMFLOAT4( 0.1583794f, 0.0083855f, 0.0131145f, 0.9872556f), + XMFLOAT4( 0.1610744f, -0.0440960f, 0.0165747f, 0.9858174f), + XMFLOAT4( 0.1629164f, -0.0232214f, 0.0098386f, 0.9863175f), + XMFLOAT4( 0.1644170f, 0.0161364f, 0.0009279f, 0.9862585f), + XMFLOAT4( 0.1656732f, 0.0372386f, -0.0043580f, 0.9854677f), + XMFLOAT4( 0.1668234f, 0.0332291f, -0.0049411f, 0.9854143f), + XMFLOAT4( 0.1677081f, 0.0240142f, -0.0042448f, 0.9855350f), + XMFLOAT4( 0.1683121f, 0.0130929f, -0.0029379f, 0.9856424f), + XMFLOAT4( 0.1686491f, 0.0039690f, -0.0016794f, 0.9856668f), + XMFLOAT4( 0.1687535f, 0.0001485f, -0.0011179f, 0.9856576f), +}; + +const XMFLOAT3 accessor_383[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0002844f, 0.9995330f, 0.9999747f), + XMFLOAT3( 1.0011802f, 0.9980625f, 0.9998952f), + XMFLOAT3( 1.0027509f, 0.9954833f, 0.9997556f), + XMFLOAT3( 1.0050577f, 0.9916960f, 0.9995508f), + XMFLOAT3( 1.0081490f, 0.9866204f, 0.9992760f), + XMFLOAT3( 1.0120502f, 0.9802159f, 0.9989296f), + XMFLOAT3( 1.0167462f, 0.9725057f, 0.9985124f), + XMFLOAT3( 1.0221705f, 0.9635997f, 0.9980305f), + XMFLOAT3( 1.0281975f, 0.9537047f, 0.9974951f), + XMFLOAT3( 1.0346508f, 0.9431097f, 0.9969220f), + XMFLOAT3( 1.0413260f, 0.9321504f, 0.9963289f), + XMFLOAT3( 1.0480207f, 0.9211581f, 0.9957342f), + XMFLOAT3( 1.0545628f, 0.9104177f, 0.9951530f), + XMFLOAT3( 1.0608205f, 0.9001434f, 0.9945972f), + XMFLOAT3( 1.0667080f, 0.8904773f, 0.9940742f), + XMFLOAT3( 1.0721768f, 0.8814985f, 0.9935883f), + XMFLOAT3( 1.0772076f, 0.8732389f, 0.9931415f), + XMFLOAT3( 1.0818005f, 0.8656982f, 0.9927335f), + XMFLOAT3( 1.0859680f, 0.8588557f, 0.9923633f), + XMFLOAT3( 1.0897303f, 0.8526793f, 0.9920291f), + XMFLOAT3( 1.0931095f, 0.8471308f, 0.9917288f), + XMFLOAT3( 1.0961307f, 0.8421705f, 0.9914606f), + XMFLOAT3( 1.0988178f, 0.8377588f, 0.9912218f), + XMFLOAT3( 1.1011939f, 0.8338575f, 0.9910107f), + XMFLOAT3( 1.1032810f, 0.8304309f, 0.9908254f), + XMFLOAT3( 1.1050994f, 0.8274459f, 0.9906639f), + XMFLOAT3( 1.1066670f, 0.8248714f, 0.9905245f), + XMFLOAT3( 1.1080019f, 0.8226799f, 0.9904059f), + 
XMFLOAT3( 1.1091194f, 0.8208455f, 0.9903066f), + XMFLOAT3( 1.1100335f, 0.8193448f, 0.9902255f), + XMFLOAT3( 1.1107574f, 0.8181564f, 0.9901612f), + XMFLOAT3( 1.1113026f, 0.8172610f, 0.9901127f), + XMFLOAT3( 1.1116803f, 0.8166409f, 0.9900792f), + XMFLOAT3( 1.1119004f, 0.8162794f, 0.9900597f), + XMFLOAT3( 1.1119719f, 0.8161622f, 0.9900533f), +}; + +const XMFLOAT3 accessor_384[] = { + XMFLOAT3(-0.0005524f, 0.0688295f, -0.0213631f), + XMFLOAT3(-0.0005524f, 0.0688295f, -0.0213631f), +}; + +const XMFLOAT4 accessor_385[] = { + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), +}; + +const XMFLOAT3 accessor_386[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_387[] = { + XMFLOAT3( 0.0000000f, 0.3082207f, -0.0000010f), + XMFLOAT3( 0.0000000f, 0.3082177f, -0.0000000f), +}; + +const XMFLOAT4 accessor_388[] = { + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), +}; + +const XMFLOAT3 accessor_389[] = { + XMFLOAT3( 1.0000001f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_390[] = { + XMFLOAT3( 0.0000000f, 0.3056044f, 0.0000002f), + XMFLOAT3( 0.0000000f, 0.3056000f, 0.0000004f), +}; + +const XMFLOAT4 accessor_391[] = { + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), +}; + +const XMFLOAT3 accessor_392[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_393[] = { + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), + XMFLOAT3( 0.0013054f, 0.3460192f, 0.0048573f), + XMFLOAT3( 0.0013076f, 0.3450401f, 0.0060825f), + XMFLOAT3( 0.0013116f, 0.3433230f, 0.0082313f), + XMFLOAT3( 0.0013174f, 0.3408013f, 0.0113867f), + XMFLOAT3( 0.0013252f, 0.3374218f, 0.0156154f), + XMFLOAT3( 0.0013350f, 0.3331575f, 0.0209514f), + XMFLOAT3( 0.0013468f, 0.3280240f, 0.0273751f), + XMFLOAT3( 0.0013604f, 0.3220945f, 0.0347950f), + XMFLOAT3( 0.0013756f, 0.3155062f, 0.0430391f), + XMFLOAT3( 0.0013918f, 0.3084520f, 0.0518662f), + XMFLOAT3( 0.0014086f, 0.3011551f, 0.0609970f), + XMFLOAT3( 0.0014255f, 0.2938364f, 0.0701552f), + XMFLOAT3( 0.0014419f, 0.2866851f, 0.0791037f), + XMFLOAT3( 0.0014576f, 0.2798445f, 0.0876636f), + XMFLOAT3( 0.0014725f, 0.2734087f, 0.0957168f), + XMFLOAT3( 0.0014862f, 0.2674304f, 0.1031975f), + XMFLOAT3( 0.0014989f, 0.2619310f, 0.1100791f), + XMFLOAT3( 0.0015104f, 0.2569104f, 0.1163617f), + XMFLOAT3( 0.0015209f, 0.2523544f, 0.1220626f), + XMFLOAT3( 0.0015304f, 0.2482421f, 0.1272085f), + XMFLOAT3( 0.0015389f, 0.2445479f, 0.1318311f), + XMFLOAT3( 0.0015465f, 0.2412453f, 0.1359637f), + XMFLOAT3( 0.0015532f, 0.2383079f, 0.1396393f), + XMFLOAT3( 0.0015592f, 0.2357105f, 0.1428897f), + XMFLOAT3( 0.0015644f, 0.2334290f, 0.1457445f), + XMFLOAT3( 0.0015690f, 0.2314415f, 0.1482316f), + XMFLOAT3( 0.0015730f, 0.2297274f, 0.1503764f), + XMFLOAT3( 0.0015763f, 0.2282682f, 0.1522023f), + XMFLOAT3( 0.0015791f, 0.2270469f, 0.1537307f), + XMFLOAT3( 0.0015814f, 0.2260476f, 0.1549810f), + XMFLOAT3( 0.0015832f, 0.2252564f, 0.1559711f), + XMFLOAT3( 0.0015846f, 0.2246604f, 0.1567170f), + XMFLOAT3( 0.0015856f, 0.2242474f, 0.1572338f), + XMFLOAT3( 0.0015861f, 0.2240068f, 0.1575347f), + XMFLOAT3( 0.0015863f, 0.2239287f, 0.1576325f), +}; + +const XMFLOAT4 accessor_394[] = { + XMFLOAT4(-0.0000001f, -0.0000000f, 
0.0011287f, 0.9999994f), + XMFLOAT4( 0.0002590f, 0.0000002f, 0.0011287f, 0.9999993f), + XMFLOAT4( 0.0010745f, 0.0000010f, 0.0011287f, 0.9999988f), + XMFLOAT4( 0.0025048f, 0.0000024f, 0.0011286f, 0.9999962f), + XMFLOAT4( 0.0046051f, 0.0000045f, 0.0011286f, 0.9999888f), + XMFLOAT4( 0.0074198f, 0.0000072f, 0.0011286f, 0.9999719f), + XMFLOAT4( 0.0109714f, 0.0000106f, 0.0011285f, 0.9999392f), + XMFLOAT4( 0.0152469f, 0.0000148f, 0.0011285f, 0.9998831f), + XMFLOAT4( 0.0201850f, 0.0000195f, 0.0011284f, 0.9997957f), + XMFLOAT4( 0.0256711f, 0.0000248f, 0.0011282f, 0.9996698f), + XMFLOAT4( 0.0315443f, 0.0000305f, 0.0011281f, 0.9995017f), + XMFLOAT4( 0.0376184f, 0.0000364f, 0.0011278f, 0.9992915f), + XMFLOAT4( 0.0437093f, 0.0000423f, 0.0011276f, 0.9990436f), + XMFLOAT4( 0.0496592f, 0.0000481f, 0.0011273f, 0.9987656f), + XMFLOAT4( 0.0553491f, 0.0000536f, 0.0011271f, 0.9984664f), + XMFLOAT4( 0.0607005f, 0.0000587f, 0.0011268f, 0.9981554f), + XMFLOAT4( 0.0656700f, 0.0000636f, 0.0011265f, 0.9978408f), + XMFLOAT4( 0.0702399f, 0.0000680f, 0.0011262f, 0.9975295f), + XMFLOAT4( 0.0744109f, 0.0000720f, 0.0011259f, 0.9972271f), + XMFLOAT4( 0.0781945f, 0.0000757f, 0.0011257f, 0.9969375f), + XMFLOAT4( 0.0816088f, 0.0000790f, 0.0011254f, 0.9966638f), + XMFLOAT4( 0.0846752f, 0.0000820f, 0.0011252f, 0.9964080f), + XMFLOAT4( 0.0874157f, 0.0000846f, 0.0011250f, 0.9961713f), + XMFLOAT4( 0.0898527f, 0.0000870f, 0.0011248f, 0.9959545f), + XMFLOAT4( 0.0920072f, 0.0000891f, 0.0011246f, 0.9957578f), + XMFLOAT4( 0.0938993f, 0.0000909f, 0.0011245f, 0.9955811f), + XMFLOAT4( 0.0955473f, 0.0000925f, 0.0011243f, 0.9954243f), + XMFLOAT4( 0.0969684f, 0.0000939f, 0.0011242f, 0.9952869f), + XMFLOAT4( 0.0981780f, 0.0000950f, 0.0011241f, 0.9951683f), + XMFLOAT4( 0.0991903f, 0.0000960f, 0.0011240f, 0.9950678f), + XMFLOAT4( 0.1000184f, 0.0000968f, 0.0011240f, 0.9949850f), + XMFLOAT4( 0.1006742f, 0.0000974f, 0.0011239f, 0.9949188f), + XMFLOAT4( 0.1011681f, 0.0000979f, 0.0011238f, 0.9948688f), + XMFLOAT4( 0.1015103f, 0.0000983f, 0.0011238f, 0.9948339f), + XMFLOAT4( 0.1017097f, 0.0000985f, 0.0011238f, 0.9948135f), + XMFLOAT4( 0.1017744f, 0.0000985f, 0.0011238f, 0.9948069f), +}; + +const XMFLOAT3 accessor_395[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0003822f, 1.0000035f), + XMFLOAT3( 0.9999999f, 1.0015857f, 1.0000148f), + XMFLOAT3( 0.9999999f, 1.0036966f, 1.0000347f), + XMFLOAT3( 0.9999999f, 1.0067961f, 1.0000644f), + XMFLOAT3( 0.9999999f, 1.0109502f, 1.0001035f), + XMFLOAT3( 0.9999999f, 1.0161920f, 1.0001529f), + XMFLOAT3( 0.9999999f, 1.0225023f, 1.0002126f), + XMFLOAT3( 0.9999999f, 1.0297909f, 1.0002813f), + XMFLOAT3( 0.9999999f, 1.0378897f, 1.0003579f), + XMFLOAT3( 0.9999999f, 1.0465610f, 1.0004400f), + XMFLOAT3( 0.9999999f, 1.0555302f, 1.0005245f), + XMFLOAT3( 0.9999999f, 1.0645267f, 1.0006095f), + XMFLOAT3( 0.9999999f, 1.0733171f, 1.0006927f), + XMFLOAT3( 0.9999999f, 1.0817257f, 1.0007721f), + XMFLOAT3( 1.0000001f, 1.0896368f, 1.0008471f), + XMFLOAT3( 0.9999999f, 1.0969852f, 1.0009164f), + XMFLOAT3( 0.9999999f, 1.1037451f, 1.0009803f), + XMFLOAT3( 1.0000001f, 1.1099169f, 1.0010386f), + XMFLOAT3( 1.0000002f, 1.1155170f, 1.0010915f), + XMFLOAT3( 1.0000001f, 1.1205721f, 1.0011392f), + XMFLOAT3( 0.9999999f, 1.1251130f, 1.0011820f), + XMFLOAT3( 1.0000002f, 1.1291726f, 1.0012205f), + XMFLOAT3( 0.9999999f, 1.1327834f, 1.0012544f), + XMFLOAT3( 1.0000001f, 1.1359764f, 1.0012847f), + XMFLOAT3( 0.9999998f, 1.1387808f, 1.0013111f), + XMFLOAT3( 0.9999999f, 1.1412241f, 1.0013344f), + XMFLOAT3( 0.9999999f, 
1.1433307f, 1.0013542f), + XMFLOAT3( 0.9999999f, 1.1451243f, 1.0013711f), + XMFLOAT3( 1.0000001f, 1.1466256f, 1.0013855f), + XMFLOAT3( 0.9999999f, 1.1478541f, 1.0013970f), + XMFLOAT3( 0.9999999f, 1.1488265f, 1.0014062f), + XMFLOAT3( 0.9999999f, 1.1495593f, 1.0014130f), + XMFLOAT3( 0.9999999f, 1.1500669f, 1.0014180f), + XMFLOAT3( 0.9999999f, 1.1503626f, 1.0014206f), + XMFLOAT3( 1.0000001f, 1.1504586f, 1.0014217f), +}; + +const XMFLOAT3 accessor_396[] = { + XMFLOAT3( 0.2488541f, 0.2033313f, -0.0450287f), + XMFLOAT3( 0.2488541f, 0.2033312f, -0.0450287f), +}; + +const XMFLOAT4 accessor_397[] = { + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), + XMFLOAT4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), +}; + +const XMFLOAT3 accessor_398[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_399[] = { + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), +}; + +const XMFLOAT4 accessor_400[] = { + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), +}; + +const XMFLOAT3 accessor_401[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_402[] = { + XMFLOAT3(-0.0000000f, 0.1024613f, -0.0000003f), + XMFLOAT3( 0.0000000f, 0.1024673f, 0.0000021f), +}; + +const XMFLOAT4 accessor_403[] = { + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_404[] = { + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), +}; + +const XMFLOAT3 accessor_405[] = { + XMFLOAT3(-0.0000000f, 0.1026039f, 0.0000002f), + XMFLOAT3( 0.0000000f, 0.1026061f, -0.0000003f), +}; + +const XMFLOAT4 accessor_406[] = { + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_407[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), + XMFLOAT3( 1.0000000f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_408[] = { + XMFLOAT3(-0.0000000f, 0.1033674f, -0.0000001f), + XMFLOAT3( 0.0000000f, 0.1033664f, 0.0000010f), +}; + +const XMFLOAT4 accessor_409[] = { + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181255f, 0.0003339f, 0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_410[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_411[] = { + XMFLOAT3(-0.0000000f, 0.1012845f, -0.0000001f), + XMFLOAT3( 0.0000000f, 0.1012722f, -0.0000000f), +}; + +const XMFLOAT4 accessor_412[] = { + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), + XMFLOAT4( 0.0250840f, -0.0004247f, 0.0000082f, 0.9996852f), +}; + +const XMFLOAT3 accessor_413[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999998f), +}; + +const XMFLOAT3 accessor_414[] = { + XMFLOAT3(-0.0000000f, 0.1024317f, -0.0000007f), + XMFLOAT3( 0.0000000f, 0.1024291f, 0.0000000f), +}; + +const XMFLOAT4 accessor_415[] = { + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), +}; + +const XMFLOAT3 accessor_416[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 
accessor_417[] = { + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), +}; + +const XMFLOAT4 accessor_418[] = { + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), +}; + +const XMFLOAT3 accessor_419[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_420[] = { + XMFLOAT3(-0.0000000f, 0.3082130f, -0.0000004f), + XMFLOAT3( 0.0000000f, 0.3082150f, 0.0000001f), +}; + +const XMFLOAT4 accessor_421[] = { + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), +}; + +const XMFLOAT3 accessor_422[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), + XMFLOAT3( 1.0000001f, 1.0000001f, 0.9999999f), +}; + +const XMFLOAT3 accessor_423[] = { + XMFLOAT3(-0.0000000f, 0.3055982f, -0.0000002f), + XMFLOAT3(-0.0000000f, 0.3056078f, -0.0000030f), +}; + +const XMFLOAT4 accessor_424[] = { + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), +}; + +const XMFLOAT3 accessor_425[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999998f), +}; + +const XMFLOAT3 accessor_426[] = { + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), +}; + +const XMFLOAT4 accessor_427[] = { + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), +}; + +const XMFLOAT3 accessor_428[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_429[] = { + XMFLOAT3( 0.0000000f, 0.1024598f, 0.0000012f), + XMFLOAT3(-0.0000000f, 0.1024574f, -0.0000004f), +}; + +const XMFLOAT4 accessor_430[] = { + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_431[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_432[] = { + XMFLOAT3(-0.0000000f, 0.1026044f, 0.0000000f), + XMFLOAT3(-0.0000001f, 0.1026062f, 0.0000004f), +}; + +const XMFLOAT4 accessor_433[] = { + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_434[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_435[] = { + XMFLOAT3( 0.0000000f, 0.1033676f, 0.0000003f), + XMFLOAT3(-0.0000000f, 0.1033718f, 0.0000001f), +}; + +const XMFLOAT4 accessor_436[] = { + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_437[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000001f), +}; + +const XMFLOAT3 accessor_438[] = { + XMFLOAT3(-0.0000000f, 0.1012811f, -0.0000002f), + XMFLOAT3( 0.0000000f, 0.1012763f, -0.0000033f), +}; + +const XMFLOAT4 accessor_439[] = { + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), +}; + +const XMFLOAT3 accessor_440[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const 
XMFLOAT3 accessor_441[] = { + XMFLOAT3( 0.0000000f, 0.1024289f, -0.0000001f), + XMFLOAT3( 0.0000000f, 0.1024334f, -0.0000007f), +}; + +const XMFLOAT4 accessor_442[] = { + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), +}; + +const XMFLOAT3 accessor_443[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_444[] = { + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), +}; + +const XMFLOAT4 accessor_445[] = { + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), +}; + +const XMFLOAT3 accessor_446[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_447[] = { + XMFLOAT3(-0.0000000f, 0.2498146f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.2498151f, 0.0000002f), +}; + +const XMFLOAT4 accessor_448[] = { + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), +}; + +const XMFLOAT3 accessor_449[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_450[] = { + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_451[] = { + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_452[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_453[] = { + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_454[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_455[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_456[] = { + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733559f, -0.0045231f), + XMFLOAT3(-0.0020865f, 0.0732797f, -0.0046957f), + XMFLOAT3(-0.0020865f, 0.0731451f, -0.0049973f), + XMFLOAT3(-0.0020865f, 0.0729449f, -0.0054381f), + XMFLOAT3(-0.0020865f, 0.0726721f, -0.0060248f), + XMFLOAT3(-0.0020865f, 0.0723206f, -0.0067582f), + XMFLOAT3(-0.0020865f, 0.0718868f, -0.0076310f), + XMFLOAT3(-0.0020865f, 0.0713715f, -0.0086251f), + XMFLOAT3(-0.0020865f, 0.0707815f, -0.0097117f), + XMFLOAT3(-0.0020865f, 0.0701300f, -0.0108540f), + XMFLOAT3(-0.0020865f, 0.0694352f, -0.0120121f), + XMFLOAT3(-0.0020865f, 0.0687175f, -0.0131494f), + XMFLOAT3(-0.0020865f, 0.0679968f, -0.0142367f), + XMFLOAT3(-0.0020865f, 0.0672901f, -0.0152545f), + XMFLOAT3(-0.0020865f, 0.0666102f, -0.0161917f), + XMFLOAT3(-0.0020865f, 0.0659661f, -0.0170446f), + XMFLOAT3(-0.0020865f, 0.0653632f, -0.0178139f), + XMFLOAT3(-0.0020865f, 0.0648043f, -0.0185033f), + XMFLOAT3(-0.0020865f, 0.0642904f, -0.0191182f), + XMFLOAT3(-0.0020865f, 0.0638212f, -0.0196645f), + XMFLOAT3(-0.0020865f, 0.0633954f, -0.0201480f), + XMFLOAT3(-0.0020865f, 0.0630115f, -0.0205745f), + XMFLOAT3(-0.0020865f, 0.0626674f, -0.0209492f), + XMFLOAT3(-0.0020865f, 0.0623612f, -0.0212770f), + XMFLOAT3(-0.0020865f, 0.0620907f, -0.0215621f), + 
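// NOTE: the accessor arrays in this file appear in (XMFLOAT3 translation,
+  // XMFLOAT4 rotation quaternion, XMFLOAT3 scale) triplets -- per-joint
+  // keyframe tracks that the AnimationSampler definitions near the end of
+  // the file pair with float timestamp arrays such as accessor_486. + 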
XMFLOAT3(-0.0020865f, 0.0618539f, -0.0218083f), + XMFLOAT3(-0.0020865f, 0.0616489f, -0.0220190f), + XMFLOAT3(-0.0020865f, 0.0614737f, -0.0221972f), + XMFLOAT3(-0.0020865f, 0.0613267f, -0.0223456f), + XMFLOAT3(-0.0020865f, 0.0612061f, -0.0224664f), + XMFLOAT3(-0.0020865f, 0.0611105f, -0.0225617f), + XMFLOAT3(-0.0020865f, 0.0610383f, -0.0226332f), + XMFLOAT3(-0.0020865f, 0.0609883f, -0.0226827f), + XMFLOAT3(-0.0020865f, 0.0609591f, -0.0227115f), + XMFLOAT3(-0.0020865f, 0.0609496f, -0.0227208f), +}; + +const XMFLOAT4 accessor_457[] = { + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_458[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_459[] = { + XMFLOAT3(-0.0000000f, 0.0919256f, -0.0000004f), + XMFLOAT3( 0.0000000f, 0.0919258f, 0.0000003f), +}; + +const XMFLOAT4 accessor_460[] = { + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_461[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_462[] = { + XMFLOAT3(-0.0000001f, 0.1196961f, 0.0000000f), + XMFLOAT3( 0.0000006f, 0.1196968f, -0.0000000f), +}; + +const XMFLOAT4 accessor_463[] = { + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_464[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_465[] = { + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_466[] = { + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_467[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_468[] = { + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_469[] = { + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_470[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_471[] = { + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_472[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_473[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_474[] = { + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733559f, -0.0045231f), + XMFLOAT3(-0.0020865f, 0.0732797f, -0.0046957f), + XMFLOAT3(-0.0020865f, 0.0731451f, -0.0049973f), + XMFLOAT3(-0.0020865f, 0.0729449f, -0.0054381f), + XMFLOAT3(-0.0020865f, 0.0726721f, -0.0060248f), + XMFLOAT3(-0.0020865f, 0.0723206f, -0.0067582f), + XMFLOAT3(-0.0020865f, 0.0718868f, -0.0076310f), + XMFLOAT3(-0.0020865f, 0.0713715f, -0.0086251f), + XMFLOAT3(-0.0020865f, 0.0707815f, 
-0.0097117f), + XMFLOAT3(-0.0020865f, 0.0701300f, -0.0108540f), + XMFLOAT3(-0.0020865f, 0.0694352f, -0.0120121f), + XMFLOAT3(-0.0020865f, 0.0687175f, -0.0131494f), + XMFLOAT3(-0.0020865f, 0.0679968f, -0.0142367f), + XMFLOAT3(-0.0020865f, 0.0672901f, -0.0152545f), + XMFLOAT3(-0.0020865f, 0.0666102f, -0.0161917f), + XMFLOAT3(-0.0020865f, 0.0659661f, -0.0170446f), + XMFLOAT3(-0.0020865f, 0.0653632f, -0.0178139f), + XMFLOAT3(-0.0020865f, 0.0648043f, -0.0185033f), + XMFLOAT3(-0.0020865f, 0.0642904f, -0.0191182f), + XMFLOAT3(-0.0020865f, 0.0638212f, -0.0196645f), + XMFLOAT3(-0.0020865f, 0.0633954f, -0.0201480f), + XMFLOAT3(-0.0020865f, 0.0630115f, -0.0205745f), + XMFLOAT3(-0.0020865f, 0.0626674f, -0.0209492f), + XMFLOAT3(-0.0020865f, 0.0623612f, -0.0212770f), + XMFLOAT3(-0.0020865f, 0.0620907f, -0.0215621f), + XMFLOAT3(-0.0020865f, 0.0618539f, -0.0218083f), + XMFLOAT3(-0.0020865f, 0.0616489f, -0.0220190f), + XMFLOAT3(-0.0020865f, 0.0614737f, -0.0221972f), + XMFLOAT3(-0.0020865f, 0.0613267f, -0.0223456f), + XMFLOAT3(-0.0020865f, 0.0612061f, -0.0224664f), + XMFLOAT3(-0.0020865f, 0.0611105f, -0.0225617f), + XMFLOAT3(-0.0020865f, 0.0610383f, -0.0226332f), + XMFLOAT3(-0.0020865f, 0.0609883f, -0.0226827f), + XMFLOAT3(-0.0020865f, 0.0609591f, -0.0227115f), + XMFLOAT3(-0.0020865f, 0.0609496f, -0.0227208f), +}; + +const XMFLOAT4 accessor_475[] = { + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_476[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_477[] = { + XMFLOAT3(-0.0000000f, 0.0919253f, 0.0000002f), + XMFLOAT3(-0.0000000f, 0.0919256f, 0.0000006f), +}; + +const XMFLOAT4 accessor_478[] = { + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_479[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_480[] = { + XMFLOAT3( 0.0000001f, 0.1196966f, 0.0000001f), + XMFLOAT3(-0.0000006f, 0.1196973f, -0.0000000f), +}; + +const XMFLOAT4 accessor_481[] = { + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_482[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_483[] = { + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_484[] = { + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_485[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const float accessor_486[] = { + 0.0, + 0.0416666679084301, + 0.0833333358168602, + 0.125, + 0.1666666716337204, + 0.2083333283662796, + 0.25, + 0.2916666567325592, + 0.3333333432674408, + 0.375, + 0.4166666567325592, + 0.4583333432674408, + 0.5, + 0.5416666865348816, + 0.5833333134651184, + 0.625, + 0.6666666865348816, + 0.7083333134651184, + 0.75, + 0.7916666865348816, + 0.8333333134651184, + 0.875, +}; + +const XMFLOAT3 accessor_487[] = { + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0003281f), + XMFLOAT3(-0.0020865f, 0.0060253f, 
0.0095444f), + XMFLOAT3(-0.0020865f, 0.3033686f, 0.0213277f), + XMFLOAT3(-0.0020865f, 0.4733778f, 0.0312002f), + XMFLOAT3(-0.0020865f, 0.5038654f, 0.0353403f), + XMFLOAT3(-0.0020865f, 0.4858294f, 0.0337737f), + XMFLOAT3(-0.0020865f, 0.4345431f, 0.0294427f), + XMFLOAT3(-0.0020865f, 0.3543602f, 0.0229001f), + XMFLOAT3(-0.0020865f, 0.2512789f, 0.0146988f), + XMFLOAT3(-0.0020865f, 0.1325482f, 0.0053917f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0060253f, -0.0044682f), +}; + +const XMFLOAT4 accessor_488[] = { + XMFLOAT4(-0.1597332f, 0.0000000f, 0.0000000f, 0.9871602f), + XMFLOAT4(-0.1100656f, 0.0000000f, 0.0000000f, 0.9939243f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0915657f, 0.0000000f, 0.0000000f, 0.9957991f), + XMFLOAT4( 0.1314797f, 0.0000000f, 0.0000000f, 0.9913189f), + XMFLOAT4( 0.1455576f, 0.0000000f, 0.0000000f, 0.9893497f), + XMFLOAT4( 0.1069211f, 0.0000000f, 0.0000000f, 0.9942675f), + XMFLOAT4( 0.0142430f, 0.0000000f, 0.0000000f, 0.9998986f), + XMFLOAT4(-0.0964659f, 0.0000000f, 0.0000000f, 0.9953363f), + XMFLOAT4(-0.1883665f, 0.0000000f, 0.0000000f, 0.9820988f), + XMFLOAT4(-0.2264674f, 0.0000000f, 0.0000000f, 0.9740188f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_489[] = { + XMFLOAT3( 1.0000000f, 0.8592988f, 0.9980907f), + XMFLOAT3( 1.0000000f, 0.8614748f, 0.9981202f), + XMFLOAT3( 1.0000000f, 0.8698399f, 0.9982337f), + XMFLOAT3( 1.0000000f, 0.8906376f, 0.9985160f), + XMFLOAT3( 1.0000000f, 0.9550318f, 0.9993898f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9842575f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9449011f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8937379f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8425746f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8032182f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.7874756f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8095782f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.8622842f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9251915f, 1.0000000f), + XMFLOAT3( 1.0000000f, 0.9778975f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const float accessor_490[] = { + 0.0, + 0.875, +}; + +const XMFLOAT3 accessor_491[] = { + 
XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), + XMFLOAT3( 0.0004585f, 0.0671507f, 0.0012744f), +}; + +const XMFLOAT4 accessor_492[] = { + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), +}; + +const XMFLOAT3 accessor_493[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_494[] = { + XMFLOAT3(-0.0000000f, 0.2498147f, 0.0000002f), + XMFLOAT3( 0.0000000f, 0.2498153f, 0.0000001f), +}; + +const XMFLOAT4 accessor_495[] = { + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), +}; + +const XMFLOAT3 accessor_496[] = { + XMFLOAT3( 0.9999998f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999998f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_497[] = { + XMFLOAT3( 0.0000000f, 0.1246096f, -0.0425520f), + XMFLOAT3( 0.0000000f, 0.1246156f, -0.0109113f), + XMFLOAT3( 0.0000000f, 0.1246802f, 0.0365921f), + XMFLOAT3( 0.0000000f, 0.1250358f, 0.0385340f), + XMFLOAT3( 0.0000000f, 0.1282260f, 0.0287673f), + XMFLOAT3( 0.0000000f, 0.1395849f, 0.0060578f), + XMFLOAT3( 0.0000000f, 0.1484680f, 0.0039087f), + XMFLOAT3( 0.0000000f, 0.1566569f, 0.0033386f), + XMFLOAT3( 0.0000000f, 0.1634163f, 0.0032947f), + XMFLOAT3( 0.0000000f, 0.1680112f, 0.0027246f), + XMFLOAT3( 0.0000000f, 0.1697062f, 0.0005755f), + XMFLOAT3( 0.0000000f, 0.1406879f, -0.0024007f), + XMFLOAT3( 0.0000000f, 0.1054590f, 0.0167925f), + XMFLOAT3( 0.0000000f, 0.1006418f, 0.0259067f), + XMFLOAT3( 0.0000000f, 0.1068951f, 0.0211514f), + XMFLOAT3( 0.0000000f, 0.1242217f, 0.0079754f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), +}; + +const XMFLOAT4 accessor_498[] = { + XMFLOAT4(-0.1225295f, 0.0000000f, 0.0000000f, 0.9924649f), + XMFLOAT4(-0.0567851f, 0.0000000f, 0.0000000f, 0.9983864f), + XMFLOAT4( 0.0156293f, 0.0000000f, 0.0000000f, 0.9998779f), + XMFLOAT4( 0.0233419f, 0.0000000f, 0.0000000f, 0.9997275f), + XMFLOAT4( 0.0253458f, 0.0000000f, 0.0000000f, 0.9996787f), + XMFLOAT4( 0.0284723f, 0.0000000f, 0.0000000f, 0.9995946f), + XMFLOAT4( 0.0373068f, 0.0000000f, 0.0000000f, 0.9993039f), + XMFLOAT4( 0.0474630f, 0.0000000f, 0.0000000f, 0.9988730f), + XMFLOAT4( 0.0523185f, 0.0000000f, 0.0000000f, 0.9986304f), + XMFLOAT4( 0.0365627f, 0.0000000f, 0.0000000f, 0.9993314f), + XMFLOAT4( 0.0124368f, 0.0000000f, 0.0000000f, 0.9999226f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4(-0.0243295f, 0.0000000f, 0.0000000f, 0.9997039f), + XMFLOAT4(-0.0313913f, 0.0000000f, -0.0000000f, 0.9995072f), + XMFLOAT4(-0.0308447f, 0.0000000f, -0.0000000f, 0.9995242f), + XMFLOAT4(-0.0278059f, -0.0000000f, -0.0000000f, 0.9996133f), + XMFLOAT4(-0.0206368f, 0.0000000f, -0.0000000f, 0.9997871f), + XMFLOAT4(-0.0113960f, -0.0000000f, -0.0000000f, 0.9999351f), + XMFLOAT4(-0.0034049f, -0.0000000f, -0.0000000f, 0.9999942f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_499[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_500[] = { + XMFLOAT3( 
0.0000000f, 0.0502506f, -0.0410090f), + XMFLOAT3( 0.0000000f, 0.0519126f, -0.0208252f), + XMFLOAT3( 0.0000000f, 0.0583019f, 0.0102387f), + XMFLOAT3( 0.0000000f, 0.0741867f, 0.0143119f), + XMFLOAT3( 0.0000000f, 0.1233698f, 0.0151672f), + XMFLOAT3(-0.0000000f, 0.1577158f, 0.0105115f), + XMFLOAT3(-0.0000004f, 0.1572729f, 0.0034530f), + XMFLOAT3(-0.0000010f, 0.1562570f, -0.0120758f), + XMFLOAT3(-0.0000011f, 0.1551369f, -0.0276046f), + XMFLOAT3(-0.0000000f, 0.1543814f, -0.0346631f), + XMFLOAT3( 0.0000020f, 0.1537476f, -0.0246590f), + XMFLOAT3( 0.0000000f, 0.1501444f, -0.0060799f), + XMFLOAT3(-0.0001929f, 0.0552515f, 0.0039242f), + XMFLOAT3(-0.0000572f, 0.0875769f, 0.0018956f), + XMFLOAT3( 0.0000000f, 0.1049712f, 0.0007144f), + XMFLOAT3( 0.0000036f, 0.1077900f, 0.0004141f), + XMFLOAT3( 0.0000021f, 0.1105564f, 0.0001193f), + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1116755f, -0.0000000f), + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), +}; + +const XMFLOAT4 accessor_501[] = { + XMFLOAT4( 0.1687535f, 0.0001485f, -0.0011179f, 0.9856576f), + XMFLOAT4( 0.1641874f, 0.0001452f, -0.0011030f, 0.9864286f), + XMFLOAT4( 0.1321307f, 0.0001089f, -0.0010700f, 0.9912317f), + XMFLOAT4( 0.0425173f, 0.0000089f, -0.0010504f, 0.9990952f), + XMFLOAT4(-0.0663257f, -0.0001048f, -0.0010704f, 0.9977974f), + XMFLOAT4(-0.1190329f, -0.0001542f, -0.0011139f, 0.9928897f), + XMFLOAT4(-0.1009015f, -0.0001354f, -0.0011171f, 0.9948958f), + XMFLOAT4(-0.0608890f, -0.0000947f, -0.0011108f, 0.9981439f), + XMFLOAT4(-0.0207444f, -0.0000514f, -0.0011062f, 0.9997842f), + XMFLOAT4(-0.0024017f, -0.0000256f, -0.0011052f, 0.9999965f), + XMFLOAT4(-0.0023224f, -0.0000197f, -0.0010893f, 0.9999967f), + XMFLOAT4(-0.0023111f, -0.0000189f, -0.0010735f, 0.9999968f), + XMFLOAT4(-0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), + XMFLOAT4( 0.1278805f, 0.0001342f, -0.0011007f, 0.9917890f), + XMFLOAT4( 0.1862075f, 0.0001850f, -0.0011025f, 0.9825098f), + XMFLOAT4( 0.1473228f, 0.0001447f, -0.0010961f, 0.9890879f), + XMFLOAT4( 0.0385725f, 0.0000335f, -0.0011042f, 0.9992552f), + XMFLOAT4(-0.0275612f, -0.0000311f, -0.0011282f, 0.9996195f), + XMFLOAT4(-0.0204169f, -0.0000230f, -0.0011289f, 0.9997909f), + XMFLOAT4(-0.0071464f, -0.0000081f, -0.0011287f, 0.9999738f), + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), +}; + +const XMFLOAT3 accessor_502[] = { + XMFLOAT3( 1.1119719f, 0.8161622f, 0.9900533f), + XMFLOAT3( 1.1116862f, 0.8166313f, 0.9900787f), + XMFLOAT3( 1.1107776f, 0.8181230f, 0.9901594f), + XMFLOAT3( 1.1091545f, 0.8207878f, 0.9903035f), + XMFLOAT3( 1.1066993f, 0.8248183f, 0.9905214f), + XMFLOAT3( 1.1032585f, 0.8304676f, 0.9908272f), + XMFLOAT3( 1.0986220f, 0.8380802f, 0.9912391f), + XMFLOAT3( 1.0924909f, 0.8481464f, 0.9917837f), + XMFLOAT3( 1.0844150f, 0.8614054f, 0.9925011f), + XMFLOAT3( 1.0736568f, 0.8790682f, 0.9934567f), + XMFLOAT3( 1.0588560f, 0.9033691f, 0.9947716f), + XMFLOAT3( 1.0370246f, 0.9392121f, 0.9967110f), + XMFLOAT3( 1.1109213f, 0.9073296f, 1.0868970f), + XMFLOAT3( 1.0325081f, 0.9749655f, 1.0277655f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 
1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_503[] = { + XMFLOAT3(-0.0005524f, 0.0688295f, -0.0213631f), + XMFLOAT3(-0.0005524f, 0.0688295f, -0.0213631f), +}; + +const XMFLOAT4 accessor_504[] = { + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), +}; + +const XMFLOAT3 accessor_505[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_506[] = { + XMFLOAT3( 0.0000000f, 0.3082177f, -0.0000000f), + XMFLOAT3( 0.0000000f, 0.3082207f, -0.0000010f), +}; + +const XMFLOAT4 accessor_507[] = { + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), +}; + +const XMFLOAT3 accessor_508[] = { + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_509[] = { + XMFLOAT3( 0.0000000f, 0.3056000f, 0.0000004f), + XMFLOAT3( 0.0000000f, 0.3056044f, 0.0000002f), +}; + +const XMFLOAT4 accessor_510[] = { + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994640f), +}; + +const XMFLOAT3 accessor_511[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_512[] = { + XMFLOAT3( 0.0015863f, 0.2239287f, 0.1576325f), + XMFLOAT3( 0.0015815f, 0.2262271f, 0.1552215f), + XMFLOAT3( 0.0015629f, 0.2350632f, 0.1459529f), + XMFLOAT3( 0.0015168f, 0.2570313f, 0.1229092f), + XMFLOAT3( 0.0013740f, 0.3250495f, 0.0515609f), + XMFLOAT3( 0.0012743f, 0.3725485f, 0.0017364f), + XMFLOAT3( 0.0012766f, 0.3712253f, 0.0058122f), + XMFLOAT3( 0.0012820f, 0.3679919f, 0.0155316f), + XMFLOAT3( 0.0012880f, 0.3639544f, 0.0271321f), + XMFLOAT3( 0.0012921f, 0.3602186f, 0.0368514f), + XMFLOAT3( 0.0012917f, 0.3578899f, 0.0409273f), + XMFLOAT3( 0.0012936f, 0.3542452f, 0.0314968f), + XMFLOAT3( 0.0013404f, 0.3312452f, 0.0139831f), + XMFLOAT3( 0.0014914f, 0.2635176f, 0.0045527f), + XMFLOAT3( 0.0014853f, 0.2667689f, 0.0306998f), + XMFLOAT3( 0.0014733f, 0.2723543f, 0.0431161f), + XMFLOAT3( 0.0014353f, 0.2890725f, 0.0360221f), + XMFLOAT3( 0.0013524f, 0.3254242f, 0.0163660f), + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), +}; + +const XMFLOAT4 accessor_513[] = { + XMFLOAT4( 0.1017744f, 0.0000985f, 0.0011238f, 0.9948069f), + XMFLOAT4(-0.0664422f, -0.0001058f, 0.0011271f, 0.9977896f), + XMFLOAT4( 0.0065792f, 0.0000296f, 0.0011345f, 0.9999777f), + XMFLOAT4( 0.0795655f, 0.0002030f, 0.0011273f, 0.9968290f), + XMFLOAT4(-0.0557642f, 0.0000156f, 0.0011423f, 0.9984434f), + XMFLOAT4(-0.1900725f, -0.0002299f, 0.0011131f, 0.9817694f), + XMFLOAT4(-0.1829499f, -0.0002084f, 0.0011183f, 0.9831216f), + XMFLOAT4(-0.1601227f, -0.0001502f, 0.0011308f, 0.9870965f), + XMFLOAT4(-0.1193056f, -0.0000647f, 0.0011439f, 0.9928569f), + XMFLOAT4(-0.0581474f, 0.0000385f, 0.0011502f, 0.9983073f), + XMFLOAT4( 0.0255451f, 0.0001485f, 0.0011437f, 0.9996730f), + XMFLOAT4( 0.1210316f, 0.0002301f, 0.0011107f, 0.9926480f), + XMFLOAT4( 0.1685038f, 0.0002263f, 0.0010911f, 0.9857004f), + XMFLOAT4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), + XMFLOAT4( 0.1871809f, 0.0002103f, 0.0011074f, 0.9823248f), + XMFLOAT4( 0.2741494f, 0.0003082f, 0.0010835f, 0.9616865f), + XMFLOAT4( 
0.2213597f, 0.0002489f, 0.0010990f, 0.9751916f), + XMFLOAT4( 0.0721470f, 0.0000812f, 0.0011251f, 0.9973934f), + XMFLOAT4(-0.0191888f, -0.0000217f, 0.0011285f, 0.9998153f), + XMFLOAT4(-0.0142143f, -0.0000161f, 0.0011286f, 0.9998984f), + XMFLOAT4(-0.0049752f, -0.0000056f, 0.0011287f, 0.9999870f), + XMFLOAT4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), +}; + +const XMFLOAT3 accessor_514[] = { + XMFLOAT3( 1.0000001f, 1.1504586f, 1.0014217f), + XMFLOAT3( 0.9999999f, 1.1500801f, 1.0014180f), + XMFLOAT3( 0.9999999f, 1.1488986f, 1.0014070f), + XMFLOAT3( 0.9999999f, 1.1468363f, 1.0013874f), + XMFLOAT3( 1.0000001f, 1.1437984f, 1.0013586f), + XMFLOAT3( 0.9999999f, 1.1396673f, 1.0013196f), + XMFLOAT3( 1.0000001f, 1.1342896f, 1.0012687f), + XMFLOAT3( 1.0000001f, 1.1274629f, 1.0012043f), + XMFLOAT3( 0.9999999f, 1.1189024f, 1.0011234f), + XMFLOAT3( 0.9999999f, 1.1081806f, 1.0010223f), + XMFLOAT3( 0.9999999f, 1.0945803f, 1.0008935f), + XMFLOAT3( 1.0000001f, 1.0766469f, 1.0007242f), + XMFLOAT3( 0.9999999f, 1.0497506f, 1.0004700f), + XMFLOAT3( 0.9999999f, 1.0188085f, 1.0001775f), + XMFLOAT3( 1.0000001f, 1.0037625f, 1.0000355f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000001f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_515[] = { + XMFLOAT3( 0.2488541f, 0.2033312f, -0.0450287f), + XMFLOAT3( 0.2488541f, 0.2033313f, -0.0450287f), +}; + +const XMFLOAT4 accessor_516[] = { + XMFLOAT4(-0.1444001f, -0.0000000f, 0.0000000f, 0.9895194f), + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), +}; + +const XMFLOAT3 accessor_517[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_518[] = { + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), + XMFLOAT3(-0.0004506f, 0.2559274f, 0.0044682f), +}; + +const XMFLOAT4 accessor_519[] = { + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), +}; + +const XMFLOAT3 accessor_520[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_521[] = { + XMFLOAT3( 0.0000000f, 0.1024673f, 0.0000021f), + XMFLOAT3(-0.0000000f, 0.1024613f, -0.0000003f), +}; + +const XMFLOAT4 accessor_522[] = { + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_523[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000000f), +}; + +const XMFLOAT3 accessor_524[] = { + XMFLOAT3( 0.0000000f, 0.1026061f, -0.0000003f), + XMFLOAT3(-0.0000000f, 0.1026039f, 0.0000002f), +}; + +const XMFLOAT4 accessor_525[] = { + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), +}; + +const XMFLOAT3 accessor_526[] = { + XMFLOAT3( 1.0000000f, 0.9999998f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_527[] = { + XMFLOAT3( 0.0000000f, 0.1033664f, 0.0000010f), + XMFLOAT3(-0.0000000f, 0.1033674f, -0.0000001f), +}; + +const XMFLOAT4 accessor_528[] = { + XMFLOAT4( 0.0181255f, 0.0003339f, 0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), 
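+  // NOTE: XMFLOAT4 tracks such as this one hold unit quaternions in
+  // (x, y, z, w) component order, the layout XMLoadFloat4 expects; a
+  // minimal blending sketch (a, i and t are placeholders, not symbols
+  // from this file):
+  //   XMVECTOR q = XMQuaternionSlerp(XMLoadFloat4(&a[i]),
+  //                                  XMLoadFloat4(&a[i + 1]), t);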
+}; + +const XMFLOAT3 accessor_529[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_530[] = { + XMFLOAT3( 0.0000000f, 0.1012722f, -0.0000000f), + XMFLOAT3(-0.0000000f, 0.1012845f, -0.0000001f), +}; + +const XMFLOAT4 accessor_531[] = { + XMFLOAT4( 0.0250840f, -0.0004247f, 0.0000082f, 0.9996852f), + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996852f), +}; + +const XMFLOAT3 accessor_532[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999998f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_533[] = { + XMFLOAT3( 0.0000000f, 0.1024291f, 0.0000000f), + XMFLOAT3(-0.0000000f, 0.1024317f, -0.0000007f), +}; + +const XMFLOAT4 accessor_534[] = { + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992629f), +}; + +const XMFLOAT3 accessor_535[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_536[] = { + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), + XMFLOAT3(-0.0027646f, 0.0680362f, -0.0078378f), +}; + +const XMFLOAT4 accessor_537[] = { + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), +}; + +const XMFLOAT3 accessor_538[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_539[] = { + XMFLOAT3( 0.0000000f, 0.3082150f, 0.0000001f), + XMFLOAT3(-0.0000000f, 0.3082130f, -0.0000004f), +}; + +const XMFLOAT4 accessor_540[] = { + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), +}; + +const XMFLOAT3 accessor_541[] = { + XMFLOAT3( 1.0000001f, 1.0000001f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), +}; + +const XMFLOAT3 accessor_542[] = { + XMFLOAT3(-0.0000000f, 0.3056078f, -0.0000030f), + XMFLOAT3(-0.0000000f, 0.3055982f, -0.0000002f), +}; + +const XMFLOAT4 accessor_543[] = { + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), +}; + +const XMFLOAT3 accessor_544[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999998f), + XMFLOAT3( 1.0000000f, 1.0000001f, 0.9999999f), +}; + +const XMFLOAT3 accessor_545[] = { + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), + XMFLOAT3(-0.0013285f, 0.2559254f, 0.0044682f), +}; + +const XMFLOAT4 accessor_546[] = { + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990952f, 0.0011277f), +}; + +const XMFLOAT3 accessor_547[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100001f), +}; + +const XMFLOAT3 accessor_548[] = { + XMFLOAT3(-0.0000000f, 0.1024574f, -0.0000004f), + XMFLOAT3( 0.0000000f, 0.1024598f, 0.0000012f), +}; + +const XMFLOAT4 accessor_549[] = { + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), +}; + +const XMFLOAT3 accessor_550[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_551[] = { + XMFLOAT3(-0.0000001f, 0.1026062f, 0.0000004f), + XMFLOAT3(-0.0000000f, 0.1026044f, 0.0000000f), +}; + +const XMFLOAT4 accessor_552[] = { + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 
0.9999018f), +}; + +const XMFLOAT3 accessor_553[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000001f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), +}; + +const XMFLOAT3 accessor_554[] = { + XMFLOAT3(-0.0000000f, 0.1033718f, 0.0000001f), + XMFLOAT3( 0.0000000f, 0.1033676f, 0.0000003f), +}; + +const XMFLOAT4 accessor_555[] = { + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), +}; + +const XMFLOAT3 accessor_556[] = { + XMFLOAT3( 1.0000001f, 1.0000001f, 1.0000001f), + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), +}; + +const XMFLOAT3 accessor_557[] = { + XMFLOAT3( 0.0000000f, 0.1012763f, -0.0000033f), + XMFLOAT3(-0.0000000f, 0.1012811f, -0.0000002f), +}; + +const XMFLOAT4 accessor_558[] = { + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), +}; + +const XMFLOAT3 accessor_559[] = { + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), +}; + +const XMFLOAT3 accessor_560[] = { + XMFLOAT3( 0.0000000f, 0.1024334f, -0.0000007f), + XMFLOAT3( 0.0000000f, 0.1024289f, -0.0000001f), +}; + +const XMFLOAT4 accessor_561[] = { + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), +}; + +const XMFLOAT3 accessor_562[] = { + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_563[] = { + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), + XMFLOAT3(-0.0000347f, 0.0679304f, -0.0016926f), +}; + +const XMFLOAT4 accessor_564[] = { + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), +}; + +const XMFLOAT3 accessor_565[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_566[] = { + XMFLOAT3( 0.0000000f, 0.2498151f, 0.0000002f), + XMFLOAT3(-0.0000000f, 0.2498146f, 0.0000000f), +}; + +const XMFLOAT4 accessor_567[] = { + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), +}; + +const XMFLOAT3 accessor_568[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_569[] = { + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_570[] = { + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_571[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_572[] = { + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_573[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_574[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_575[] = { + XMFLOAT3(-0.0020865f, 0.0609496f, -0.0227208f), + XMFLOAT3(-0.0020865f, 0.0626438f, -0.0130235f), + XMFLOAT3(-0.0020865f, 0.0646131f, 0.0095444f), + XMFLOAT3(-0.0020865f, 0.3623514f, 0.0322673f), + XMFLOAT3(-0.0020865f, 0.5354798f, 0.0479684f), + 
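// NOTE: translation and scale tracks blend component-wise; a minimal
+  // sketch, assuming LINEAR interpolation between bracketing keyframes
+  // i and i + 1 with normalized phase t (track, i and t are placeholders):
+  //   XMVECTOR v = XMVectorLerp(XMLoadFloat3(&track[i]),
+  //                             XMLoadFloat3(&track[i + 1]), t);
+  //   XMFLOAT3 out; XMStoreFloat3(&out, v); + 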
XMFLOAT3(-0.0020865f, 0.5683661f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.5506080f, 0.0478690f), + XMFLOAT3(-0.0020865f, 0.4981608f, 0.0312555f), + XMFLOAT3(-0.0020865f, 0.4134374f, 0.0113403f), + XMFLOAT3(-0.0020865f, 0.3040031f, -0.0062986f), + XMFLOAT3(-0.0020865f, 0.1810994f, -0.0184756f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0605542f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0641042f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0683413f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0718913f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), +}; + +const XMFLOAT4 accessor_576[] = { + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_577[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_578[] = { + XMFLOAT3( 0.0000000f, 0.0919258f, 0.0000003f), + XMFLOAT3(-0.0000000f, 0.0919256f, -0.0000004f), +}; + +const XMFLOAT4 accessor_579[] = { + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, -0.2451639f, 0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_580[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_581[] = { + XMFLOAT3( 0.0000006f, 0.1196968f, -0.0000000f), + XMFLOAT3(-0.0000001f, 0.1196961f, 0.0000000f), +}; + +const XMFLOAT4 accessor_582[] = { + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_583[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_584[] = { + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_585[] = { + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_586[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const XMFLOAT3 accessor_587[] = { + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), +}; + +const XMFLOAT4 accessor_588[] = { + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), +}; + +const XMFLOAT3 accessor_589[] = { + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), +}; + +const XMFLOAT3 accessor_590[] = { + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), +}; + +const XMFLOAT4 accessor_591[] = { + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), +}; + +const XMFLOAT3 accessor_592[] = { + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), +}; + +const XMFLOAT3 accessor_593[] = { + XMFLOAT3(-0.0020865f, 0.0609496f, -0.0227208f), + XMFLOAT3(-0.0020865f, 0.0626438f, -0.0130235f), + XMFLOAT3(-0.0020865f, 0.0646131f, 0.0095444f), + 
XMFLOAT3(-0.0020865f, 0.3623514f, 0.0322673f), + XMFLOAT3(-0.0020865f, 0.5354798f, 0.0479684f), + XMFLOAT3(-0.0020865f, 0.5683661f, 0.0547394f), + XMFLOAT3(-0.0020865f, 0.5506080f, 0.0478690f), + XMFLOAT3(-0.0020865f, 0.4981608f, 0.0312555f), + XMFLOAT3(-0.0020865f, 0.4134374f, 0.0113403f), + XMFLOAT3(-0.0020865f, 0.3040031f, -0.0062986f), + XMFLOAT3(-0.0020865f, 0.1810994f, -0.0184756f), + XMFLOAT3(-0.0020865f, 0.0590655f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0605542f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0641042f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0683413f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0718913f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), + XMFLOAT3(-0.0020865f, 0.0733800f, -0.0044682f), +}; + +const XMFLOAT4 accessor_594[] = { + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), +}; + +const XMFLOAT3 accessor_595[] = { + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), + XMFLOAT3( 0.0100000f, 0.0100000f, 0.0100000f), +}; + +const XMFLOAT3 accessor_596[] = { + XMFLOAT3(-0.0000000f, 0.0919256f, 0.0000006f), + XMFLOAT3(-0.0000000f, 0.0919253f, 0.0000002f), +}; + +const XMFLOAT4 accessor_597[] = { + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), + XMFLOAT4(-0.6492797f, 0.2451639f, -0.6809444f, 0.2337631f), +}; + +const XMFLOAT3 accessor_598[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999996f), +}; + +const XMFLOAT3 accessor_599[] = { + XMFLOAT3(-0.0000006f, 0.1196973f, -0.0000000f), + XMFLOAT3( 0.0000001f, 0.1196966f, 0.0000001f), +}; + +const XMFLOAT4 accessor_600[] = { + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402116f, 0.9402092f), +}; + +const XMFLOAT3 accessor_601[] = { + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), + XMFLOAT3( 0.9999999f, 0.9999999f, 0.9999999f), +}; + +const XMFLOAT3 accessor_602[] = { + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), +}; + +const XMFLOAT4 accessor_603[] = { + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), +}; + +const XMFLOAT3 accessor_604[] = { + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), +}; + +const Mesh mesh_0 = { + accessor_0, // position + accessor_0__size, + accessor_1, // normal + accessor_1__size, + accessor_2, // texcoord_0 + accessor_2__size, + accessor_7, // weights_0 + accessor_7__size, + accessor_6, // joints_0 + accessor_6__size, + accessor_8, // indices + accessor_8__size, +}; + +extern const Skin skin_0; +const Node node_0 = { + 1, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.2498147f, 0.0000000f), // translation + XMFLOAT4( 0.0460666f, -0.0000356f, 0.0064854f, 0.9989173f), // rotation + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), // scale +}; + +const Node node_1 = { + 26, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.1015303f, -0.0197356f, -0.0097731f), // translation + XMFLOAT4( 0.9995915f, 0.0019270f, 0.0000546f, 0.0285151f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000026f), // scale +}; + +const Node node_2 = { + 3, // parent_ix + NULL, // skin + NULL, // mesh + 
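// NOTE: the three fields below are this node's local TRS; a minimal
+  // composition sketch with DirectXMath (node and worlds are placeholders,
+  // not symbols from this file):
+  //   XMMATRIX local = XMMatrixAffineTransformation(
+  //       XMLoadFloat3(&node->scale),         // scaling
+  //       XMVectorZero(),                     // rotation origin
+  //       XMLoadFloat4(&node->rotation),      // rotation quaternion
+  //       XMLoadFloat3(&node->translation));  // translation
+  //   world = local * worlds[parent_ix];      // parent_ix -1 marks the root + 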
XMFLOAT3( 0.0000000f, 0.3056018f, 0.0000000f), // translation + XMFLOAT4( 0.0327397f, 0.0000000f, 0.0000000f, 0.9994639f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), // scale +}; + +const Node node_3 = { + 4, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.3082168f, -0.0000000f), // translation + XMFLOAT4(-0.0651667f, -0.0000004f, 0.0015632f, 0.9978732f), // rotation + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), // scale +}; + +const Node node_4 = { + 22, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.2405819f, 0.2553854f, 0.0044682f), // translation + XMFLOAT4(-0.9994694f, -0.0026905f, 0.0000869f, 0.0324617f), // rotation + XMFLOAT3( 1.0000001f, 0.9999999f, 1.0000046f), // scale +}; + +const Node node_5 = { + 6, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.2488541f, 0.2033314f, -0.0450287f), // translation + XMFLOAT4(-0.1444001f, 0.0000000f, -0.0000000f, 0.9895194f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_6 = { + 22, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0013047f, 0.3463302f, 0.0044682f), // translation + XMFLOAT4(-0.0000001f, -0.0000000f, 0.0011287f, 0.9999994f), // rotation + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), // scale +}; + +const Node node_7 = { + 8, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.1024318f, -0.0000000f), // translation + XMFLOAT4( 0.0383830f, 0.0004671f, 0.0000142f, 0.9992630f), // rotation + XMFLOAT3( 1.0000001f, 1.0000000f, 1.0000001f), // scale +}; + +const Node node_8 = { + 9, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1012803f, 0.0000000f), // translation + XMFLOAT4( 0.0250839f, -0.0004247f, 0.0000082f, 0.9996853f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_9 = { + 10, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1033667f, 0.0000000f), // translation + XMFLOAT4( 0.0181256f, 0.0003339f, 0.0000077f, 0.9998357f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_10 = { + 11, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1026060f, 0.0000000f), // translation + XMFLOAT4( 0.0140155f, 0.0000431f, 0.0000006f, 0.9999018f), // rotation + XMFLOAT3( 1.0000000f, 0.9999999f, 1.0000000f), // scale +}; + +const Node node_11 = { + 12, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1024613f, -0.0000000f), // translation + XMFLOAT4( 0.0113911f, 0.0000000f, -0.0000000f, 0.9999352f), // rotation + XMFLOAT3( 1.0000000f, 1.0000002f, 1.0000001f), // scale +}; + +const Node node_12 = { + 22, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.2396202f, 0.2553876f, 0.0044682f), // translation + XMFLOAT4(-0.0000482f, 0.0425160f, -0.9990951f, 0.0011277f), // rotation + XMFLOAT3( 0.9999999f, 1.0000001f, 1.0000131f), // scale +}; + +const Node node_13 = { + 14, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.3056017f, 0.0000000f), // translation + XMFLOAT4( 0.0327397f, -0.0000000f, -0.0000000f, 0.9994639f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), // scale +}; + +const Node node_14 = { + 15, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.3082169f, 0.0000000f), // translation + XMFLOAT4(-0.0651667f, 0.0000004f, -0.0015633f, 0.9978732f), // rotation + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), // scale +}; + +const Node node_15 = { + 22, // 
parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.2435993f, 0.2564784f, 0.0044683f), // translation + XMFLOAT4(-0.9994729f, 0.0004344f, -0.0000141f, 0.0324618f), // rotation + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000043f), // scale +}; + +const Node node_16 = { + 17, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1024322f, 0.0000000f), // translation + XMFLOAT4( 0.0382110f, -0.0004670f, -0.0000141f, 0.9992696f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_17 = { + 18, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.1012807f, 0.0000000f), // translation + XMFLOAT4( 0.0250840f, 0.0004248f, -0.0000082f, 0.9996853f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000001f), // scale +}; + +const Node node_18 = { + 19, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1033668f, 0.0000000f), // translation + XMFLOAT4( 0.0181256f, -0.0003326f, -0.0000077f, 0.9998357f), // rotation + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), // scale +}; + +const Node node_19 = { + 20, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1026065f, -0.0000000f), // translation + XMFLOAT4( 0.0140155f, -0.0000444f, -0.0000006f, 0.9999018f), // rotation + XMFLOAT3( 1.0000000f, 0.9999999f, 0.9999999f), // scale +}; + +const Node node_20 = { + 21, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.1024597f, -0.0000000f), // translation + XMFLOAT4( 0.0113909f, -0.0000002f, -0.0000000f, 0.9999352f), // rotation + XMFLOAT3( 1.0000000f, 1.0000001f, 1.0000000f), // scale +}; + +const Node node_21 = { + 22, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.2426377f, 0.2564762f, 0.0044682f), // translation + XMFLOAT4(-0.0000471f, 0.0425161f, -0.9990951f, 0.0011277f), // rotation + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000130f), // scale +}; + +const Node node_22 = { + 23, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.1116755f, 0.0000000f), // translation + XMFLOAT4( 0.0000000f, 0.0000000f, -0.0011287f, 0.9999994f), // rotation + XMFLOAT3( 0.9999999f, 0.9999999f, 1.0000000f), // scale +}; + +const Node node_23 = { + 26, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.1347095f, 0.0000000f), // translation + XMFLOAT4( 0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_24 = { + 25, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.2498148f, -0.0000000f), // translation + XMFLOAT4( 0.0460732f, 0.0000356f, -0.0064861f, 0.9989170f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_25 = { + 26, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.1057032f, -0.0197356f, -0.0097731f), // translation + XMFLOAT4( 0.9995847f, -0.0020122f, 0.0037813f, 0.0284986f), // rotation + XMFLOAT3( 0.9999999f, 1.0000000f, 1.0000025f), // scale +}; + +const Node node_26 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0020865f, 0.6493472f, -0.0044682f), // translation + XMFLOAT4(-0.0000001f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_27 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.2411295f, 0.5391477f, -0.0000001f), // translation + XMFLOAT4(-0.4999999f, -0.5000001f, 0.5000001f, 0.4999999f), // rotation + XMFLOAT3( 0.9999999f, 0.9999998f, 
0.9999999f), // scale +}; + +const Node node_28 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.2411295f, 0.8440942f, -0.0870393f), // translation + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_29 = { + 30, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.1196966f, 0.0000000f), // translation + XMFLOAT4( 0.0162064f, 0.0000001f, 0.3402117f, 0.9402092f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_30 = { + 31, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.0919261f, 0.0000000f), // translation + XMFLOAT4(-0.6492796f, -0.2451639f, 0.6809445f, 0.2337631f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999996f), // scale +}; + +const Node node_31 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.1054246f, 0.0000000f, -0.0106811f), // translation + XMFLOAT4( 0.7084953f, 0.0000003f, 0.0000003f, 0.7057156f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_32 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.1026551f, 0.3802050f, 0.2318209f), // translation + XMFLOAT4( 0.7071055f, 0.0013631f, 0.0013631f, 0.7071055f), // rotation + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_33 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.2411295f, 0.5391477f, -0.0000001f), // translation + XMFLOAT4(-0.4999999f, 0.5000001f, -0.5000001f, 0.4999999f), // rotation + XMFLOAT3( 0.9999999f, 0.9999998f, 0.9999999f), // scale +}; + +const Node node_34 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.2411295f, 0.8440942f, -0.0870393f), // translation + XMFLOAT4(-0.7071068f, 0.0000000f, 0.0000000f, 0.7071068f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_35 = { + 36, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3(-0.0000000f, 0.1196966f, 0.0000000f), // translation + XMFLOAT4( 0.0162064f, -0.0000001f, -0.3402117f, 0.9402092f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_36 = { + 37, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.0919261f, 0.0000000f), // translation + XMFLOAT4(-0.6492796f, 0.2451639f, -0.6809445f, 0.2337631f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999996f), // scale +}; + +const Node node_37 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.1054246f, 0.0000000f, -0.0106811f), // translation + XMFLOAT4( 0.7084953f, -0.0000003f, -0.0000003f, 0.7057156f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_38 = { + 40, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.1026551f, 0.3802050f, 0.2318209f), // translation + XMFLOAT4( 0.7071055f, -0.0013631f, -0.0013631f, 0.7071055f), // rotation + XMFLOAT3( 0.9999999f, 1.0000000f, 0.9999999f), // scale +}; + +const Node node_39 = { + 40, // parent_ix + &skin_0, // skin + &mesh_0, // mesh + XMFLOAT3( 0.0000000f, 0.0000000f, 0.0000000f), // translation + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 1.0000000f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node node_40 = { + (int)-1, // parent_ix + NULL, // skin + NULL, // mesh + XMFLOAT3( 0.0000000f, 0.0000000f, 0.0000000f), // translation + XMFLOAT4( 0.0000000f, 0.0000000f, 0.0000000f, 
1.0000000f), // rotation + XMFLOAT3( 1.0000000f, 1.0000000f, 1.0000000f), // scale +}; + +const Node * nodes[] = { + &node_0, + &node_1, + &node_2, + &node_3, + &node_4, + &node_5, + &node_6, + &node_7, + &node_8, + &node_9, + &node_10, + &node_11, + &node_12, + &node_13, + &node_14, + &node_15, + &node_16, + &node_17, + &node_18, + &node_19, + &node_20, + &node_21, + &node_22, + &node_23, + &node_24, + &node_25, + &node_26, + &node_27, + &node_28, + &node_29, + &node_30, + &node_31, + &node_32, + &node_33, + &node_34, + &node_35, + &node_36, + &node_37, + &node_38, + &node_39, + &node_40, +}; + +const int skin_0__joints[] = { + 26, + 1, + 0, + 23, + 22, + 4, + 3, + 2, + 6, + 5, + 12, + 11, + 10, + 9, + 8, + 7, + 15, + 14, + 13, + 21, + 20, + 19, + 18, + 17, + 16, + 25, + 24, + 27, + 28, + 31, + 30, + 29, + 32, + 33, + 34, + 37, + 36, + 35, + 38, +}; + +const Skin skin_0 = { + accessor_9, // inverse bind matrices + skin_0__joints, // joints + 39, // joints length +}; + +const AnimationSampler animation_0__sampler_0 = { + accessor_10, // input, keyframe timestamps + accessor_11, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_1 = { + accessor_10, // input, keyframe timestamps + accessor_12, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_2 = { + accessor_10, // input, keyframe timestamps + accessor_13, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_3 = { + accessor_14, // input, keyframe timestamps + accessor_15, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_4 = { + accessor_14, // input, keyframe timestamps + accessor_16, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_5 = { + accessor_14, // input, keyframe timestamps + accessor_17, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_6 = { + accessor_14, // input, keyframe timestamps + accessor_18, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_7 = { + accessor_14, // input, keyframe timestamps + accessor_19, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_8 = { + accessor_14, // input, keyframe timestamps + accessor_20, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_9 = { + accessor_10, // input, keyframe timestamps + accessor_21, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_10 = { + accessor_10, // input, keyframe timestamps + accessor_22, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_11 = { + accessor_14, // input, keyframe timestamps + accessor_23, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_12 = { + accessor_10, // input, keyframe timestamps + accessor_24, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_13 = { + accessor_10, // input, keyframe timestamps + accessor_25, // output, keyframe values (void *) + 
accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_14 = { + accessor_14, // input, keyframe timestamps + accessor_26, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_15 = { + accessor_14, // input, keyframe timestamps + accessor_27, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_16 = { + accessor_14, // input, keyframe timestamps + accessor_28, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_17 = { + accessor_14, // input, keyframe timestamps + accessor_29, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_18 = { + accessor_14, // input, keyframe timestamps + accessor_30, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_19 = { + accessor_14, // input, keyframe timestamps + accessor_31, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_20 = { + accessor_14, // input, keyframe timestamps + accessor_32, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_21 = { + accessor_14, // input, keyframe timestamps + accessor_33, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_22 = { + accessor_14, // input, keyframe timestamps + accessor_34, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_23 = { + accessor_14, // input, keyframe timestamps + accessor_35, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_24 = { + accessor_10, // input, keyframe timestamps + accessor_36, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_25 = { + accessor_10, // input, keyframe timestamps + accessor_37, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_26 = { + accessor_14, // input, keyframe timestamps + accessor_38, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_27 = { + accessor_14, // input, keyframe timestamps + accessor_39, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_28 = { + accessor_14, // input, keyframe timestamps + accessor_40, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_29 = { + accessor_14, // input, keyframe timestamps + accessor_41, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_30 = { + accessor_14, // input, keyframe timestamps + accessor_42, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_31 = { + accessor_14, // input, keyframe timestamps + accessor_43, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_32 = { + accessor_14, // input, keyframe timestamps + accessor_44, // output, keyframe values (void *) + 
accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_33 = { + accessor_14, // input, keyframe timestamps + accessor_45, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_34 = { + accessor_14, // input, keyframe timestamps + accessor_46, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_35 = { + accessor_14, // input, keyframe timestamps + accessor_47, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_36 = { + accessor_14, // input, keyframe timestamps + accessor_48, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_37 = { + accessor_14, // input, keyframe timestamps + accessor_49, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_38 = { + accessor_14, // input, keyframe timestamps + accessor_50, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_39 = { + accessor_14, // input, keyframe timestamps + accessor_51, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_40 = { + accessor_14, // input, keyframe timestamps + accessor_52, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_41 = { + accessor_14, // input, keyframe timestamps + accessor_53, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_42 = { + accessor_14, // input, keyframe timestamps + accessor_54, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_43 = { + accessor_14, // input, keyframe timestamps + accessor_55, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_44 = { + accessor_14, // input, keyframe timestamps + accessor_56, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_45 = { + accessor_14, // input, keyframe timestamps + accessor_57, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_46 = { + accessor_14, // input, keyframe timestamps + accessor_58, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_47 = { + accessor_14, // input, keyframe timestamps + accessor_59, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_48 = { + accessor_14, // input, keyframe timestamps + accessor_60, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_49 = { + accessor_14, // input, keyframe timestamps + accessor_61, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_50 = { + accessor_14, // input, keyframe timestamps + accessor_62, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_51 = { + accessor_14, // input, keyframe timestamps + accessor_63, // output, keyframe values (void *) + 
accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_52 = { + accessor_14, // input, keyframe timestamps + accessor_64, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_53 = { + accessor_14, // input, keyframe timestamps + accessor_65, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_54 = { + accessor_14, // input, keyframe timestamps + accessor_66, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_55 = { + accessor_14, // input, keyframe timestamps + accessor_67, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_56 = { + accessor_14, // input, keyframe timestamps + accessor_68, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_57 = { + accessor_14, // input, keyframe timestamps + accessor_69, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_58 = { + accessor_14, // input, keyframe timestamps + accessor_70, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_59 = { + accessor_14, // input, keyframe timestamps + accessor_71, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_60 = { + accessor_14, // input, keyframe timestamps + accessor_72, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_61 = { + accessor_14, // input, keyframe timestamps + accessor_73, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_62 = { + accessor_14, // input, keyframe timestamps + accessor_74, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_63 = { + accessor_14, // input, keyframe timestamps + accessor_75, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_64 = { + accessor_14, // input, keyframe timestamps + accessor_76, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_65 = { + accessor_14, // input, keyframe timestamps + accessor_77, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_66 = { + accessor_14, // input, keyframe timestamps + accessor_78, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_67 = { + accessor_14, // input, keyframe timestamps + accessor_79, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_68 = { + accessor_14, // input, keyframe timestamps + accessor_80, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_69 = { + accessor_14, // input, keyframe timestamps + accessor_81, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_70 = { + accessor_14, // input, keyframe timestamps + accessor_82, // output, keyframe values (void *) + 
accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_71 = { + accessor_14, // input, keyframe timestamps + accessor_83, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_72 = { + accessor_14, // input, keyframe timestamps + accessor_84, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_73 = { + accessor_14, // input, keyframe timestamps + accessor_85, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_74 = { + accessor_14, // input, keyframe timestamps + accessor_86, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_75 = { + accessor_14, // input, keyframe timestamps + accessor_87, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_76 = { + accessor_14, // input, keyframe timestamps + accessor_88, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_77 = { + accessor_14, // input, keyframe timestamps + accessor_89, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_78 = { + accessor_14, // input, keyframe timestamps + accessor_90, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_79 = { + accessor_14, // input, keyframe timestamps + accessor_91, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_80 = { + accessor_14, // input, keyframe timestamps + accessor_92, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_81 = { + accessor_14, // input, keyframe timestamps + accessor_93, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_82 = { + accessor_14, // input, keyframe timestamps + accessor_94, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_83 = { + accessor_14, // input, keyframe timestamps + accessor_95, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_84 = { + accessor_14, // input, keyframe timestamps + accessor_96, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_85 = { + accessor_14, // input, keyframe timestamps + accessor_97, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_86 = { + accessor_14, // input, keyframe timestamps + accessor_98, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_87 = { + accessor_10, // input, keyframe timestamps + accessor_99, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_88 = { + accessor_14, // input, keyframe timestamps + accessor_100, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_89 = { + accessor_14, // input, keyframe timestamps + accessor_101, // output, keyframe values (void *) + 
accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_90 = { + accessor_14, // input, keyframe timestamps + accessor_102, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_91 = { + accessor_14, // input, keyframe timestamps + accessor_103, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_92 = { + accessor_14, // input, keyframe timestamps + accessor_104, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_93 = { + accessor_14, // input, keyframe timestamps + accessor_105, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_94 = { + accessor_14, // input, keyframe timestamps + accessor_106, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_95 = { + accessor_14, // input, keyframe timestamps + accessor_107, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_96 = { + accessor_14, // input, keyframe timestamps + accessor_108, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_97 = { + accessor_14, // input, keyframe timestamps + accessor_109, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_98 = { + accessor_14, // input, keyframe timestamps + accessor_110, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_99 = { + accessor_14, // input, keyframe timestamps + accessor_111, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_100 = { + accessor_14, // input, keyframe timestamps + accessor_112, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_101 = { + accessor_14, // input, keyframe timestamps + accessor_113, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_102 = { + accessor_14, // input, keyframe timestamps + accessor_114, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_103 = { + accessor_14, // input, keyframe timestamps + accessor_115, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_104 = { + accessor_14, // input, keyframe timestamps + accessor_116, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_105 = { + accessor_10, // input, keyframe timestamps + accessor_117, // output, keyframe values (void *) + accessor_10__length, // length +}; + +const AnimationSampler animation_0__sampler_106 = { + accessor_14, // input, keyframe timestamps + accessor_118, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_107 = { + accessor_14, // input, keyframe timestamps + accessor_119, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_108 = { + accessor_14, // input, keyframe timestamps + accessor_120, // output, 
keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_109 = { + accessor_14, // input, keyframe timestamps + accessor_121, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_110 = { + accessor_14, // input, keyframe timestamps + accessor_122, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_111 = { + accessor_14, // input, keyframe timestamps + accessor_123, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_112 = { + accessor_14, // input, keyframe timestamps + accessor_124, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_113 = { + accessor_14, // input, keyframe timestamps + accessor_125, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_114 = { + accessor_14, // input, keyframe timestamps + accessor_126, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_115 = { + accessor_14, // input, keyframe timestamps + accessor_127, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationSampler animation_0__sampler_116 = { + accessor_14, // input, keyframe timestamps + accessor_128, // output, keyframe values (void *) + accessor_14__length, // length +}; + +const AnimationChannel animation_0__channels[] = { + &animation_0__sampler_0, // animation sampler + { + 26, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_1, // animation sampler + { + 26, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_2, // animation sampler + { + 26, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_3, // animation sampler + { + 1, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_4, // animation sampler + { + 1, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_5, // animation sampler + { + 1, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_6, // animation sampler + { + 0, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_7, // animation sampler + { + 0, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_8, // animation sampler + { + 0, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_9, // animation sampler + { + 23, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_10, // animation sampler + { + 23, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_11, // animation sampler + { + 23, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_12, // animation sampler + { + 22, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_13, // animation sampler + { + 22, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_14, // animation sampler + { + 22, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_15, // animation sampler + { + 4, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_16, // animation sampler + { + 4, // target 
node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_17, // animation sampler + { + 4, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_18, // animation sampler + { + 3, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_19, // animation sampler + { + 3, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_20, // animation sampler + { + 3, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_21, // animation sampler + { + 2, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_22, // animation sampler + { + 2, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_23, // animation sampler + { + 2, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_24, // animation sampler + { + 6, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_25, // animation sampler + { + 6, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_26, // animation sampler + { + 6, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_27, // animation sampler + { + 5, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_28, // animation sampler + { + 5, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_29, // animation sampler + { + 5, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_30, // animation sampler + { + 12, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_31, // animation sampler + { + 12, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_32, // animation sampler + { + 12, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_33, // animation sampler + { + 11, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_34, // animation sampler + { + 11, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_35, // animation sampler + { + 11, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_36, // animation sampler + { + 10, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_37, // animation sampler + { + 10, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_38, // animation sampler + { + 10, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_39, // animation sampler + { + 9, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_40, // animation sampler + { + 9, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_41, // animation sampler + { + 9, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_42, // animation sampler + { + 8, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_43, // animation sampler + { + 8, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_44, // animation sampler + { + 8, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_45, // animation sampler + { + 7, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_46, // animation sampler + { + 7, // target node index + ACP__ROTATION, // target path + }, + 
&animation_0__sampler_47, // animation sampler + { + 7, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_48, // animation sampler + { + 15, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_49, // animation sampler + { + 15, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_50, // animation sampler + { + 15, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_51, // animation sampler + { + 14, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_52, // animation sampler + { + 14, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_53, // animation sampler + { + 14, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_54, // animation sampler + { + 13, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_55, // animation sampler + { + 13, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_56, // animation sampler + { + 13, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_57, // animation sampler + { + 21, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_58, // animation sampler + { + 21, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_59, // animation sampler + { + 21, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_60, // animation sampler + { + 20, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_61, // animation sampler + { + 20, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_62, // animation sampler + { + 20, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_63, // animation sampler + { + 19, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_64, // animation sampler + { + 19, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_65, // animation sampler + { + 19, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_66, // animation sampler + { + 18, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_67, // animation sampler + { + 18, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_68, // animation sampler + { + 18, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_69, // animation sampler + { + 17, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_70, // animation sampler + { + 17, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_71, // animation sampler + { + 17, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_72, // animation sampler + { + 16, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_73, // animation sampler + { + 16, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_74, // animation sampler + { + 16, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_75, // animation sampler + { + 25, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_76, // animation sampler + { + 25, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_77, // animation 
sampler + { + 25, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_78, // animation sampler + { + 24, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_79, // animation sampler + { + 24, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_80, // animation sampler + { + 24, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_81, // animation sampler + { + 27, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_82, // animation sampler + { + 27, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_83, // animation sampler + { + 27, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_84, // animation sampler + { + 28, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_85, // animation sampler + { + 28, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_86, // animation sampler + { + 28, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_87, // animation sampler + { + 31, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_88, // animation sampler + { + 31, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_89, // animation sampler + { + 31, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_90, // animation sampler + { + 30, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_91, // animation sampler + { + 30, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_92, // animation sampler + { + 30, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_93, // animation sampler + { + 29, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_94, // animation sampler + { + 29, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_95, // animation sampler + { + 29, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_96, // animation sampler + { + 32, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_97, // animation sampler + { + 32, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_98, // animation sampler + { + 32, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_99, // animation sampler + { + 33, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_100, // animation sampler + { + 33, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_101, // animation sampler + { + 33, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_102, // animation sampler + { + 34, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_103, // animation sampler + { + 34, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_104, // animation sampler + { + 34, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_105, // animation sampler + { + 37, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_106, // animation sampler + { + 37, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_107, // animation sampler + { + 37, // target node 
index + ACP__SCALE, // target path + }, + &animation_0__sampler_108, // animation sampler + { + 36, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_109, // animation sampler + { + 36, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_110, // animation sampler + { + 36, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_111, // animation sampler + { + 35, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_112, // animation sampler + { + 35, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_113, // animation sampler + { + 35, // target node index + ACP__SCALE, // target path + }, + &animation_0__sampler_114, // animation sampler + { + 38, // target node index + ACP__TRANSLATION, // target path + }, + &animation_0__sampler_115, // animation sampler + { + 38, // target node index + ACP__ROTATION, // target path + }, + &animation_0__sampler_116, // animation sampler + { + 38, // target node index + ACP__SCALE, // target path + }, +}; + +const AnimationSampler animation_1__sampler_0 = { + accessor_129, // input, keyframe timestamps + accessor_130, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_1 = { + accessor_129, // input, keyframe timestamps + accessor_131, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_2 = { + accessor_132, // input, keyframe timestamps + accessor_133, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_3 = { + accessor_132, // input, keyframe timestamps + accessor_134, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_4 = { + accessor_132, // input, keyframe timestamps + accessor_135, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_5 = { + accessor_132, // input, keyframe timestamps + accessor_136, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_6 = { + accessor_132, // input, keyframe timestamps + accessor_137, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_7 = { + accessor_132, // input, keyframe timestamps + accessor_138, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_8 = { + accessor_132, // input, keyframe timestamps + accessor_139, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_9 = { + accessor_129, // input, keyframe timestamps + accessor_140, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_10 = { + accessor_129, // input, keyframe timestamps + accessor_141, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_11 = { + accessor_132, // input, keyframe timestamps + accessor_142, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_12 = { + accessor_129, // input, keyframe timestamps + accessor_143, // output, keyframe values (void *) + accessor_129__length, // 
length +}; + +const AnimationSampler animation_1__sampler_13 = { + accessor_129, // input, keyframe timestamps + accessor_144, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_14 = { + accessor_129, // input, keyframe timestamps + accessor_145, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_15 = { + accessor_132, // input, keyframe timestamps + accessor_146, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_16 = { + accessor_132, // input, keyframe timestamps + accessor_147, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_17 = { + accessor_132, // input, keyframe timestamps + accessor_148, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_18 = { + accessor_132, // input, keyframe timestamps + accessor_149, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_19 = { + accessor_132, // input, keyframe timestamps + accessor_150, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_20 = { + accessor_132, // input, keyframe timestamps + accessor_151, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_21 = { + accessor_132, // input, keyframe timestamps + accessor_152, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_22 = { + accessor_132, // input, keyframe timestamps + accessor_153, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_23 = { + accessor_132, // input, keyframe timestamps + accessor_154, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_24 = { + accessor_129, // input, keyframe timestamps + accessor_155, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_25 = { + accessor_129, // input, keyframe timestamps + accessor_156, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_26 = { + accessor_132, // input, keyframe timestamps + accessor_157, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_27 = { + accessor_132, // input, keyframe timestamps + accessor_158, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_28 = { + accessor_132, // input, keyframe timestamps + accessor_159, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_29 = { + accessor_132, // input, keyframe timestamps + accessor_160, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_30 = { + accessor_132, // input, keyframe timestamps + accessor_161, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_31 = { + accessor_132, // input, keyframe timestamps + accessor_162, // 
output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_32 = { + accessor_132, // input, keyframe timestamps + accessor_163, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_33 = { + accessor_132, // input, keyframe timestamps + accessor_164, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_34 = { + accessor_132, // input, keyframe timestamps + accessor_165, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_35 = { + accessor_132, // input, keyframe timestamps + accessor_166, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_36 = { + accessor_132, // input, keyframe timestamps + accessor_167, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_37 = { + accessor_132, // input, keyframe timestamps + accessor_168, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_38 = { + accessor_132, // input, keyframe timestamps + accessor_169, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_39 = { + accessor_132, // input, keyframe timestamps + accessor_170, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_40 = { + accessor_132, // input, keyframe timestamps + accessor_171, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_41 = { + accessor_132, // input, keyframe timestamps + accessor_172, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_42 = { + accessor_132, // input, keyframe timestamps + accessor_173, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_43 = { + accessor_132, // input, keyframe timestamps + accessor_174, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_44 = { + accessor_132, // input, keyframe timestamps + accessor_175, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_45 = { + accessor_132, // input, keyframe timestamps + accessor_176, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_46 = { + accessor_132, // input, keyframe timestamps + accessor_177, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_47 = { + accessor_132, // input, keyframe timestamps + accessor_178, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_48 = { + accessor_132, // input, keyframe timestamps + accessor_179, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_49 = { + accessor_132, // input, keyframe timestamps + accessor_180, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_50 = { + 
accessor_132, // input, keyframe timestamps + accessor_181, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_51 = { + accessor_132, // input, keyframe timestamps + accessor_182, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_52 = { + accessor_132, // input, keyframe timestamps + accessor_183, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_53 = { + accessor_132, // input, keyframe timestamps + accessor_184, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_54 = { + accessor_132, // input, keyframe timestamps + accessor_185, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_55 = { + accessor_132, // input, keyframe timestamps + accessor_186, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_56 = { + accessor_132, // input, keyframe timestamps + accessor_187, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_57 = { + accessor_132, // input, keyframe timestamps + accessor_188, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_58 = { + accessor_132, // input, keyframe timestamps + accessor_189, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_59 = { + accessor_132, // input, keyframe timestamps + accessor_190, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_60 = { + accessor_132, // input, keyframe timestamps + accessor_191, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_61 = { + accessor_132, // input, keyframe timestamps + accessor_192, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_62 = { + accessor_132, // input, keyframe timestamps + accessor_193, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_63 = { + accessor_132, // input, keyframe timestamps + accessor_194, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_64 = { + accessor_132, // input, keyframe timestamps + accessor_195, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_65 = { + accessor_132, // input, keyframe timestamps + accessor_196, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_66 = { + accessor_132, // input, keyframe timestamps + accessor_197, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_67 = { + accessor_132, // input, keyframe timestamps + accessor_198, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_68 = { + accessor_132, // input, keyframe timestamps + accessor_199, // output, keyframe values (void *) + accessor_132__length, // length +}; 
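+
+// ---------------------------------------------------------------------
+// Illustrative sketch, not generator output: one plausible way to
+// evaluate the sampler tables above at time t. The field names input,
+// output, and length mirror the per-field comments on each initializer
+// but are assumptions -- match them against the real AnimationSampler
+// definition in gltf.hpp. Timestamps are a float array; translation
+// and scale outputs are packed XMFLOAT3, rotations packed XMFLOAT4.
+// Edge handling (t outside the keyframe range, zero-length intervals)
+// is omitted for brevity.
+static float anim_segment(const AnimationSampler * sampler, float t, int * ix)
+{
+  const float * times = (const float *)sampler->input;
+  int i = 0;
+  // find the keyframe interval [i, i + 1] that contains t
+  while (i < sampler->length - 2 && times[i + 1] <= t)
+    i += 1;
+  *ix = i;
+  // normalized position of t inside that interval
+  return (t - times[i]) / (times[i + 1] - times[i]);
+}
+
+static XMVECTOR anim_sample_vec3(const AnimationSampler * sampler, float t)
+{
+  int ix;
+  float u = anim_segment(sampler, t, &ix);
+  const XMFLOAT3 * v = (const XMFLOAT3 *)sampler->output;
+  // glTF LINEAR interpolation for translation and scale channels
+  return XMVectorLerp(XMLoadFloat3(&v[ix]), XMLoadFloat3(&v[ix + 1]), u);
+}
+
+static XMVECTOR anim_sample_quat(const AnimationSampler * sampler, float t)
+{
+  int ix;
+  float u = anim_segment(sampler, t, &ix);
+  const XMFLOAT4 * q = (const XMFLOAT4 *)sampler->output;
+  // rotations interpolate along the quaternion arc
+  return XMQuaternionSlerp(XMLoadFloat4(&q[ix]), XMLoadFloat4(&q[ix + 1]), u);
+}
+// Each AnimationChannel entry then routes a sampled value onto its
+// target node: e.g. ACP__ROTATION writes the slerped quaternion into a
+// mutable copy of the node's rotation before its local matrix is
+// rebuilt with XMMatrixAffineTransformation(scale, origin, rotation,
+// translation). Likewise, skin_0 pairs each joint's world matrix with
+// its inverse bind matrix -- a palette entry would be, e.g.,
+// XMMatrixMultiply(inverse_bind[i], world[joints[i]]) under
+// DirectXMath's row-vector convention.
+// ---------------------------------------------------------------------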
+ +const AnimationSampler animation_1__sampler_69 = { + accessor_132, // input, keyframe timestamps + accessor_200, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_70 = { + accessor_132, // input, keyframe timestamps + accessor_201, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_71 = { + accessor_132, // input, keyframe timestamps + accessor_202, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_72 = { + accessor_132, // input, keyframe timestamps + accessor_203, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_73 = { + accessor_132, // input, keyframe timestamps + accessor_204, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_74 = { + accessor_132, // input, keyframe timestamps + accessor_205, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_75 = { + accessor_132, // input, keyframe timestamps + accessor_206, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_76 = { + accessor_132, // input, keyframe timestamps + accessor_207, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_77 = { + accessor_132, // input, keyframe timestamps + accessor_208, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_78 = { + accessor_132, // input, keyframe timestamps + accessor_209, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_79 = { + accessor_132, // input, keyframe timestamps + accessor_210, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_80 = { + accessor_132, // input, keyframe timestamps + accessor_211, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_81 = { + accessor_132, // input, keyframe timestamps + accessor_212, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_82 = { + accessor_132, // input, keyframe timestamps + accessor_213, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_83 = { + accessor_132, // input, keyframe timestamps + accessor_214, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_84 = { + accessor_132, // input, keyframe timestamps + accessor_215, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_85 = { + accessor_132, // input, keyframe timestamps + accessor_216, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_86 = { + accessor_132, // input, keyframe timestamps + accessor_217, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_87 = { + accessor_129, // input, keyframe timestamps + accessor_218, // output, 
keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_88 = { + accessor_132, // input, keyframe timestamps + accessor_219, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_89 = { + accessor_132, // input, keyframe timestamps + accessor_220, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_90 = { + accessor_132, // input, keyframe timestamps + accessor_221, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_91 = { + accessor_132, // input, keyframe timestamps + accessor_222, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_92 = { + accessor_132, // input, keyframe timestamps + accessor_223, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_93 = { + accessor_132, // input, keyframe timestamps + accessor_224, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_94 = { + accessor_132, // input, keyframe timestamps + accessor_225, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_95 = { + accessor_132, // input, keyframe timestamps + accessor_226, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_96 = { + accessor_132, // input, keyframe timestamps + accessor_227, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_97 = { + accessor_132, // input, keyframe timestamps + accessor_228, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_98 = { + accessor_132, // input, keyframe timestamps + accessor_229, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_99 = { + accessor_132, // input, keyframe timestamps + accessor_230, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_100 = { + accessor_132, // input, keyframe timestamps + accessor_231, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_101 = { + accessor_132, // input, keyframe timestamps + accessor_232, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_102 = { + accessor_132, // input, keyframe timestamps + accessor_233, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_103 = { + accessor_132, // input, keyframe timestamps + accessor_234, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_104 = { + accessor_132, // input, keyframe timestamps + accessor_235, // output, keyframe values (void *) + accessor_132__length, // length +}; + +const AnimationSampler animation_1__sampler_105 = { + accessor_129, // input, keyframe timestamps + accessor_236, // output, keyframe values (void *) + accessor_129__length, // length +}; + +const AnimationSampler animation_1__sampler_106 = { + 
accessor_132, // input, keyframe timestamps
+  accessor_237, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_107 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_238, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_108 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_239, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_109 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_240, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_110 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_241, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_111 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_242, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_112 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_243, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_113 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_244, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_114 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_245, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_115 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_246, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationSampler animation_1__sampler_116 = {
+  accessor_132, // input, keyframe timestamps
+  accessor_247, // output, keyframe values (void *)
+  accessor_132__length, // length
+};
+
+const AnimationChannel animation_1__channels[] = {
+  &animation_1__sampler_0, // animation sampler
+  {
+    26, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_1, // animation sampler
+  {
+    26, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_2, // animation sampler
+  {
+    26, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_3, // animation sampler
+  {
+    1, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_4, // animation sampler
+  {
+    1, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_5, // animation sampler
+  {
+    1, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_6, // animation sampler
+  {
+    0, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_7, // animation sampler
+  {
+    0, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_8, // animation sampler
+  {
+    0, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_9, // animation sampler
+  {
+    23, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_10, // animation sampler
+  {
+    23, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_11, // animation sampler
+  {
+    23, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_12, // animation sampler
+  {
+    22, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_13, // animation sampler
+  {
+    22, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_14, // animation sampler
+  {
+    22, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_15, // animation sampler
+  {
+    4, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_16, // animation sampler
+  {
+    4, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_17, // animation sampler
+  {
+    4, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_18, // animation sampler
+  {
+    3, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_19, // animation sampler
+  {
+    3, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_20, // animation sampler
+  {
+    3, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_21, // animation sampler
+  {
+    2, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_22, // animation sampler
+  {
+    2, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_23, // animation sampler
+  {
+    2, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_24, // animation sampler
+  {
+    6, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_25, // animation sampler
+  {
+    6, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_26, // animation sampler
+  {
+    6, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_27, // animation sampler
+  {
+    5, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_28, // animation sampler
+  {
+    5, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_29, // animation sampler
+  {
+    5, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_30, // animation sampler
+  {
+    12, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_31, // animation sampler
+  {
+    12, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_32, // animation sampler
+  {
+    12, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_33, // animation sampler
+  {
+    11, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_34, // animation sampler
+  {
+    11, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_35, // animation sampler
+  {
+    11, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_36, // animation sampler
+  {
+    10, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_37, // animation sampler
+  {
+    10, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_38, // animation sampler
+  {
+    10, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_39, // animation sampler
+  {
+    9, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_40, // animation sampler
+  {
+    9, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_41, // animation sampler
+  {
+    9, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_42, // animation sampler
+  {
+    8, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_43, // animation sampler
+  {
+    8, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_44, // animation sampler
+  {
+    8, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_45, // animation sampler
+  {
+    7, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_46, // animation sampler
+  {
+    7, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_47, // animation sampler
+  {
+    7, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_48, // animation sampler
+  {
+    15, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_49, // animation sampler
+  {
+    15, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_50, // animation sampler
+  {
+    15, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_51, // animation sampler
+  {
+    14, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_52, // animation sampler
+  {
+    14, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_53, // animation sampler
+  {
+    14, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_54, // animation sampler
+  {
+    13, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_55, // animation sampler
+  {
+    13, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_56, // animation sampler
+  {
+    13, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_57, // animation sampler
+  {
+    21, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_58, // animation sampler
+  {
+    21, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_59, // animation sampler
+  {
+    21, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_60, // animation sampler
+  {
+    20, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_61, // animation sampler
+  {
+    20, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_62, // animation sampler
+  {
+    20, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_63, // animation sampler
+  {
+    19, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_64, // animation sampler
+  {
+    19, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_65, // animation sampler
+  {
+    19, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_66, // animation sampler
+  {
+    18, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_67, // animation sampler
+  {
+    18, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_68, // animation sampler
+  {
+    18, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_69, // animation sampler
+  {
+    17, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_70, // animation sampler
+  {
+    17, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_71, // animation sampler
+  {
+    17, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_72, // animation sampler
+  {
+    16, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_73, // animation sampler
+  {
+    16, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_74, // animation sampler
+  {
+    16, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_75, // animation sampler
+  {
+    25, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_76, // animation sampler
+  {
+    25, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_77, // animation sampler
+  {
+    25, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_78, // animation sampler
+  {
+    24, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_79, // animation sampler
+  {
+    24, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_80, // animation sampler
+  {
+    24, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_81, // animation sampler
+  {
+    27, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_82, // animation sampler
+  {
+    27, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_83, // animation sampler
+  {
+    27, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_84, // animation sampler
+  {
+    28, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_85, // animation sampler
+  {
+    28, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_86, // animation sampler
+  {
+    28, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_87, // animation sampler
+  {
+    31, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_88, // animation sampler
+  {
+    31, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_89, // animation sampler
+  {
+    31, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_90, // animation sampler
+  {
+    30, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_91, // animation sampler
+  {
+    30, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_92, // animation sampler
+  {
+    30, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_93, // animation sampler
+  {
+    29, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_94, // animation sampler
+  {
+    29, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_95, // animation sampler
+  {
+    29, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_96, // animation sampler
+  {
+    32, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_97, // animation sampler
+  {
+    32, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_98, // animation sampler
+  {
+    32, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_99, // animation sampler
+  {
+    33, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_100, // animation sampler
+  {
+    33, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_101, // animation sampler
+  {
+    33, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_102, // animation sampler
+  {
+    34, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_103, // animation sampler
+  {
+    34, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_104, // animation sampler
+  {
+    34, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_105, // animation sampler
+  {
+    37, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_106, // animation sampler
+  {
+    37, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_107, // animation sampler
+  {
+    37, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_108, // animation sampler
+  {
+    36, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_109, // animation sampler
+  {
+    36, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_110, // animation sampler
+  {
+    36, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_111, // animation sampler
+  {
+    35, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_112, // animation sampler
+  {
+    35, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_113, // animation sampler
+  {
+    35, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_1__sampler_114, // animation sampler
+  {
+    38, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_1__sampler_115, // animation sampler
+  {
+    38, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_1__sampler_116, // animation sampler
+  {
+    38, // target node index
+    ACP__SCALE, // target path
+  },
+};
+
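Each AnimationChannel row above binds one sampler to a target node index and a target path (translation, rotation, or scale), while every AnimationSampler pairs a timestamp accessor with a value accessor of the same element count. As a minimal sketch of how a player could evaluate one translation sampler with the DirectXMath API this patch adopts: the field names input/output/length are assumptions inferred from the initializer comments, and sample_translation is a hypothetical helper, not code from this patch.

// Sketch only, not part of the patch. Assumes AnimationSampler exposes
// fields named after the initializer comments: const float * input,
// const void * output, int length; and that DirectXMath names are in
// scope as in cube.hpp.
static XMFLOAT3 sample_translation(const AnimationSampler& s, float t)
{
  const float * times = s.input;
  const XMFLOAT3 * values = static_cast<const XMFLOAT3 *>(s.output);
  if (s.length < 2)
    return values[0];
  // find the keyframe interval [i, i+1] that brackets t
  int i = 0;
  while (i < s.length - 2 && t >= times[i + 1])
    i += 1;
  float span = times[i + 1] - times[i];
  float f = (span > 0.0f) ? (t - times[i]) / span : 0.0f;
  if (f < 0.0f) f = 0.0f;
  if (f > 1.0f) f = 1.0f;
  XMVECTOR a = XMLoadFloat3(&values[i]);
  XMVECTOR b = XMLoadFloat3(&values[i + 1]);
  XMFLOAT3 result;
  XMStoreFloat3(&result, XMVectorLerp(a, b, f));
  return result;
}

XMVectorLerp suffices for the TRANSLATION and SCALE paths; the ROTATION path needs a quaternion slerp, sketched after the animation_2 tables below.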
+const AnimationSampler animation_2__sampler_0 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_249, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_1 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_250, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_2 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_252, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_3 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_253, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_4 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_254, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_5 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_255, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_6 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_256, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_7 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_257, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_8 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_258, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_9 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_259, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_10 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_260, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_11 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_261, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_12 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_262, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_13 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_263, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_14 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_264, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_15 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_265, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_16 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_266, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_17 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_267, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_18 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_268, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_19 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_269, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_20 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_270, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_21 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_271, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_22 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_272, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_23 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_273, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_24 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_274, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_25 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_275, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_26 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_276, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_27 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_277, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_28 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_278, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_29 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_279, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_30 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_280, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_31 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_281, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_32 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_282, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_33 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_283, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_34 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_284, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_35 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_285, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_36 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_286, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_37 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_287, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_38 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_288, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_39 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_289, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_40 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_290, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_41 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_291, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_42 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_292, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_43 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_293, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_44 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_294, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_45 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_295, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_46 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_296, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_47 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_297, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_48 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_298, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_49 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_299, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_50 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_300, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_51 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_301, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_52 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_302, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_53 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_303, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_54 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_304, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_55 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_305, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_56 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_306, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_57 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_307, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_58 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_308, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_59 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_309, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_60 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_310, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_61 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_311, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_62 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_312, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_63 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_313, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_64 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_314, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_65 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_315, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_66 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_316, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_67 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_317, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_68 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_318, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_69 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_319, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_70 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_320, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_71 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_321, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_72 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_322, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_73 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_323, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_74 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_324, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_75 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_325, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_76 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_326, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_77 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_327, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_78 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_328, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_79 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_329, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_80 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_330, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_81 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_331, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_82 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_332, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_83 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_333, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_84 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_334, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_85 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_335, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_86 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_336, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_87 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_337, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_88 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_338, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_89 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_339, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_90 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_340, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_91 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_341, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_92 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_342, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_93 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_343, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_94 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_344, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_95 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_345, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_96 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_346, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_97 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_347, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_98 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_348, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_99 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_349, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_100 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_350, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_101 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_351, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_102 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_352, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_103 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_353, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_104 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_354, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_105 = {
+  accessor_248, // input, keyframe timestamps
+  accessor_355, // output, keyframe values (void *)
+  accessor_248__length, // length
+};
+
+const AnimationSampler animation_2__sampler_106 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_356, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_107 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_357, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_108 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_358, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_109 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_359, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_110 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_360, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_111 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_361, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_112 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_362, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_113 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_363, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_114 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_364, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_115 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_365, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationSampler animation_2__sampler_116 = {
+  accessor_251, // input, keyframe timestamps
+  accessor_366, // output, keyframe values (void *)
+  accessor_251__length, // length
+};
+
+const AnimationChannel animation_2__channels[] = {
+  &animation_2__sampler_0, // animation sampler
+  {
+    26, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_1, // animation sampler
+  {
+    26, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_2, // animation sampler
+  {
+    26, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_3, // animation sampler
+  {
+    1, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_4, // animation sampler
+  {
+    1, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_5, // animation sampler
+  {
+    1, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_6, // animation sampler
+  {
+    0, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_7, // animation sampler
+  {
+    0, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_8, // animation sampler
+  {
+    0, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_9, // animation sampler
+  {
+    23, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_10, // animation sampler
+  {
+    23, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_11, // animation sampler
+  {
+    23, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_12, // animation sampler
+  {
+    22, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_13, // animation sampler
+  {
+    22, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_14, // animation sampler
+  {
+    22, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_15, // animation sampler
+  {
+    4, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_16, // animation sampler
+  {
+    4, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_17, // animation sampler
+  {
+    4, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_18, // animation sampler
+  {
+    3, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_19, // animation sampler
+  {
+    3, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_20, // animation sampler
+  {
+    3, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_21, // animation sampler
+  {
+    2, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_22, // animation sampler
+  {
+    2, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_23, // animation sampler
+  {
+    2, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_24, // animation sampler
+  {
+    6, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_25, // animation sampler
+  {
+    6, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_26, // animation sampler
+  {
+    6, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_27, // animation sampler
+  {
+    5, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_28, // animation sampler
+  {
+    5, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_29, // animation sampler
+  {
+    5, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_30, // animation sampler
+  {
+    12, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_31, // animation sampler
+  {
+    12, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_32, // animation sampler
+  {
+    12, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_33, // animation sampler
+  {
+    11, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_34, // animation sampler
+  {
+    11, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_35, // animation sampler
+  {
+    11, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_36, // animation sampler
+  {
+    10, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_37, // animation sampler
+  {
+    10, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_38, // animation sampler
+  {
+    10, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_39, // animation sampler
+  {
+    9, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_40, // animation sampler
+  {
+    9, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_41, // animation sampler
+  {
+    9, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_42, // animation sampler
+  {
+    8, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_43, // animation sampler
+  {
+    8, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_44, // animation sampler
+  {
+    8, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_45, // animation sampler
+  {
+    7, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_46, // animation sampler
+  {
+    7, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_47, // animation sampler
+  {
+    7, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_48, // animation sampler
+  {
+    15, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_49, // animation sampler
+  {
+    15, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_50, // animation sampler
+  {
+    15, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_51, // animation sampler
+  {
+    14, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_52, // animation sampler
+  {
+    14, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_53, // animation sampler
+  {
+    14, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_54, // animation sampler
+  {
+    13, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_55, // animation sampler
+  {
+    13, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_56, // animation sampler
+  {
+    13, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_57, // animation sampler
+  {
+    21, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_58, // animation sampler
+  {
+    21, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_59, // animation sampler
+  {
+    21, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_60, // animation sampler
+  {
+    20, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_61, // animation sampler
+  {
+    20, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_62, // animation sampler
+  {
+    20, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_63, // animation sampler
+  {
+    19, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_64, // animation sampler
+  {
+    19, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_65, // animation sampler
+  {
+    19, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_66, // animation sampler
+  {
+    18, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_67, // animation sampler
+  {
+    18, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_68, // animation sampler
+  {
+    18, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_69, // animation sampler
+  {
+    17, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_70, // animation sampler
+  {
+    17, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_71, // animation sampler
+  {
+    17, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_72, // animation sampler
+  {
+    16, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_73, // animation sampler
+  {
+    16, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_74, // animation sampler
+  {
+    16, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_75, // animation sampler
+  {
+    25, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_76, // animation sampler
+  {
+    25, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_77, // animation sampler
+  {
+    25, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_78, // animation sampler
+  {
+    24, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_79, // animation sampler
+  {
+    24, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_80, // animation sampler
+  {
+    24, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_81, // animation sampler
+  {
+    27, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_82, // animation sampler
+  {
+    27, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_83, // animation sampler
+  {
+    27, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_84, // animation sampler
+  {
+    28, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_85, // animation sampler
+  {
+    28, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_86, // animation sampler
+  {
+    28, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_87, // animation sampler
+  {
+    31, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_88, // animation sampler
+  {
+    31, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_89, // animation sampler
+  {
+    31, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_90, // animation sampler
+  {
+    30, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_91, // animation sampler
+  {
+    30, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_92, // animation sampler
+  {
+    30, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_93, // animation sampler
+  {
+    29, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_94, // animation sampler
+  {
+    29, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_95, // animation sampler
+  {
+    29, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_96, // animation sampler
+  {
+    32, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_97, // animation sampler
+  {
+    32, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_98, // animation sampler
+  {
+    32, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_99, // animation sampler
+  {
+    33, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_100, // animation sampler
+  {
+    33, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_101, // animation sampler
+  {
+    33, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_102, // animation sampler
+  {
+    34, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_103, // animation sampler
+  {
+    34, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_104, // animation sampler
+  {
+    34, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_105, // animation sampler
+  {
+    37, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_106, // animation sampler
+  {
+    37, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_107, // animation sampler
+  {
+    37, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_108, // animation sampler
+  {
+    36, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_109, // animation sampler
+  {
+    36, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_110, // animation sampler
+  {
+    36, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_111, // animation sampler
+  {
+    35, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_112, // animation sampler
+  {
+    35, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_113, // animation sampler
+  {
+    35, // target node index
+    ACP__SCALE, // target path
+  },
+  &animation_2__sampler_114, // animation sampler
+  {
+    38, // target node index
+    ACP__TRANSLATION, // target path
+  },
+  &animation_2__sampler_115, // animation sampler
+  {
+    38, // target node index
+    ACP__ROTATION, // target path
+  },
+  &animation_2__sampler_116, // animation sampler
+  {
+    38, // target node index
+    ACP__SCALE, // target path
+  },
+};
+
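ROTATION channels store quaternion keyframes, which should be interpolated spherically rather than component-wise. A sketch of that case under the same assumed field names, using XMQuaternionSlerp from the bundled DirectXMath headers (again a hypothetical helper, not code from this patch):

// Sketch only, not part of the patch: evaluate a ROTATION sampler whose
// outputs are XMFLOAT4 quaternions (x, y, z, w).
static XMFLOAT4 sample_rotation(const AnimationSampler& s, float t)
{
  const float * times = s.input;
  const XMFLOAT4 * values = static_cast<const XMFLOAT4 *>(s.output);
  if (s.length < 2)
    return values[0];
  int i = 0;
  while (i < s.length - 2 && t >= times[i + 1])
    i += 1;
  float span = times[i + 1] - times[i];
  float f = (span > 0.0f) ? (t - times[i]) / span : 0.0f;
  if (f < 0.0f) f = 0.0f;
  if (f > 1.0f) f = 1.0f;
  XMVECTOR a = XMLoadFloat4(&values[i]);
  XMVECTOR b = XMLoadFloat4(&values[i + 1]);
  XMFLOAT4 result;
  XMStoreFloat4(&result, XMQuaternionSlerp(a, b, f));
  return result;
}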
+const AnimationSampler animation_3__sampler_0 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_368, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_1 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_370, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_2 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_371, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_3 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_372, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_4 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_373, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_5 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_374, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_6 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_375, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_7 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_376, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_8 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_377, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_9 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_378, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_10 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_379, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_11 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_380, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_12 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_381, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_13 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_382, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_14 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_383, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_15 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_384, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_16 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_385, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_17 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_386, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_18 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_387, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_19 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_388, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_20 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_389, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_21 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_390, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_22 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_391, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_23 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_392, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_24 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_393, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_25 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_394, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_26 = {
+  accessor_369, // input, keyframe timestamps
+  accessor_395, // output, keyframe values (void *)
+  accessor_369__length, // length
+};
+
+const AnimationSampler animation_3__sampler_27 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_396, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_28 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_397, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_29 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_398, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_30 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_399, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_31 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_400, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_32 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_401, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_33 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_402, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_34 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_403, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_35 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_404, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_36 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_405, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_37 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_406, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_38 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_407, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_39 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_408, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_40 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_409, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_41 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_410, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_42 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_411, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_43 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_412, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_44 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_413, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_45 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_414, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_46 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_415, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_47 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_416, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_48 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_417, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_49 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_418, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_50 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_419, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_51 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_420, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_52 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_421, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_53 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_422, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_54 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_423, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_55 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_424, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_56 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_425, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_57 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_426, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_58 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_427, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_59 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_428, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_60 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_429, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_61 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_430, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_62 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_431, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_63 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_432, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_64 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_433, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_65 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_434, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_66 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_435, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_67 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_436, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_68 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_437, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_69 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_438, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_70 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_439, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_71 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_440, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_72 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_441, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_73 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_442, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_74 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_443, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_75 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_444, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_76 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_445, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_77 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_446, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_78 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_447, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_79 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_448, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_80 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_449, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_81 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_450, // output, keyframe values (void *)
+  accessor_367__length, // length
+};
+
+const AnimationSampler animation_3__sampler_82 = {
+  accessor_367, // input, keyframe timestamps
+  accessor_451, // output, keyframe values
(void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_83 = { + accessor_367, // input, keyframe timestamps + accessor_452, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_84 = { + accessor_367, // input, keyframe timestamps + accessor_453, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_85 = { + accessor_367, // input, keyframe timestamps + accessor_454, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_86 = { + accessor_367, // input, keyframe timestamps + accessor_455, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_87 = { + accessor_369, // input, keyframe timestamps + accessor_456, // output, keyframe values (void *) + accessor_369__length, // length +}; + +const AnimationSampler animation_3__sampler_88 = { + accessor_367, // input, keyframe timestamps + accessor_457, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_89 = { + accessor_367, // input, keyframe timestamps + accessor_458, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_90 = { + accessor_367, // input, keyframe timestamps + accessor_459, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_91 = { + accessor_367, // input, keyframe timestamps + accessor_460, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_92 = { + accessor_367, // input, keyframe timestamps + accessor_461, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_93 = { + accessor_367, // input, keyframe timestamps + accessor_462, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_94 = { + accessor_367, // input, keyframe timestamps + accessor_463, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_95 = { + accessor_367, // input, keyframe timestamps + accessor_464, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_96 = { + accessor_367, // input, keyframe timestamps + accessor_465, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_97 = { + accessor_367, // input, keyframe timestamps + accessor_466, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_98 = { + accessor_367, // input, keyframe timestamps + accessor_467, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_99 = { + accessor_367, // input, keyframe timestamps + accessor_468, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_100 = { + accessor_367, // input, keyframe timestamps + accessor_469, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_101 = { + accessor_367, // input, 
keyframe timestamps + accessor_470, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_102 = { + accessor_367, // input, keyframe timestamps + accessor_471, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_103 = { + accessor_367, // input, keyframe timestamps + accessor_472, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_104 = { + accessor_367, // input, keyframe timestamps + accessor_473, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_105 = { + accessor_369, // input, keyframe timestamps + accessor_474, // output, keyframe values (void *) + accessor_369__length, // length +}; + +const AnimationSampler animation_3__sampler_106 = { + accessor_367, // input, keyframe timestamps + accessor_475, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_107 = { + accessor_367, // input, keyframe timestamps + accessor_476, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_108 = { + accessor_367, // input, keyframe timestamps + accessor_477, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_109 = { + accessor_367, // input, keyframe timestamps + accessor_478, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_110 = { + accessor_367, // input, keyframe timestamps + accessor_479, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_111 = { + accessor_367, // input, keyframe timestamps + accessor_480, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_112 = { + accessor_367, // input, keyframe timestamps + accessor_481, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_113 = { + accessor_367, // input, keyframe timestamps + accessor_482, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_114 = { + accessor_367, // input, keyframe timestamps + accessor_483, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_115 = { + accessor_367, // input, keyframe timestamps + accessor_484, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationSampler animation_3__sampler_116 = { + accessor_367, // input, keyframe timestamps + accessor_485, // output, keyframe values (void *) + accessor_367__length, // length +}; + +const AnimationChannel animation_3__channels[] = { + &animation_3__sampler_0, // animation sampler + { + 26, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_1, // animation sampler + { + 26, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_2, // animation sampler + { + 26, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_3, // animation sampler + { + 1, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_4, // animation sampler + { + 1, // 
target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_5, // animation sampler + { + 1, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_6, // animation sampler + { + 0, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_7, // animation sampler + { + 0, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_8, // animation sampler + { + 0, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_9, // animation sampler + { + 23, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_10, // animation sampler + { + 23, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_11, // animation sampler + { + 23, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_12, // animation sampler + { + 22, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_13, // animation sampler + { + 22, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_14, // animation sampler + { + 22, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_15, // animation sampler + { + 4, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_16, // animation sampler + { + 4, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_17, // animation sampler + { + 4, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_18, // animation sampler + { + 3, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_19, // animation sampler + { + 3, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_20, // animation sampler + { + 3, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_21, // animation sampler + { + 2, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_22, // animation sampler + { + 2, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_23, // animation sampler + { + 2, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_24, // animation sampler + { + 6, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_25, // animation sampler + { + 6, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_26, // animation sampler + { + 6, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_27, // animation sampler + { + 5, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_28, // animation sampler + { + 5, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_29, // animation sampler + { + 5, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_30, // animation sampler + { + 12, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_31, // animation sampler + { + 12, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_32, // animation sampler + { + 12, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_33, // animation sampler + { + 11, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_34, // animation sampler + { + 11, // target node index + ACP__ROTATION, // target path + }, + 
&animation_3__sampler_35, // animation sampler + { + 11, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_36, // animation sampler + { + 10, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_37, // animation sampler + { + 10, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_38, // animation sampler + { + 10, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_39, // animation sampler + { + 9, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_40, // animation sampler + { + 9, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_41, // animation sampler + { + 9, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_42, // animation sampler + { + 8, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_43, // animation sampler + { + 8, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_44, // animation sampler + { + 8, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_45, // animation sampler + { + 7, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_46, // animation sampler + { + 7, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_47, // animation sampler + { + 7, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_48, // animation sampler + { + 15, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_49, // animation sampler + { + 15, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_50, // animation sampler + { + 15, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_51, // animation sampler + { + 14, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_52, // animation sampler + { + 14, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_53, // animation sampler + { + 14, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_54, // animation sampler + { + 13, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_55, // animation sampler + { + 13, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_56, // animation sampler + { + 13, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_57, // animation sampler + { + 21, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_58, // animation sampler + { + 21, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_59, // animation sampler + { + 21, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_60, // animation sampler + { + 20, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_61, // animation sampler + { + 20, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_62, // animation sampler + { + 20, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_63, // animation sampler + { + 19, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_64, // animation sampler + { + 19, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_65, // animation sampler + { + 
19, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_66, // animation sampler + { + 18, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_67, // animation sampler + { + 18, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_68, // animation sampler + { + 18, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_69, // animation sampler + { + 17, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_70, // animation sampler + { + 17, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_71, // animation sampler + { + 17, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_72, // animation sampler + { + 16, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_73, // animation sampler + { + 16, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_74, // animation sampler + { + 16, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_75, // animation sampler + { + 25, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_76, // animation sampler + { + 25, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_77, // animation sampler + { + 25, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_78, // animation sampler + { + 24, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_79, // animation sampler + { + 24, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_80, // animation sampler + { + 24, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_81, // animation sampler + { + 27, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_82, // animation sampler + { + 27, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_83, // animation sampler + { + 27, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_84, // animation sampler + { + 28, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_85, // animation sampler + { + 28, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_86, // animation sampler + { + 28, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_87, // animation sampler + { + 31, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_88, // animation sampler + { + 31, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_89, // animation sampler + { + 31, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_90, // animation sampler + { + 30, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_91, // animation sampler + { + 30, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_92, // animation sampler + { + 30, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_93, // animation sampler + { + 29, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_94, // animation sampler + { + 29, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_95, // animation sampler + { + 29, // target node index + ACP__SCALE, // 
target path + }, + &animation_3__sampler_96, // animation sampler + { + 32, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_97, // animation sampler + { + 32, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_98, // animation sampler + { + 32, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_99, // animation sampler + { + 33, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_100, // animation sampler + { + 33, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_101, // animation sampler + { + 33, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_102, // animation sampler + { + 34, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_103, // animation sampler + { + 34, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_104, // animation sampler + { + 34, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_105, // animation sampler + { + 37, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_106, // animation sampler + { + 37, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_107, // animation sampler + { + 37, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_108, // animation sampler + { + 36, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_109, // animation sampler + { + 36, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_110, // animation sampler + { + 36, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_111, // animation sampler + { + 35, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_112, // animation sampler + { + 35, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_113, // animation sampler + { + 35, // target node index + ACP__SCALE, // target path + }, + &animation_3__sampler_114, // animation sampler + { + 38, // target node index + ACP__TRANSLATION, // target path + }, + &animation_3__sampler_115, // animation sampler + { + 38, // target node index + ACP__ROTATION, // target path + }, + &animation_3__sampler_116, // animation sampler + { + 38, // target node index + ACP__SCALE, // target path + }, +}; + +const AnimationSampler animation_4__sampler_0 = { + accessor_486, // input, keyframe timestamps + accessor_487, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_1 = { + accessor_486, // input, keyframe timestamps + accessor_488, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_2 = { + accessor_486, // input, keyframe timestamps + accessor_489, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_3 = { + accessor_490, // input, keyframe timestamps + accessor_491, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_4 = { + accessor_490, // input, keyframe timestamps + accessor_492, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_5 = { + accessor_490, // input, keyframe timestamps + accessor_493, 
// output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_6 = { + accessor_490, // input, keyframe timestamps + accessor_494, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_7 = { + accessor_490, // input, keyframe timestamps + accessor_495, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_8 = { + accessor_490, // input, keyframe timestamps + accessor_496, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_9 = { + accessor_486, // input, keyframe timestamps + accessor_497, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_10 = { + accessor_486, // input, keyframe timestamps + accessor_498, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_11 = { + accessor_490, // input, keyframe timestamps + accessor_499, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_12 = { + accessor_486, // input, keyframe timestamps + accessor_500, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_13 = { + accessor_486, // input, keyframe timestamps + accessor_501, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_14 = { + accessor_486, // input, keyframe timestamps + accessor_502, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_15 = { + accessor_490, // input, keyframe timestamps + accessor_503, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_16 = { + accessor_490, // input, keyframe timestamps + accessor_504, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_17 = { + accessor_490, // input, keyframe timestamps + accessor_505, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_18 = { + accessor_490, // input, keyframe timestamps + accessor_506, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_19 = { + accessor_490, // input, keyframe timestamps + accessor_507, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_20 = { + accessor_490, // input, keyframe timestamps + accessor_508, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_21 = { + accessor_490, // input, keyframe timestamps + accessor_509, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_22 = { + accessor_490, // input, keyframe timestamps + accessor_510, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_23 = { + accessor_490, // input, keyframe timestamps + accessor_511, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_24 = { + 
accessor_486, // input, keyframe timestamps + accessor_512, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_25 = { + accessor_486, // input, keyframe timestamps + accessor_513, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_26 = { + accessor_486, // input, keyframe timestamps + accessor_514, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_27 = { + accessor_490, // input, keyframe timestamps + accessor_515, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_28 = { + accessor_490, // input, keyframe timestamps + accessor_516, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_29 = { + accessor_490, // input, keyframe timestamps + accessor_517, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_30 = { + accessor_490, // input, keyframe timestamps + accessor_518, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_31 = { + accessor_490, // input, keyframe timestamps + accessor_519, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_32 = { + accessor_490, // input, keyframe timestamps + accessor_520, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_33 = { + accessor_490, // input, keyframe timestamps + accessor_521, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_34 = { + accessor_490, // input, keyframe timestamps + accessor_522, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_35 = { + accessor_490, // input, keyframe timestamps + accessor_523, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_36 = { + accessor_490, // input, keyframe timestamps + accessor_524, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_37 = { + accessor_490, // input, keyframe timestamps + accessor_525, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_38 = { + accessor_490, // input, keyframe timestamps + accessor_526, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_39 = { + accessor_490, // input, keyframe timestamps + accessor_527, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_40 = { + accessor_490, // input, keyframe timestamps + accessor_528, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_41 = { + accessor_490, // input, keyframe timestamps + accessor_529, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_42 = { + accessor_490, // input, keyframe timestamps + accessor_530, // output, keyframe values (void *) + accessor_490__length, // length +}; 
+ +const AnimationSampler animation_4__sampler_43 = { + accessor_490, // input, keyframe timestamps + accessor_531, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_44 = { + accessor_490, // input, keyframe timestamps + accessor_532, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_45 = { + accessor_490, // input, keyframe timestamps + accessor_533, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_46 = { + accessor_490, // input, keyframe timestamps + accessor_534, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_47 = { + accessor_490, // input, keyframe timestamps + accessor_535, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_48 = { + accessor_490, // input, keyframe timestamps + accessor_536, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_49 = { + accessor_490, // input, keyframe timestamps + accessor_537, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_50 = { + accessor_490, // input, keyframe timestamps + accessor_538, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_51 = { + accessor_490, // input, keyframe timestamps + accessor_539, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_52 = { + accessor_490, // input, keyframe timestamps + accessor_540, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_53 = { + accessor_490, // input, keyframe timestamps + accessor_541, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_54 = { + accessor_490, // input, keyframe timestamps + accessor_542, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_55 = { + accessor_490, // input, keyframe timestamps + accessor_543, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_56 = { + accessor_490, // input, keyframe timestamps + accessor_544, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_57 = { + accessor_490, // input, keyframe timestamps + accessor_545, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_58 = { + accessor_490, // input, keyframe timestamps + accessor_546, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_59 = { + accessor_490, // input, keyframe timestamps + accessor_547, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_60 = { + accessor_490, // input, keyframe timestamps + accessor_548, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_61 = { + accessor_490, // input, keyframe timestamps + accessor_549, // output, 
keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_62 = { + accessor_490, // input, keyframe timestamps + accessor_550, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_63 = { + accessor_490, // input, keyframe timestamps + accessor_551, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_64 = { + accessor_490, // input, keyframe timestamps + accessor_552, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_65 = { + accessor_490, // input, keyframe timestamps + accessor_553, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_66 = { + accessor_490, // input, keyframe timestamps + accessor_554, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_67 = { + accessor_490, // input, keyframe timestamps + accessor_555, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_68 = { + accessor_490, // input, keyframe timestamps + accessor_556, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_69 = { + accessor_490, // input, keyframe timestamps + accessor_557, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_70 = { + accessor_490, // input, keyframe timestamps + accessor_558, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_71 = { + accessor_490, // input, keyframe timestamps + accessor_559, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_72 = { + accessor_490, // input, keyframe timestamps + accessor_560, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_73 = { + accessor_490, // input, keyframe timestamps + accessor_561, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_74 = { + accessor_490, // input, keyframe timestamps + accessor_562, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_75 = { + accessor_490, // input, keyframe timestamps + accessor_563, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_76 = { + accessor_490, // input, keyframe timestamps + accessor_564, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_77 = { + accessor_490, // input, keyframe timestamps + accessor_565, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_78 = { + accessor_490, // input, keyframe timestamps + accessor_566, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_79 = { + accessor_490, // input, keyframe timestamps + accessor_567, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_80 = { + accessor_490, 
// input, keyframe timestamps + accessor_568, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_81 = { + accessor_490, // input, keyframe timestamps + accessor_569, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_82 = { + accessor_490, // input, keyframe timestamps + accessor_570, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_83 = { + accessor_490, // input, keyframe timestamps + accessor_571, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_84 = { + accessor_490, // input, keyframe timestamps + accessor_572, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_85 = { + accessor_490, // input, keyframe timestamps + accessor_573, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_86 = { + accessor_490, // input, keyframe timestamps + accessor_574, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_87 = { + accessor_486, // input, keyframe timestamps + accessor_575, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_88 = { + accessor_490, // input, keyframe timestamps + accessor_576, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_89 = { + accessor_490, // input, keyframe timestamps + accessor_577, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_90 = { + accessor_490, // input, keyframe timestamps + accessor_578, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_91 = { + accessor_490, // input, keyframe timestamps + accessor_579, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_92 = { + accessor_490, // input, keyframe timestamps + accessor_580, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_93 = { + accessor_490, // input, keyframe timestamps + accessor_581, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_94 = { + accessor_490, // input, keyframe timestamps + accessor_582, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_95 = { + accessor_490, // input, keyframe timestamps + accessor_583, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_96 = { + accessor_490, // input, keyframe timestamps + accessor_584, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_97 = { + accessor_490, // input, keyframe timestamps + accessor_585, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_98 = { + accessor_490, // input, keyframe timestamps + accessor_586, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const 
AnimationSampler animation_4__sampler_99 = { + accessor_490, // input, keyframe timestamps + accessor_587, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_100 = { + accessor_490, // input, keyframe timestamps + accessor_588, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_101 = { + accessor_490, // input, keyframe timestamps + accessor_589, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_102 = { + accessor_490, // input, keyframe timestamps + accessor_590, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_103 = { + accessor_490, // input, keyframe timestamps + accessor_591, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_104 = { + accessor_490, // input, keyframe timestamps + accessor_592, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_105 = { + accessor_486, // input, keyframe timestamps + accessor_593, // output, keyframe values (void *) + accessor_486__length, // length +}; + +const AnimationSampler animation_4__sampler_106 = { + accessor_490, // input, keyframe timestamps + accessor_594, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_107 = { + accessor_490, // input, keyframe timestamps + accessor_595, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_108 = { + accessor_490, // input, keyframe timestamps + accessor_596, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_109 = { + accessor_490, // input, keyframe timestamps + accessor_597, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_110 = { + accessor_490, // input, keyframe timestamps + accessor_598, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_111 = { + accessor_490, // input, keyframe timestamps + accessor_599, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_112 = { + accessor_490, // input, keyframe timestamps + accessor_600, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_113 = { + accessor_490, // input, keyframe timestamps + accessor_601, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_114 = { + accessor_490, // input, keyframe timestamps + accessor_602, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_115 = { + accessor_490, // input, keyframe timestamps + accessor_603, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationSampler animation_4__sampler_116 = { + accessor_490, // input, keyframe timestamps + accessor_604, // output, keyframe values (void *) + accessor_490__length, // length +}; + +const AnimationChannel animation_4__channels[] = { + &animation_4__sampler_0, // animation sampler + { + 26, // target node 
index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_1, // animation sampler + { + 26, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_2, // animation sampler + { + 26, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_3, // animation sampler + { + 1, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_4, // animation sampler + { + 1, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_5, // animation sampler + { + 1, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_6, // animation sampler + { + 0, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_7, // animation sampler + { + 0, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_8, // animation sampler + { + 0, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_9, // animation sampler + { + 23, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_10, // animation sampler + { + 23, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_11, // animation sampler + { + 23, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_12, // animation sampler + { + 22, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_13, // animation sampler + { + 22, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_14, // animation sampler + { + 22, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_15, // animation sampler + { + 4, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_16, // animation sampler + { + 4, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_17, // animation sampler + { + 4, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_18, // animation sampler + { + 3, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_19, // animation sampler + { + 3, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_20, // animation sampler + { + 3, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_21, // animation sampler + { + 2, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_22, // animation sampler + { + 2, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_23, // animation sampler + { + 2, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_24, // animation sampler + { + 6, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_25, // animation sampler + { + 6, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_26, // animation sampler + { + 6, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_27, // animation sampler + { + 5, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_28, // animation sampler + { + 5, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_29, // animation sampler + { + 5, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_30, // animation sampler + { + 12, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_31, 
// animation sampler + { + 12, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_32, // animation sampler + { + 12, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_33, // animation sampler + { + 11, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_34, // animation sampler + { + 11, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_35, // animation sampler + { + 11, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_36, // animation sampler + { + 10, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_37, // animation sampler + { + 10, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_38, // animation sampler + { + 10, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_39, // animation sampler + { + 9, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_40, // animation sampler + { + 9, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_41, // animation sampler + { + 9, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_42, // animation sampler + { + 8, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_43, // animation sampler + { + 8, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_44, // animation sampler + { + 8, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_45, // animation sampler + { + 7, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_46, // animation sampler + { + 7, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_47, // animation sampler + { + 7, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_48, // animation sampler + { + 15, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_49, // animation sampler + { + 15, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_50, // animation sampler + { + 15, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_51, // animation sampler + { + 14, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_52, // animation sampler + { + 14, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_53, // animation sampler + { + 14, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_54, // animation sampler + { + 13, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_55, // animation sampler + { + 13, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_56, // animation sampler + { + 13, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_57, // animation sampler + { + 21, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_58, // animation sampler + { + 21, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_59, // animation sampler + { + 21, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_60, // animation sampler + { + 20, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_61, // animation sampler + { + 20, // target node index + 
ACP__ROTATION, // target path + }, + &animation_4__sampler_62, // animation sampler + { + 20, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_63, // animation sampler + { + 19, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_64, // animation sampler + { + 19, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_65, // animation sampler + { + 19, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_66, // animation sampler + { + 18, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_67, // animation sampler + { + 18, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_68, // animation sampler + { + 18, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_69, // animation sampler + { + 17, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_70, // animation sampler + { + 17, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_71, // animation sampler + { + 17, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_72, // animation sampler + { + 16, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_73, // animation sampler + { + 16, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_74, // animation sampler + { + 16, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_75, // animation sampler + { + 25, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_76, // animation sampler + { + 25, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_77, // animation sampler + { + 25, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_78, // animation sampler + { + 24, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_79, // animation sampler + { + 24, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_80, // animation sampler + { + 24, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_81, // animation sampler + { + 27, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_82, // animation sampler + { + 27, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_83, // animation sampler + { + 27, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_84, // animation sampler + { + 28, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_85, // animation sampler + { + 28, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_86, // animation sampler + { + 28, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_87, // animation sampler + { + 31, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_88, // animation sampler + { + 31, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_89, // animation sampler + { + 31, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_90, // animation sampler + { + 30, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_91, // animation sampler + { + 30, // target node index + ACP__ROTATION, // target path + }, + 
&animation_4__sampler_92, // animation sampler + { + 30, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_93, // animation sampler + { + 29, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_94, // animation sampler + { + 29, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_95, // animation sampler + { + 29, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_96, // animation sampler + { + 32, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_97, // animation sampler + { + 32, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_98, // animation sampler + { + 32, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_99, // animation sampler + { + 33, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_100, // animation sampler + { + 33, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_101, // animation sampler + { + 33, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_102, // animation sampler + { + 34, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_103, // animation sampler + { + 34, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_104, // animation sampler + { + 34, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_105, // animation sampler + { + 37, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_106, // animation sampler + { + 37, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_107, // animation sampler + { + 37, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_108, // animation sampler + { + 36, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_109, // animation sampler + { + 36, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_110, // animation sampler + { + 36, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_111, // animation sampler + { + 35, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_112, // animation sampler + { + 35, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_113, // animation sampler + { + 35, // target node index + ACP__SCALE, // target path + }, + &animation_4__sampler_114, // animation sampler + { + 38, // target node index + ACP__TRANSLATION, // target path + }, + &animation_4__sampler_115, // animation sampler + { + 38, // target node index + ACP__ROTATION, // target path + }, + &animation_4__sampler_116, // animation sampler + { + 38, // target node index + ACP__SCALE, // target path + }, +}; + +}