/*++
Copyright (c) Microsoft Corporation. All rights reserved.
Module Name:
xnamath.h
Abstract:
XNA math library for Windows and Xbox 360
--*/
  8. #if defined(_MSC_VER) && (_MSC_VER > 1000)
  9. #pragma once
  10. #endif
  11. #ifndef __XNAMATH_H__
  12. #define __XNAMATH_H__
  13. #ifdef __XBOXMATH_H__
  14. #error XNAMATH and XBOXMATH are incompatible in the same compilation module. Use one or the other.
  15. #endif
  16. #define XNAMATH_VERSION 203
  17. #if !defined(_XM_X64_) && !defined(_XM_X86_)
  18. #if defined(_M_AMD64) || defined(_AMD64_)
  19. #define _XM_X64_
  20. #elif defined(_M_IX86) || defined(_X86_)
  21. #define _XM_X86_
  22. #endif
  23. #endif
  24. #if !defined(_XM_BIGENDIAN_) && !defined(_XM_LITTLEENDIAN_)
  25. #if defined(_XM_X64_) || defined(_XM_X86_)
  26. #define _XM_LITTLEENDIAN_
  27. #elif defined(_XBOX_VER)
  28. #define _XM_BIGENDIAN_
  29. #else
  30. #error xnamath.h only supports x86, x64, or XBox 360 targets
  31. #endif
  32. #endif
  33. #if defined(_XM_X86_) || defined(_XM_X64_)
  34. #define _XM_SSE_INTRINSICS_
  35. #if !defined(__cplusplus) && !defined(_XM_NO_INTRINSICS_)
  36. #error xnamath.h only supports C compliation for Xbox 360 targets and no intrinsics cases for x86/x64
  37. #endif
  38. #elif defined(_XBOX_VER)
  39. #if !defined(__VMX128_SUPPORTED) && !defined(_XM_NO_INTRINSICS_)
  40. #error xnamath.h requires VMX128 compiler support for XBOX 360
  41. #endif // !__VMX128_SUPPORTED && !_XM_NO_INTRINSICS_
  42. #define _XM_VMX128_INTRINSICS_
  43. #else
  44. #error xnamath.h only supports x86, x64, or XBox 360 targets
  45. #endif
  46. #if defined(_XM_SSE_INTRINSICS_)
  47. #ifndef _XM_NO_INTRINSICS_
  48. #include <xmmintrin.h>
  49. #include <emmintrin.h>
  50. #endif
  51. #elif defined(_XM_VMX128_INTRINSICS_)
  52. #error This version of xnamath.h is for Windows use only
  53. #endif
  54. #if defined(_XM_SSE_INTRINSICS_)
  55. #pragma warning(push)
  56. #pragma warning(disable:4985)
  57. #endif
  58. #include <math.h>
  59. #if defined(_XM_SSE_INTRINSICS_)
  60. #pragma warning(pop)
  61. #endif
  62. #include <sal.h>
  63. #if !defined(XMINLINE)
  64. #if !defined(XM_NO_MISALIGNED_VECTOR_ACCESS)
  65. #define XMINLINE __inline
  66. #else
  67. #define XMINLINE __forceinline
  68. #endif
  69. #endif
  70. #if !defined(XMFINLINE)
  71. #define XMFINLINE __forceinline
  72. #endif
  73. #if !defined(XMDEBUG)
  74. #if defined(_DEBUG)
  75. #define XMDEBUG
  76. #endif
  77. #endif // !XMDEBUG
  78. #if !defined(XMASSERT)
  79. #if defined(_PREFAST_)
  80. #define XMASSERT(Expression) __analysis_assume((Expression))
  81. #elif defined(XMDEBUG) // !_PREFAST_
  82. #define XMASSERT(Expression) ((VOID)((Expression) || (XMAssert(#Expression, __FILE__, __LINE__), 0)))
  83. #else // !XMDEBUG
  84. #define XMASSERT(Expression) ((VOID)0)
  85. #endif // !XMDEBUG
  86. #endif // !XMASSERT
  87. #if !defined(XM_NO_ALIGNMENT)
  88. #define _DECLSPEC_ALIGN_16_ __declspec(align(16))
  89. #else
  90. #define _DECLSPEC_ALIGN_16_
  91. #endif
  92. #if defined(_MSC_VER) && (_MSC_VER<1500) && (_MSC_VER>=1400)
  93. #define _XM_ISVS2005_
  94. #endif
  95. /****************************************************************************
  96. *
  97. * Constant definitions
  98. *
  99. ****************************************************************************/
  100. #define XM_PI 3.141592654f
  101. #define XM_2PI 6.283185307f
  102. #define XM_1DIVPI 0.318309886f
  103. #define XM_1DIV2PI 0.159154943f
  104. #define XM_PIDIV2 1.570796327f
  105. #define XM_PIDIV4 0.785398163f
  106. #define XM_SELECT_0 0x00000000
  107. #define XM_SELECT_1 0xFFFFFFFF
  108. #define XM_PERMUTE_0X 0x00010203
  109. #define XM_PERMUTE_0Y 0x04050607
  110. #define XM_PERMUTE_0Z 0x08090A0B
  111. #define XM_PERMUTE_0W 0x0C0D0E0F
  112. #define XM_PERMUTE_1X 0x10111213
  113. #define XM_PERMUTE_1Y 0x14151617
  114. #define XM_PERMUTE_1Z 0x18191A1B
  115. #define XM_PERMUTE_1W 0x1C1D1E1F
  116. #define XM_CRMASK_CR6 0x000000F0
  117. #define XM_CRMASK_CR6TRUE 0x00000080
  118. #define XM_CRMASK_CR6FALSE 0x00000020
  119. #define XM_CRMASK_CR6BOUNDS XM_CRMASK_CR6FALSE
  120. #define XM_CACHE_LINE_SIZE 64
  121. /****************************************************************************
  122. *
  123. * Macros
  124. *
  125. ****************************************************************************/
  126. // Unit conversion
  127. XMFINLINE FLOAT XMConvertToRadians(FLOAT fDegrees) { return fDegrees * (XM_PI / 180.0f); }
  128. XMFINLINE FLOAT XMConvertToDegrees(FLOAT fRadians) { return fRadians * (180.0f / XM_PI); }
  129. // Condition register evaluation proceeding a recording (Rc) comparison
  130. #define XMComparisonAllTrue(CR) (((CR) & XM_CRMASK_CR6TRUE) == XM_CRMASK_CR6TRUE)
  131. #define XMComparisonAnyTrue(CR) (((CR) & XM_CRMASK_CR6FALSE) != XM_CRMASK_CR6FALSE)
  132. #define XMComparisonAllFalse(CR) (((CR) & XM_CRMASK_CR6FALSE) == XM_CRMASK_CR6FALSE)
  133. #define XMComparisonAnyFalse(CR) (((CR) & XM_CRMASK_CR6TRUE) != XM_CRMASK_CR6TRUE)
  134. #define XMComparisonMixed(CR) (((CR) & XM_CRMASK_CR6) == 0)
  135. #define XMComparisonAllInBounds(CR) (((CR) & XM_CRMASK_CR6BOUNDS) == XM_CRMASK_CR6BOUNDS)
  136. #define XMComparisonAnyOutOfBounds(CR) (((CR) & XM_CRMASK_CR6BOUNDS) != XM_CRMASK_CR6BOUNDS)
  137. #define XMMin(a, b) (((a) < (b)) ? (a) : (b))
  138. #define XMMax(a, b) (((a) > (b)) ? (a) : (b))
  139. /****************************************************************************
  140. *
  141. * Data types
  142. *
  143. ****************************************************************************/
  144. #pragma warning(push)
  145. #pragma warning(disable:4201 4365 4324)
  146. #if !defined (_XM_X86_) && !defined(_XM_X64_)
  147. #pragma bitfield_order(push)
  148. #pragma bitfield_order(lsb_to_msb)
  149. #endif // !_XM_X86_ && !_XM_X64_
  150. #if defined(_XM_NO_INTRINSICS_) && !defined(_XBOX_VER)
  151. // The __vector4 structure is an intrinsic on Xbox but must be separately defined
  152. // for x86/x64
  153. typedef struct __vector4
  154. {
  155. union
  156. {
  157. float vector4_f32[4];
  158. unsigned int vector4_u32[4];
  159. #ifndef XM_STRICT_VECTOR4
  160. struct
  161. {
  162. FLOAT x;
  163. FLOAT y;
  164. FLOAT z;
  165. FLOAT w;
  166. };
  167. FLOAT v[4];
  168. UINT u[4];
  169. #endif // !XM_STRICT_VECTOR4
  170. };
  171. } __vector4;
  172. #endif // _XM_NO_INTRINSICS_
  173. #if (defined (_XM_X86_) || defined(_XM_X64_)) && defined(_XM_NO_INTRINSICS_)
  174. typedef UINT __vector4i[4];
  175. #else
  176. typedef __declspec(align(16)) UINT __vector4i[4];
  177. #endif
  178. // Vector intrinsic: Four 32 bit floating point components aligned on a 16 byte
  179. // boundary and mapped to hardware vector registers
  180. #if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
  181. typedef __m128 XMVECTOR;
  182. #else
  183. typedef __vector4 XMVECTOR;
  184. #endif
  185. // Conversion types for constants
  186. typedef _DECLSPEC_ALIGN_16_ struct XMVECTORF32 {
  187. union {
  188. float f[4];
  189. XMVECTOR v;
  190. };
  191. #if defined(__cplusplus)
  192. inline operator XMVECTOR() const { return v; }
  193. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
  194. inline operator __m128i() const { return reinterpret_cast<const __m128i *>(&v)[0]; }
  195. inline operator __m128d() const { return reinterpret_cast<const __m128d *>(&v)[0]; }
  196. #endif
  197. #endif // __cplusplus
  198. } XMVECTORF32;
  199. typedef _DECLSPEC_ALIGN_16_ struct XMVECTORI32 {
  200. union {
  201. INT i[4];
  202. XMVECTOR v;
  203. };
  204. #if defined(__cplusplus)
  205. inline operator XMVECTOR() const { return v; }
  206. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
  207. inline operator __m128i() const { return reinterpret_cast<const __m128i *>(&v)[0]; }
  208. inline operator __m128d() const { return reinterpret_cast<const __m128d *>(&v)[0]; }
  209. #endif
  210. #endif // __cplusplus
  211. } XMVECTORI32;
  212. typedef _DECLSPEC_ALIGN_16_ struct XMVECTORU8 {
  213. union {
  214. BYTE u[16];
  215. XMVECTOR v;
  216. };
  217. #if defined(__cplusplus)
  218. inline operator XMVECTOR() const { return v; }
  219. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
  220. inline operator __m128i() const { return reinterpret_cast<const __m128i *>(&v)[0]; }
  221. inline operator __m128d() const { return reinterpret_cast<const __m128d *>(&v)[0]; }
  222. #endif
  223. #endif // __cplusplus
  224. } XMVECTORU8;
  225. typedef _DECLSPEC_ALIGN_16_ struct XMVECTORU32 {
  226. union {
  227. UINT u[4];
  228. XMVECTOR v;
  229. };
  230. #if defined(__cplusplus)
  231. inline operator XMVECTOR() const { return v; }
  232. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
  233. inline operator __m128i() const { return reinterpret_cast<const __m128i *>(&v)[0]; }
  234. inline operator __m128d() const { return reinterpret_cast<const __m128d *>(&v)[0]; }
  235. #endif
  236. #endif // __cplusplus
  237. } XMVECTORU32;
  238. // Fix-up for (1st-3rd) XMVECTOR parameters that are pass-in-register for x86 and Xbox 360, but not for other targets
  239. #if defined(_XM_VMX128_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
  240. typedef const XMVECTOR FXMVECTOR;
  241. #elif defined(_XM_X86_) && !defined(_XM_NO_INTRINSICS_)
  242. typedef const XMVECTOR FXMVECTOR;
  243. #elif defined(__cplusplus)
  244. typedef const XMVECTOR& FXMVECTOR;
  245. #else
  246. typedef const XMVECTOR FXMVECTOR;
  247. #endif
  248. // Fix-up for (4th+) XMVECTOR parameters to pass in-register for Xbox 360 and by reference otherwise
  249. #if defined(_XM_VMX128_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
  250. typedef const XMVECTOR CXMVECTOR;
  251. #elif defined(__cplusplus)
  252. typedef const XMVECTOR& CXMVECTOR;
  253. #else
  254. typedef const XMVECTOR CXMVECTOR;
  255. #endif
  256. // Vector operators
  257. #if defined(__cplusplus) && !defined(XM_NO_OPERATOR_OVERLOADS)
  258. XMVECTOR operator+ (FXMVECTOR V);
  259. XMVECTOR operator- (FXMVECTOR V);
  260. XMVECTOR& operator+= (XMVECTOR& V1, FXMVECTOR V2);
  261. XMVECTOR& operator-= (XMVECTOR& V1, FXMVECTOR V2);
  262. XMVECTOR& operator*= (XMVECTOR& V1, FXMVECTOR V2);
  263. XMVECTOR& operator/= (XMVECTOR& V1, FXMVECTOR V2);
  264. XMVECTOR& operator*= (XMVECTOR& V, FLOAT S);
  265. XMVECTOR& operator/= (XMVECTOR& V, FLOAT S);
  266. XMVECTOR operator+ (FXMVECTOR V1, FXMVECTOR V2);
  267. XMVECTOR operator- (FXMVECTOR V1, FXMVECTOR V2);
  268. XMVECTOR operator* (FXMVECTOR V1, FXMVECTOR V2);
  269. XMVECTOR operator/ (FXMVECTOR V1, FXMVECTOR V2);
  270. XMVECTOR operator* (FXMVECTOR V, FLOAT S);
  271. XMVECTOR operator* (FLOAT S, FXMVECTOR V);
  272. XMVECTOR operator/ (FXMVECTOR V, FLOAT S);
  273. #endif // __cplusplus && !XM_NO_OPERATOR_OVERLOADS
  274. // Matrix type: Sixteen 32 bit floating point components aligned on a
  275. // 16 byte boundary and mapped to four hardware vector registers
  276. #if (defined(_XM_X86_) || defined(_XM_X64_)) && defined(_XM_NO_INTRINSICS_)
  277. typedef struct _XMMATRIX
  278. #else
  279. typedef _DECLSPEC_ALIGN_16_ struct _XMMATRIX
  280. #endif
  281. {
  282. union
  283. {
  284. XMVECTOR r[4];
  285. struct
  286. {
  287. FLOAT _11, _12, _13, _14;
  288. FLOAT _21, _22, _23, _24;
  289. FLOAT _31, _32, _33, _34;
  290. FLOAT _41, _42, _43, _44;
  291. };
  292. FLOAT m[4][4];
  293. };
  294. #ifdef __cplusplus
  295. _XMMATRIX() {};
  296. _XMMATRIX(FXMVECTOR R0, FXMVECTOR R1, FXMVECTOR R2, CXMVECTOR R3);
  297. _XMMATRIX(FLOAT m00, FLOAT m01, FLOAT m02, FLOAT m03,
  298. FLOAT m10, FLOAT m11, FLOAT m12, FLOAT m13,
  299. FLOAT m20, FLOAT m21, FLOAT m22, FLOAT m23,
  300. FLOAT m30, FLOAT m31, FLOAT m32, FLOAT m33);
  301. _XMMATRIX(CONST FLOAT *pArray);
  302. FLOAT operator() (UINT Row, UINT Column) CONST { return m[Row][Column]; }
  303. FLOAT& operator() (UINT Row, UINT Column) { return m[Row][Column]; }
  304. _XMMATRIX& operator= (CONST _XMMATRIX& M);
  305. #ifndef XM_NO_OPERATOR_OVERLOADS
  306. _XMMATRIX& operator*= (CONST _XMMATRIX& M);
  307. _XMMATRIX operator* (CONST _XMMATRIX& M) CONST;
  308. #endif // !XM_NO_OPERATOR_OVERLOADS
  309. #endif // __cplusplus
  310. } XMMATRIX;
  311. // Fix-up for XMMATRIX parameters to pass in-register on Xbox 360, by reference otherwise
  312. #if defined(_XM_VMX128_INTRINSICS_)
  313. typedef const XMMATRIX CXMMATRIX;
  314. #elif defined(__cplusplus)
  315. typedef const XMMATRIX& CXMMATRIX;
  316. #else
  317. typedef const XMMATRIX CXMMATRIX;
  318. #endif
  319. // 16 bit floating point number consisting of a sign bit, a 5 bit biased
  320. // exponent, and a 10 bit mantissa
  321. //typedef WORD HALF;
  322. typedef USHORT HALF;
  323. // 2D Vector; 32 bit floating point components
  324. typedef struct _XMFLOAT2
  325. {
  326. FLOAT x;
  327. FLOAT y;
  328. #ifdef __cplusplus
  329. _XMFLOAT2() {};
  330. _XMFLOAT2(FLOAT _x, FLOAT _y) : x(_x), y(_y) {};
  331. _XMFLOAT2(CONST FLOAT *pArray);
  332. _XMFLOAT2& operator= (CONST _XMFLOAT2& Float2);
  333. #endif // __cplusplus
  334. } XMFLOAT2;
  335. // 2D Vector; 32 bit floating point components aligned on a 16 byte boundary
  336. #ifdef __cplusplus
  337. __declspec(align(16)) struct XMFLOAT2A : public XMFLOAT2
  338. {
  339. XMFLOAT2A() : XMFLOAT2() {};
  340. XMFLOAT2A(FLOAT _x, FLOAT _y) : XMFLOAT2(_x, _y) {};
  341. XMFLOAT2A(CONST FLOAT *pArray) : XMFLOAT2(pArray) {};
  342. XMFLOAT2A& operator= (CONST XMFLOAT2A& Float2);
  343. };
  344. #else
  345. typedef __declspec(align(16)) XMFLOAT2 XMFLOAT2A;
  346. #endif // __cplusplus
  347. // 2D Vector; 16 bit floating point components
  348. typedef struct _XMHALF2
  349. {
  350. HALF x;
  351. HALF y;
  352. #ifdef __cplusplus
  353. _XMHALF2() {};
  354. _XMHALF2(HALF _x, HALF _y) : x(_x), y(_y) {};
  355. _XMHALF2(CONST HALF *pArray);
  356. _XMHALF2(FLOAT _x, FLOAT _y);
  357. _XMHALF2(CONST FLOAT *pArray);
  358. _XMHALF2& operator= (CONST _XMHALF2& Half2);
  359. #endif // __cplusplus
  360. } XMHALF2;
  361. // 2D Vector; 16 bit signed normalized integer components
  362. typedef struct _XMSHORTN2
  363. {
  364. SHORT x;
  365. SHORT y;
  366. #ifdef __cplusplus
  367. _XMSHORTN2() {};
  368. _XMSHORTN2(SHORT _x, SHORT _y) : x(_x), y(_y) {};
  369. _XMSHORTN2(CONST SHORT *pArray);
  370. _XMSHORTN2(FLOAT _x, FLOAT _y);
  371. _XMSHORTN2(CONST FLOAT *pArray);
  372. _XMSHORTN2& operator= (CONST _XMSHORTN2& ShortN2);
  373. #endif // __cplusplus
  374. } XMSHORTN2;
  375. // 2D Vector; 16 bit signed integer components
  376. typedef struct _XMSHORT2
  377. {
  378. SHORT x;
  379. SHORT y;
  380. #ifdef __cplusplus
  381. _XMSHORT2() {};
  382. _XMSHORT2(SHORT _x, SHORT _y) : x(_x), y(_y) {};
  383. _XMSHORT2(CONST SHORT *pArray);
  384. _XMSHORT2(FLOAT _x, FLOAT _y);
  385. _XMSHORT2(CONST FLOAT *pArray);
  386. _XMSHORT2& operator= (CONST _XMSHORT2& Short2);
  387. #endif // __cplusplus
  388. } XMSHORT2;
  389. // 2D Vector; 16 bit unsigned normalized integer components
  390. typedef struct _XMUSHORTN2
  391. {
  392. USHORT x;
  393. USHORT y;
  394. #ifdef __cplusplus
  395. _XMUSHORTN2() {};
  396. _XMUSHORTN2(USHORT _x, USHORT _y) : x(_x), y(_y) {};
  397. _XMUSHORTN2(CONST USHORT *pArray);
  398. _XMUSHORTN2(FLOAT _x, FLOAT _y);
  399. _XMUSHORTN2(CONST FLOAT *pArray);
  400. _XMUSHORTN2& operator= (CONST _XMUSHORTN2& UShortN2);
  401. #endif // __cplusplus
  402. } XMUSHORTN2;
  403. // 2D Vector; 16 bit unsigned integer components
  404. typedef struct _XMUSHORT2
  405. {
  406. USHORT x;
  407. USHORT y;
  408. #ifdef __cplusplus
  409. _XMUSHORT2() {};
  410. _XMUSHORT2(USHORT _x, USHORT _y) : x(_x), y(_y) {};
  411. _XMUSHORT2(CONST USHORT *pArray);
  412. _XMUSHORT2(FLOAT _x, FLOAT _y);
  413. _XMUSHORT2(CONST FLOAT *pArray);
  414. _XMUSHORT2& operator= (CONST _XMUSHORT2& UShort2);
  415. #endif // __cplusplus
  416. } XMUSHORT2;
  417. // 3D Vector; 32 bit floating point components
  418. typedef struct _XMFLOAT3
  419. {
  420. FLOAT x;
  421. FLOAT y;
  422. FLOAT z;
  423. #ifdef __cplusplus
  424. _XMFLOAT3() {};
  425. _XMFLOAT3(FLOAT _x, FLOAT _y, FLOAT _z) : x(_x), y(_y), z(_z) {};
  426. _XMFLOAT3(CONST FLOAT *pArray);
  427. _XMFLOAT3& operator= (CONST _XMFLOAT3& Float3);
  428. #endif // __cplusplus
  429. } XMFLOAT3;
  430. // 3D Vector; 32 bit floating point components aligned on a 16 byte boundary
  431. #ifdef __cplusplus
  432. __declspec(align(16)) struct XMFLOAT3A : public XMFLOAT3
  433. {
  434. XMFLOAT3A() : XMFLOAT3() {};
  435. XMFLOAT3A(FLOAT _x, FLOAT _y, FLOAT _z) : XMFLOAT3(_x, _y, _z) {};
  436. XMFLOAT3A(CONST FLOAT *pArray) : XMFLOAT3(pArray) {};
  437. XMFLOAT3A& operator= (CONST XMFLOAT3A& Float3);
  438. };
  439. #else
  440. typedef __declspec(align(16)) XMFLOAT3 XMFLOAT3A;
  441. #endif // __cplusplus
  442. // 3D Vector; 11-11-10 bit normalized components packed into a 32 bit integer
  443. // The normalized 3D Vector is packed into 32 bits as follows: a 10 bit signed,
  444. // normalized integer for the z component and 11 bit signed, normalized
  445. // integers for the x and y components. The z component is stored in the
  446. // most significant bits and the x component in the least significant bits
  447. // (Z10Y11X11): [32] zzzzzzzz zzyyyyyy yyyyyxxx xxxxxxxx [0]
  448. typedef struct _XMHENDN3
  449. {
  450. union
  451. {
  452. struct
  453. {
  454. INT x : 11; // -1023/1023 to 1023/1023
  455. INT y : 11; // -1023/1023 to 1023/1023
  456. INT z : 10; // -511/511 to 511/511
  457. };
  458. UINT v;
  459. };
  460. #ifdef __cplusplus
  461. _XMHENDN3() {};
  462. _XMHENDN3(UINT Packed) : v(Packed) {};
  463. _XMHENDN3(FLOAT _x, FLOAT _y, FLOAT _z);
  464. _XMHENDN3(CONST FLOAT *pArray);
  465. operator UINT () { return v; }
  466. _XMHENDN3& operator= (CONST _XMHENDN3& HenDN3);
  467. _XMHENDN3& operator= (CONST UINT Packed);
  468. #endif // __cplusplus
  469. } XMHENDN3;
  470. // 3D Vector; 11-11-10 bit components packed into a 32 bit integer
  471. // The 3D Vector is packed into 32 bits as follows: a 10 bit signed,
  472. // integer for the z component and 11 bit signed integers for the
  473. // x and y components. The z component is stored in the
  474. // most significant bits and the x component in the least significant bits
  475. // (Z10Y11X11): [32] zzzzzzzz zzyyyyyy yyyyyxxx xxxxxxxx [0]
  476. typedef struct _XMHEND3
  477. {
  478. union
  479. {
  480. struct
  481. {
  482. INT x : 11; // -1023 to 1023
  483. INT y : 11; // -1023 to 1023
  484. INT z : 10; // -511 to 511
  485. };
  486. UINT v;
  487. };
  488. #ifdef __cplusplus
  489. _XMHEND3() {};
  490. _XMHEND3(UINT Packed) : v(Packed) {};
  491. _XMHEND3(FLOAT _x, FLOAT _y, FLOAT _z);
  492. _XMHEND3(CONST FLOAT *pArray);
  493. operator UINT () { return v; }
  494. _XMHEND3& operator= (CONST _XMHEND3& HenD3);
  495. _XMHEND3& operator= (CONST UINT Packed);
  496. #endif // __cplusplus
  497. } XMHEND3;
  498. // 3D Vector; 11-11-10 bit normalized components packed into a 32 bit integer
  499. // The normalized 3D Vector is packed into 32 bits as follows: a 10 bit unsigned,
  500. // normalized integer for the z component and 11 bit unsigned, normalized
  501. // integers for the x and y components. The z component is stored in the
  502. // most significant bits and the x component in the least significant bits
  503. // (Z10Y11X11): [32] zzzzzzzz zzyyyyyy yyyyyxxx xxxxxxxx [0]
  504. typedef struct _XMUHENDN3
  505. {
  506. union
  507. {
  508. struct
  509. {
  510. UINT x : 11; // 0/2047 to 2047/2047
  511. UINT y : 11; // 0/2047 to 2047/2047
  512. UINT z : 10; // 0/1023 to 1023/1023
  513. };
  514. UINT v;
  515. };
  516. #ifdef __cplusplus
  517. _XMUHENDN3() {};
  518. _XMUHENDN3(UINT Packed) : v(Packed) {};
  519. _XMUHENDN3(FLOAT _x, FLOAT _y, FLOAT _z);
  520. _XMUHENDN3(CONST FLOAT *pArray);
  521. operator UINT () { return v; }
  522. _XMUHENDN3& operator= (CONST _XMUHENDN3& UHenDN3);
  523. _XMUHENDN3& operator= (CONST UINT Packed);
  524. #endif // __cplusplus
  525. } XMUHENDN3;
  526. // 3D Vector; 11-11-10 bit components packed into a 32 bit integer
  527. // The 3D Vector is packed into 32 bits as follows: a 10 bit unsigned
  528. // integer for the z component and 11 bit unsigned integers
  529. // for the x and y components. The z component is stored in the
  530. // most significant bits and the x component in the least significant bits
  531. // (Z10Y11X11): [32] zzzzzzzz zzyyyyyy yyyyyxxx xxxxxxxx [0]
  532. typedef struct _XMUHEND3
  533. {
  534. union
  535. {
  536. struct
  537. {
  538. UINT x : 11; // 0 to 2047
  539. UINT y : 11; // 0 to 2047
  540. UINT z : 10; // 0 to 1023
  541. };
  542. UINT v;
  543. };
  544. #ifdef __cplusplus
  545. _XMUHEND3() {};
  546. _XMUHEND3(UINT Packed) : v(Packed) {};
  547. _XMUHEND3(FLOAT _x, FLOAT _y, FLOAT _z);
  548. _XMUHEND3(CONST FLOAT *pArray);
  549. operator UINT () { return v; }
  550. _XMUHEND3& operator= (CONST _XMUHEND3& UHenD3);
  551. _XMUHEND3& operator= (CONST UINT Packed);
  552. #endif // __cplusplus
  553. } XMUHEND3;
// 3D Vector; 10-11-11 bit normalized components packed into a 32 bit integer
// The normalized 3D Vector is packed into 32 bits as follows: a 10 bit signed,
// normalized integer for the x component and 11 bit signed, normalized
// integers for the y and z components. The z component is stored in the
// most significant bits and the x component in the least significant bits
// (Z11Y11X10): [32] zzzzzzzz zzzyyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMDHENN3
{
    union
    {
        struct
        {
            INT x : 10;     // -511/511 to 511/511
            INT y : 11;     // -1023/1023 to 1023/1023
            INT z : 11;     // -1023/1023 to 1023/1023
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMDHENN3() {};
    _XMDHENN3(UINT Packed) : v(Packed) {};
    _XMDHENN3(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMDHENN3(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMDHENN3& operator= (CONST _XMDHENN3& DHenN3);
    _XMDHENN3& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMDHENN3;
// 3D Vector; 10-11-11 bit components packed into a 32 bit integer
// The 3D Vector is packed into 32 bits as follows: a 10 bit signed
// integer for the x component and 11 bit signed integers for the
// y and z components. The z component is stored in the
// most significant bits and the x component in the least significant bits
// (Z11Y11X10): [32] zzzzzzzz zzzyyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMDHEN3
{
    union
    {
        struct
        {
            INT x : 10;     // -511 to 511
            INT y : 11;     // -1023 to 1023
            INT z : 11;     // -1023 to 1023
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMDHEN3() {};
    _XMDHEN3(UINT Packed) : v(Packed) {};
    _XMDHEN3(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMDHEN3(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMDHEN3& operator= (CONST _XMDHEN3& DHen3);
    _XMDHEN3& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMDHEN3;
// 3D Vector; 10-11-11 bit normalized components packed into a 32 bit integer
// The normalized 3D Vector is packed into 32 bits as follows: a 10 bit unsigned,
// normalized integer for the x component and 11 bit unsigned, normalized
// integers for the y and z components. The z component is stored in the
// most significant bits and the x component in the least significant bits
// (Z11Y11X10): [32] zzzzzzzz zzzyyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMUDHENN3
{
    union
    {
        struct
        {
            UINT x : 10;    // 0/1023 to 1023/1023
            UINT y : 11;    // 0/2047 to 2047/2047
            UINT z : 11;    // 0/2047 to 2047/2047
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMUDHENN3() {};
    _XMUDHENN3(UINT Packed) : v(Packed) {};
    _XMUDHENN3(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMUDHENN3(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMUDHENN3& operator= (CONST _XMUDHENN3& UDHenN3);
    _XMUDHENN3& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMUDHENN3;
// 3D Vector; 10-11-11 bit components packed into a 32 bit integer
// The 3D Vector is packed into 32 bits as follows: a 10 bit unsigned
// integer for the x component and 11 bit unsigned integers
// for the y and z components. The z component is stored in the
// most significant bits and the x component in the least significant bits
// (Z11Y11X10): [32] zzzzzzzz zzzyyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMUDHEN3
{
    union
    {
        struct
        {
            UINT x : 10;    // 0 to 1023
            UINT y : 11;    // 0 to 2047
            UINT z : 11;    // 0 to 2047
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMUDHEN3() {};
    _XMUDHEN3(UINT Packed) : v(Packed) {};
    _XMUDHEN3(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMUDHEN3(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMUDHEN3& operator= (CONST _XMUDHEN3& UDHen3);
    _XMUDHEN3& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMUDHEN3;
// 3D vector: 5/6/5 unsigned integer components
// (Z5Y6X5): [16] zzzzzyyy yyyxxxxx [0]
typedef struct _XMU565
{
    union
    {
        struct
        {
            USHORT x : 5;   // 0 to 31
            USHORT y : 6;   // 0 to 63
            USHORT z : 5;   // 0 to 31
        };
        USHORT v;           // Packed 16-bit representation
    };

#ifdef __cplusplus

    _XMU565() {};
    _XMU565(USHORT Packed) : v(Packed) {};
    _XMU565(CHAR _x, CHAR _y, CHAR _z) : x(_x), y(_y), z(_z) {};
    _XMU565(CONST CHAR *pArray);
    _XMU565(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMU565(CONST FLOAT *pArray);

    operator USHORT () { return v; }

    _XMU565& operator= (CONST _XMU565& U565);
    _XMU565& operator= (CONST USHORT Packed);

#endif // __cplusplus

} XMU565;
// 3D vector: 11/11/10 floating-point components
// The 3D vector is packed into 32 bits as follows: a 5-bit biased exponent
// and 6-bit mantissa for x component, a 5-bit biased exponent and
// 6-bit mantissa for y component, a 5-bit biased exponent and a 5-bit
// mantissa for z. The z component is stored in the most significant bits
// and the x component in the least significant bits. No sign bits so
// all partial-precision numbers are positive.
// (Z10Y11X11): [32] ZZZZZzzz zzzYYYYY yyyyyyXX XXXxxxxx [0]
typedef struct _XMFLOAT3PK
{
    union
    {
        struct
        {
            UINT xm : 6;    // x-mantissa
            UINT xe : 5;    // x-exponent (biased)
            UINT ym : 6;    // y-mantissa
            UINT ye : 5;    // y-exponent (biased)
            UINT zm : 5;    // z-mantissa
            UINT ze : 5;    // z-exponent (biased)
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMFLOAT3PK() {};
    _XMFLOAT3PK(UINT Packed) : v(Packed) {};
    _XMFLOAT3PK(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMFLOAT3PK(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMFLOAT3PK& operator= (CONST _XMFLOAT3PK& float3pk);
    _XMFLOAT3PK& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMFLOAT3PK;
// 3D vector: 9/9/9 floating-point components with shared 5-bit exponent
// The 3D vector is packed into 32 bits as follows: a 5-bit biased exponent
// with 9-bit mantissa for the x, y, and z component. The shared exponent
// is stored in the most significant bits and the x component mantissa is in
// the least significant bits. No sign bits so all partial-precision numbers
// are positive.
// (E5Z9Y9X9): [32] EEEEEzzz zzzzzzyy yyyyyyyx xxxxxxxx [0]
typedef struct _XMFLOAT3SE
{
    union
    {
        struct
        {
            UINT xm : 9;    // x-mantissa
            UINT ym : 9;    // y-mantissa
            UINT zm : 9;    // z-mantissa
            UINT e : 5;     // shared exponent (biased)
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMFLOAT3SE() {};
    _XMFLOAT3SE(UINT Packed) : v(Packed) {};
    _XMFLOAT3SE(FLOAT _x, FLOAT _y, FLOAT _z);
    _XMFLOAT3SE(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMFLOAT3SE& operator= (CONST _XMFLOAT3SE& float3se);
    _XMFLOAT3SE& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMFLOAT3SE;
// 4D Vector; 32 bit floating point components
typedef struct _XMFLOAT4
{
    FLOAT x;
    FLOAT y;
    FLOAT z;
    FLOAT w;

#ifdef __cplusplus

    _XMFLOAT4() {};
    _XMFLOAT4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMFLOAT4(CONST FLOAT *pArray);

    _XMFLOAT4& operator= (CONST _XMFLOAT4& Float4);

#endif // __cplusplus

} XMFLOAT4;
// 4D Vector; 32 bit floating point components aligned on a 16 byte boundary
#ifdef __cplusplus
__declspec(align(16)) struct XMFLOAT4A : public XMFLOAT4
{
    XMFLOAT4A() : XMFLOAT4() {};
    XMFLOAT4A(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w) : XMFLOAT4(_x, _y, _z, _w) {};
    XMFLOAT4A(CONST FLOAT *pArray) : XMFLOAT4(pArray) {};

    XMFLOAT4A& operator= (CONST XMFLOAT4A& Float4);
};
#else
// C mode: alignment applied to the typedef instead of a derived struct
typedef __declspec(align(16)) XMFLOAT4 XMFLOAT4A;
#endif // __cplusplus
// 4D Vector; 16 bit floating point components
typedef struct _XMHALF4
{
    HALF x;
    HALF y;
    HALF z;
    HALF w;

#ifdef __cplusplus

    _XMHALF4() {};
    _XMHALF4(HALF _x, HALF _y, HALF _z, HALF _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMHALF4(CONST HALF *pArray);
    _XMHALF4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMHALF4(CONST FLOAT *pArray);

    _XMHALF4& operator= (CONST _XMHALF4& Half4);

#endif // __cplusplus

} XMHALF4;
// 4D Vector; 16 bit signed normalized integer components
typedef struct _XMSHORTN4
{
    SHORT x;
    SHORT y;
    SHORT z;
    SHORT w;

#ifdef __cplusplus

    _XMSHORTN4() {};
    _XMSHORTN4(SHORT _x, SHORT _y, SHORT _z, SHORT _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMSHORTN4(CONST SHORT *pArray);
    _XMSHORTN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMSHORTN4(CONST FLOAT *pArray);

    _XMSHORTN4& operator= (CONST _XMSHORTN4& ShortN4);

#endif // __cplusplus

} XMSHORTN4;
// 4D Vector; 16 bit signed integer components
typedef struct _XMSHORT4
{
    SHORT x;
    SHORT y;
    SHORT z;
    SHORT w;

#ifdef __cplusplus

    _XMSHORT4() {};
    _XMSHORT4(SHORT _x, SHORT _y, SHORT _z, SHORT _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMSHORT4(CONST SHORT *pArray);
    _XMSHORT4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMSHORT4(CONST FLOAT *pArray);

    _XMSHORT4& operator= (CONST _XMSHORT4& Short4);

#endif // __cplusplus

} XMSHORT4;
// 4D Vector; 16 bit unsigned normalized integer components
typedef struct _XMUSHORTN4
{
    USHORT x;
    USHORT y;
    USHORT z;
    USHORT w;

#ifdef __cplusplus

    _XMUSHORTN4() {};
    _XMUSHORTN4(USHORT _x, USHORT _y, USHORT _z, USHORT _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMUSHORTN4(CONST USHORT *pArray);
    _XMUSHORTN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUSHORTN4(CONST FLOAT *pArray);

    _XMUSHORTN4& operator= (CONST _XMUSHORTN4& UShortN4);

#endif // __cplusplus

} XMUSHORTN4;
// 4D Vector; 16 bit unsigned integer components
typedef struct _XMUSHORT4
{
    USHORT x;
    USHORT y;
    USHORT z;
    USHORT w;

#ifdef __cplusplus

    _XMUSHORT4() {};
    _XMUSHORT4(USHORT _x, USHORT _y, USHORT _z, USHORT _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMUSHORT4(CONST USHORT *pArray);
    _XMUSHORT4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUSHORT4(CONST FLOAT *pArray);

    _XMUSHORT4& operator= (CONST _XMUSHORT4& UShort4);

#endif // __cplusplus

} XMUSHORT4;
// 4D Vector; 10-10-10-2 bit normalized components packed into a 32 bit integer
// The normalized 4D Vector is packed into 32 bits as follows: a 2 bit unsigned,
// normalized integer for the w component and 10 bit signed, normalized
// integers for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W2Z10Y10X10): [32] wwzzzzzz zzzzyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMXDECN4
{
    union
    {
        struct
        {
            INT x : 10;     // -511/511 to 511/511
            INT y : 10;     // -511/511 to 511/511
            INT z : 10;     // -511/511 to 511/511
            UINT w : 2;     // 0/3 to 3/3
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMXDECN4() {};
    _XMXDECN4(UINT Packed) : v(Packed) {};
    _XMXDECN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMXDECN4(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMXDECN4& operator= (CONST _XMXDECN4& XDecN4);
    _XMXDECN4& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMXDECN4;
// 4D Vector; 10-10-10-2 bit components packed into a 32 bit integer
// The 4D Vector is packed into 32 bits as follows: a 2 bit unsigned
// integer for the w component and 10 bit signed integers for the
// z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W2Z10Y10X10): [32] wwzzzzzz zzzzyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMXDEC4
{
    union
    {
        struct
        {
            INT x : 10;     // -511 to 511
            INT y : 10;     // -511 to 511
            INT z : 10;     // -511 to 511
            UINT w : 2;     // 0 to 3
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMXDEC4() {};
    _XMXDEC4(UINT Packed) : v(Packed) {};
    _XMXDEC4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMXDEC4(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMXDEC4& operator= (CONST _XMXDEC4& XDec4);
    _XMXDEC4& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMXDEC4;
// 4D Vector; 10-10-10-2 bit normalized components packed into a 32 bit integer
// The normalized 4D Vector is packed into 32 bits as follows: a 2 bit signed,
// normalized integer for the w component and 10 bit signed, normalized
// integers for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W2Z10Y10X10): [32] wwzzzzzz zzzzyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMDECN4
{
    union
    {
        struct
        {
            INT x : 10;     // -511/511 to 511/511
            INT y : 10;     // -511/511 to 511/511
            INT z : 10;     // -511/511 to 511/511
            INT w : 2;      // -1/1 to 1/1
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMDECN4() {};
    _XMDECN4(UINT Packed) : v(Packed) {};
    _XMDECN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMDECN4(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMDECN4& operator= (CONST _XMDECN4& DecN4);
    _XMDECN4& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMDECN4;
// 4D Vector; 10-10-10-2 bit components packed into a 32 bit integer
// The 4D Vector is packed into 32 bits as follows: a 2 bit signed
// integer for the w component and 10 bit signed integers for the
// z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W2Z10Y10X10): [32] wwzzzzzz zzzzyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMDEC4
{
    union
    {
        struct
        {
            INT x : 10;     // -511 to 511
            INT y : 10;     // -511 to 511
            INT z : 10;     // -511 to 511
            INT w : 2;      // -1 to 1
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMDEC4() {};
    _XMDEC4(UINT Packed) : v(Packed) {};
    _XMDEC4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMDEC4(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMDEC4& operator= (CONST _XMDEC4& Dec4);
    _XMDEC4& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMDEC4;
// 4D Vector; 10-10-10-2 bit normalized components packed into a 32 bit integer
// The normalized 4D Vector is packed into 32 bits as follows: a 2 bit unsigned,
// normalized integer for the w component and 10 bit unsigned, normalized
// integers for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W2Z10Y10X10): [32] wwzzzzzz zzzzyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMUDECN4
{
    union
    {
        struct
        {
            UINT x : 10;    // 0/1023 to 1023/1023
            UINT y : 10;    // 0/1023 to 1023/1023
            UINT z : 10;    // 0/1023 to 1023/1023
            UINT w : 2;     // 0/3 to 3/3
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMUDECN4() {};
    _XMUDECN4(UINT Packed) : v(Packed) {};
    _XMUDECN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUDECN4(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMUDECN4& operator= (CONST _XMUDECN4& UDecN4);
    _XMUDECN4& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMUDECN4;
// 4D Vector; 10-10-10-2 bit components packed into a 32 bit integer
// The 4D Vector is packed into 32 bits as follows: a 2 bit unsigned
// integer for the w component and 10 bit unsigned integers
// for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W2Z10Y10X10): [32] wwzzzzzz zzzzyyyy yyyyyyxx xxxxxxxx [0]
typedef struct _XMUDEC4
{
    union
    {
        struct
        {
            UINT x : 10;    // 0 to 1023
            UINT y : 10;    // 0 to 1023
            UINT z : 10;    // 0 to 1023
            UINT w : 2;     // 0 to 3
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMUDEC4() {};
    _XMUDEC4(UINT Packed) : v(Packed) {};
    _XMUDEC4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUDEC4(CONST FLOAT *pArray);

    operator UINT () { return v; }

    _XMUDEC4& operator= (CONST _XMUDEC4& UDec4);
    _XMUDEC4& operator= (CONST UINT Packed);

#endif // __cplusplus

} XMUDEC4;
// 4D Vector; 20-20-20-4 bit normalized components packed into a 64 bit integer
// The normalized 4D Vector is packed into 64 bits as follows: a 4 bit unsigned,
// normalized integer for the w component and 20 bit signed, normalized
// integers for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W4Z20Y20X20): [64] wwwwzzzz zzzzzzzz zzzzzzzz yyyyyyyy yyyyyyyy yyyyxxxx xxxxxxxx xxxxxxxx [0]
typedef struct _XMXICON4
{
    union
    {
        struct
        {
            INT64 x : 20;   // -524287/524287 to 524287/524287
            INT64 y : 20;   // -524287/524287 to 524287/524287
            INT64 z : 20;   // -524287/524287 to 524287/524287
            UINT64 w : 4;   // 0/15 to 15/15
        };
        UINT64 v;           // Packed 64-bit representation
    };

#ifdef __cplusplus

    _XMXICON4() {};
    _XMXICON4(UINT64 Packed) : v(Packed) {};
    _XMXICON4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMXICON4(CONST FLOAT *pArray);

    operator UINT64 () { return v; }

    _XMXICON4& operator= (CONST _XMXICON4& XIcoN4);
    _XMXICON4& operator= (CONST UINT64 Packed);

#endif // __cplusplus

} XMXICON4;
// 4D Vector; 20-20-20-4 bit components packed into a 64 bit integer
// The 4D Vector is packed into 64 bits as follows: a 4 bit unsigned
// integer for the w component and 20 bit signed integers for the
// z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W4Z20Y20X20): [64] wwwwzzzz zzzzzzzz zzzzzzzz yyyyyyyy yyyyyyyy yyyyxxxx xxxxxxxx xxxxxxxx [0]
typedef struct _XMXICO4
{
    union
    {
        struct
        {
            INT64 x : 20;   // -524287 to 524287
            INT64 y : 20;   // -524287 to 524287
            INT64 z : 20;   // -524287 to 524287
            UINT64 w : 4;   // 0 to 15
        };
        UINT64 v;           // Packed 64-bit representation
    };

#ifdef __cplusplus

    _XMXICO4() {};
    _XMXICO4(UINT64 Packed) : v(Packed) {};
    _XMXICO4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMXICO4(CONST FLOAT *pArray);

    operator UINT64 () { return v; }

    _XMXICO4& operator= (CONST _XMXICO4& XIco4);
    _XMXICO4& operator= (CONST UINT64 Packed);

#endif // __cplusplus

} XMXICO4;
// 4D Vector; 20-20-20-4 bit normalized components packed into a 64 bit integer
// The normalized 4D Vector is packed into 64 bits as follows: a 4 bit signed,
// normalized integer for the w component and 20 bit signed, normalized
// integers for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W4Z20Y20X20): [64] wwwwzzzz zzzzzzzz zzzzzzzz yyyyyyyy yyyyyyyy yyyyxxxx xxxxxxxx xxxxxxxx [0]
typedef struct _XMICON4
{
    union
    {
        struct
        {
            INT64 x : 20;   // -524287/524287 to 524287/524287
            INT64 y : 20;   // -524287/524287 to 524287/524287
            INT64 z : 20;   // -524287/524287 to 524287/524287
            INT64 w : 4;    // -7/7 to 7/7
        };
        UINT64 v;           // Packed 64-bit representation
    };

#ifdef __cplusplus

    _XMICON4() {};
    _XMICON4(UINT64 Packed) : v(Packed) {};
    _XMICON4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMICON4(CONST FLOAT *pArray);

    operator UINT64 () { return v; }

    _XMICON4& operator= (CONST _XMICON4& IcoN4);
    _XMICON4& operator= (CONST UINT64 Packed);

#endif // __cplusplus

} XMICON4;
// 4D Vector; 20-20-20-4 bit components packed into a 64 bit integer
// The 4D Vector is packed into 64 bits as follows: a 4 bit signed
// integer for the w component and 20 bit signed integers for the
// z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W4Z20Y20X20): [64] wwwwzzzz zzzzzzzz zzzzzzzz yyyyyyyy yyyyyyyy yyyyxxxx xxxxxxxx xxxxxxxx [0]
typedef struct _XMICO4
{
    union
    {
        struct
        {
            INT64 x : 20;   // -524287 to 524287
            INT64 y : 20;   // -524287 to 524287
            INT64 z : 20;   // -524287 to 524287
            INT64 w : 4;    // -7 to 7
        };
        UINT64 v;           // Packed 64-bit representation
    };

#ifdef __cplusplus

    _XMICO4() {};
    _XMICO4(UINT64 Packed) : v(Packed) {};
    _XMICO4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMICO4(CONST FLOAT *pArray);

    operator UINT64 () { return v; }

    _XMICO4& operator= (CONST _XMICO4& Ico4);
    _XMICO4& operator= (CONST UINT64 Packed);

#endif // __cplusplus

} XMICO4;
// 4D Vector; 20-20-20-4 bit normalized components packed into a 64 bit integer
// The normalized 4D Vector is packed into 64 bits as follows: a 4 bit unsigned,
// normalized integer for the w component and 20 bit unsigned, normalized
// integers for the z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W4Z20Y20X20): [64] wwwwzzzz zzzzzzzz zzzzzzzz yyyyyyyy yyyyyyyy yyyyxxxx xxxxxxxx xxxxxxxx [0]
typedef struct _XMUICON4
{
    union
    {
        struct
        {
            UINT64 x : 20;  // 0/1048575 to 1048575/1048575
            UINT64 y : 20;  // 0/1048575 to 1048575/1048575
            UINT64 z : 20;  // 0/1048575 to 1048575/1048575
            UINT64 w : 4;   // 0/15 to 15/15
        };
        UINT64 v;           // Packed 64-bit representation
    };

#ifdef __cplusplus

    _XMUICON4() {};
    _XMUICON4(UINT64 Packed) : v(Packed) {};
    _XMUICON4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUICON4(CONST FLOAT *pArray);

    operator UINT64 () { return v; }

    _XMUICON4& operator= (CONST _XMUICON4& UIcoN4);
    _XMUICON4& operator= (CONST UINT64 Packed);

#endif // __cplusplus

} XMUICON4;
// 4D Vector; 20-20-20-4 bit components packed into a 64 bit integer
// The 4D Vector is packed into 64 bits as follows: a 4 bit unsigned
// integer for the w component and 20 bit unsigned integers for the
// z, y, and x components. The w component is stored in the
// most significant bits and the x component in the least significant bits
// (W4Z20Y20X20): [64] wwwwzzzz zzzzzzzz zzzzzzzz yyyyyyyy yyyyyyyy yyyyxxxx xxxxxxxx xxxxxxxx [0]
typedef struct _XMUICO4
{
    union
    {
        struct
        {
            UINT64 x : 20;  // 0 to 1048575
            UINT64 y : 20;  // 0 to 1048575
            UINT64 z : 20;  // 0 to 1048575
            UINT64 w : 4;   // 0 to 15
        };
        UINT64 v;           // Packed 64-bit representation
    };

#ifdef __cplusplus

    _XMUICO4() {};
    _XMUICO4(UINT64 Packed) : v(Packed) {};
    _XMUICO4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUICO4(CONST FLOAT *pArray);

    operator UINT64 () { return v; }

    _XMUICO4& operator= (CONST _XMUICO4& UIco4);
    _XMUICO4& operator= (CONST UINT64 Packed);

#endif // __cplusplus

} XMUICO4;
// ARGB Color; 8-8-8-8 bit unsigned normalized integer components packed into
// a 32 bit integer. The normalized color is packed into 32 bits using 8 bit
// unsigned, normalized integers for the alpha, red, green, and blue components.
// The alpha component is stored in the most significant bits and the blue
// component in the least significant bits (A8R8G8B8):
// [32] aaaaaaaa rrrrrrrr gggggggg bbbbbbbb [0]
typedef struct _XMCOLOR
{
    union
    {
        struct
        {
            UINT b : 8;     // Blue:  0/255 to 255/255
            UINT g : 8;     // Green: 0/255 to 255/255
            UINT r : 8;     // Red:   0/255 to 255/255
            UINT a : 8;     // Alpha: 0/255 to 255/255
        };
        UINT c;             // Packed 32-bit ARGB value
    };

#ifdef __cplusplus

    _XMCOLOR() {};
    _XMCOLOR(UINT Color) : c(Color) {};
    _XMCOLOR(FLOAT _r, FLOAT _g, FLOAT _b, FLOAT _a);
    _XMCOLOR(CONST FLOAT *pArray);

    operator UINT () { return c; }

    _XMCOLOR& operator= (CONST _XMCOLOR& Color);
    _XMCOLOR& operator= (CONST UINT Color);

#endif // __cplusplus

} XMCOLOR;
// 4D Vector; 8 bit signed normalized integer components
typedef struct _XMBYTEN4
{
    union
    {
        struct
        {
            CHAR x;
            CHAR y;
            CHAR z;
            CHAR w;
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMBYTEN4() {};
    _XMBYTEN4(CHAR _x, CHAR _y, CHAR _z, CHAR _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMBYTEN4(UINT Packed) : v(Packed) {};
    _XMBYTEN4(CONST CHAR *pArray);
    _XMBYTEN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMBYTEN4(CONST FLOAT *pArray);

    _XMBYTEN4& operator= (CONST _XMBYTEN4& ByteN4);

#endif // __cplusplus

} XMBYTEN4;
// 4D Vector; 8 bit signed integer components
typedef struct _XMBYTE4
{
    union
    {
        struct
        {
            CHAR x;
            CHAR y;
            CHAR z;
            CHAR w;
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMBYTE4() {};
    _XMBYTE4(CHAR _x, CHAR _y, CHAR _z, CHAR _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMBYTE4(UINT Packed) : v(Packed) {};
    _XMBYTE4(CONST CHAR *pArray);
    _XMBYTE4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMBYTE4(CONST FLOAT *pArray);

    _XMBYTE4& operator= (CONST _XMBYTE4& Byte4);

#endif // __cplusplus

} XMBYTE4;
// 4D Vector; 8 bit unsigned normalized integer components
typedef struct _XMUBYTEN4
{
    union
    {
        struct
        {
            BYTE x;
            BYTE y;
            BYTE z;
            BYTE w;
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMUBYTEN4() {};
    _XMUBYTEN4(BYTE _x, BYTE _y, BYTE _z, BYTE _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMUBYTEN4(UINT Packed) : v(Packed) {};
    _XMUBYTEN4(CONST BYTE *pArray);
    _XMUBYTEN4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUBYTEN4(CONST FLOAT *pArray);

    _XMUBYTEN4& operator= (CONST _XMUBYTEN4& UByteN4);

#endif // __cplusplus

} XMUBYTEN4;
// 4D Vector; 8 bit unsigned integer components
typedef struct _XMUBYTE4
{
    union
    {
        struct
        {
            BYTE x;
            BYTE y;
            BYTE z;
            BYTE w;
        };
        UINT v;             // Packed 32-bit representation
    };

#ifdef __cplusplus

    _XMUBYTE4() {};
    _XMUBYTE4(BYTE _x, BYTE _y, BYTE _z, BYTE _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMUBYTE4(UINT Packed) : v(Packed) {};
    _XMUBYTE4(CONST BYTE *pArray);
    _XMUBYTE4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUBYTE4(CONST FLOAT *pArray);

    _XMUBYTE4& operator= (CONST _XMUBYTE4& UByte4);

#endif // __cplusplus

} XMUBYTE4;
// 4D vector; 4 bit unsigned integer components
// (W4Z4Y4X4): [16] wwwwzzzz yyyyxxxx [0]
typedef struct _XMUNIBBLE4
{
    union
    {
        struct
        {
            USHORT x : 4;   // 0 to 15
            USHORT y : 4;   // 0 to 15
            USHORT z : 4;   // 0 to 15
            USHORT w : 4;   // 0 to 15
        };
        USHORT v;           // Packed 16-bit representation
    };

#ifdef __cplusplus

    _XMUNIBBLE4() {};
    _XMUNIBBLE4(USHORT Packed) : v(Packed) {};
    _XMUNIBBLE4(CHAR _x, CHAR _y, CHAR _z, CHAR _w) : x(_x), y(_y), z(_z), w(_w) {};
    _XMUNIBBLE4(CONST CHAR *pArray);
    _XMUNIBBLE4(FLOAT _x, FLOAT _y, FLOAT _z, FLOAT _w);
    _XMUNIBBLE4(CONST FLOAT *pArray);

    operator USHORT () { return v; }

    _XMUNIBBLE4& operator= (CONST _XMUNIBBLE4& UNibble4);
    _XMUNIBBLE4& operator= (CONST USHORT Packed);

#endif // __cplusplus

} XMUNIBBLE4;
// 4D vector: 5/5/5/1 unsigned integer components
// (W1Z5Y5X5): [16] wzzzzzyy yyyxxxxx [0]
typedef struct _XMU555
{
    union
    {
        struct
        {
            USHORT x : 5;   // 0 to 31
            USHORT y : 5;   // 0 to 31
            USHORT z : 5;   // 0 to 31
            USHORT w : 1;   // 0 or 1
        };
        USHORT v;           // Packed 16-bit representation
    };

#ifdef __cplusplus

    _XMU555() {};
    _XMU555(USHORT Packed) : v(Packed) {};
    _XMU555(CHAR _x, CHAR _y, CHAR _z, BOOL _w) : x(_x), y(_y), z(_z), w(_w ? 0x1 : 0) {};
    _XMU555(CONST CHAR *pArray, BOOL _w);
    _XMU555(FLOAT _x, FLOAT _y, FLOAT _z, BOOL _w);
    _XMU555(CONST FLOAT *pArray, BOOL _w);

    operator USHORT () { return v; }

    _XMU555& operator= (CONST _XMU555& U555);
    _XMU555& operator= (CONST USHORT Packed);

#endif // __cplusplus

} XMU555;
// 3x3 Matrix: 32 bit floating point components
// Row-major storage; _RC members alias m[R-1][C-1].
typedef struct _XMFLOAT3X3
{
    union
    {
        struct
        {
            FLOAT _11, _12, _13;
            FLOAT _21, _22, _23;
            FLOAT _31, _32, _33;
        };
        FLOAT m[3][3];
    };

#ifdef __cplusplus

    _XMFLOAT3X3() {};
    _XMFLOAT3X3(FLOAT m00, FLOAT m01, FLOAT m02,
                FLOAT m10, FLOAT m11, FLOAT m12,
                FLOAT m20, FLOAT m21, FLOAT m22);
    _XMFLOAT3X3(CONST FLOAT *pArray);

    // Element access by zero-based (Row, Column)
    FLOAT       operator() (UINT Row, UINT Column) CONST { return m[Row][Column]; }
    FLOAT&      operator() (UINT Row, UINT Column) { return m[Row][Column]; }

    _XMFLOAT3X3& operator= (CONST _XMFLOAT3X3& Float3x3);

#endif // __cplusplus

} XMFLOAT3X3;
// 4x3 Matrix: 32 bit floating point components
// Row-major storage; _RC members alias m[R-1][C-1].
typedef struct _XMFLOAT4X3
{
    union
    {
        struct
        {
            FLOAT _11, _12, _13;
            FLOAT _21, _22, _23;
            FLOAT _31, _32, _33;
            FLOAT _41, _42, _43;
        };
        FLOAT m[4][3];
    };

#ifdef __cplusplus

    _XMFLOAT4X3() {};
    _XMFLOAT4X3(FLOAT m00, FLOAT m01, FLOAT m02,
                FLOAT m10, FLOAT m11, FLOAT m12,
                FLOAT m20, FLOAT m21, FLOAT m22,
                FLOAT m30, FLOAT m31, FLOAT m32);
    _XMFLOAT4X3(CONST FLOAT *pArray);

    // Element access by zero-based (Row, Column)
    FLOAT       operator() (UINT Row, UINT Column) CONST { return m[Row][Column]; }
    FLOAT&      operator() (UINT Row, UINT Column) { return m[Row][Column]; }

    _XMFLOAT4X3& operator= (CONST _XMFLOAT4X3& Float4x3);

#endif // __cplusplus

} XMFLOAT4X3;
  1435. // 4x3 Matrix: 32 bit floating point components aligned on a 16 byte boundary
  1436. #ifdef __cplusplus
  1437. __declspec(align(16)) struct XMFLOAT4X3A : public XMFLOAT4X3
  1438. {
  1439. XMFLOAT4X3A() : XMFLOAT4X3() {};
  1440. XMFLOAT4X3A(FLOAT m00, FLOAT m01, FLOAT m02,
  1441. FLOAT m10, FLOAT m11, FLOAT m12,
  1442. FLOAT m20, FLOAT m21, FLOAT m22,
  1443. FLOAT m30, FLOAT m31, FLOAT m32) :
  1444. XMFLOAT4X3(m00,m01,m02,m10,m11,m12,m20,m21,m22,m30,m31,m32) {};
  1445. XMFLOAT4X3A(CONST FLOAT *pArray) : XMFLOAT4X3(pArray) {}
  1446. FLOAT operator() (UINT Row, UINT Column) CONST { return m[Row][Column]; }
  1447. FLOAT& operator() (UINT Row, UINT Column) { return m[Row][Column]; }
  1448. XMFLOAT4X3A& operator= (CONST XMFLOAT4X3A& Float4x3);
  1449. };
  1450. #else
  1451. typedef __declspec(align(16)) XMFLOAT4X3 XMFLOAT4X3A;
  1452. #endif // __cplusplus
  1453. // 4x4 Matrix: 32 bit floating point components
  1454. typedef struct _XMFLOAT4X4
  1455. {
  1456. union
  1457. {
  1458. struct
  1459. {
  1460. FLOAT _11, _12, _13, _14;
  1461. FLOAT _21, _22, _23, _24;
  1462. FLOAT _31, _32, _33, _34;
  1463. FLOAT _41, _42, _43, _44;
  1464. };
  1465. FLOAT m[4][4];
  1466. };
  1467. #ifdef __cplusplus
  1468. _XMFLOAT4X4() {};
  1469. _XMFLOAT4X4(FLOAT m00, FLOAT m01, FLOAT m02, FLOAT m03,
  1470. FLOAT m10, FLOAT m11, FLOAT m12, FLOAT m13,
  1471. FLOAT m20, FLOAT m21, FLOAT m22, FLOAT m23,
  1472. FLOAT m30, FLOAT m31, FLOAT m32, FLOAT m33);
  1473. _XMFLOAT4X4(CONST FLOAT *pArray);
  1474. FLOAT operator() (UINT Row, UINT Column) CONST { return m[Row][Column]; }
  1475. FLOAT& operator() (UINT Row, UINT Column) { return m[Row][Column]; }
  1476. _XMFLOAT4X4& operator= (CONST _XMFLOAT4X4& Float4x4);
  1477. #endif // __cplusplus
  1478. } XMFLOAT4X4;
  1479. // 4x4 Matrix: 32 bit floating point components aligned on a 16 byte boundary
  1480. #ifdef __cplusplus
  1481. __declspec(align(16)) struct XMFLOAT4X4A : public XMFLOAT4X4
  1482. {
  1483. XMFLOAT4X4A() : XMFLOAT4X4() {};
  1484. XMFLOAT4X4A(FLOAT m00, FLOAT m01, FLOAT m02, FLOAT m03,
  1485. FLOAT m10, FLOAT m11, FLOAT m12, FLOAT m13,
  1486. FLOAT m20, FLOAT m21, FLOAT m22, FLOAT m23,
  1487. FLOAT m30, FLOAT m31, FLOAT m32, FLOAT m33)
  1488. : XMFLOAT4X4(m00,m01,m02,m03,m10,m11,m12,m13,m20,m21,m22,m23,m30,m31,m32,m33) {};
  1489. XMFLOAT4X4A(CONST FLOAT *pArray) : XMFLOAT4X4(pArray) {}
  1490. FLOAT operator() (UINT Row, UINT Column) CONST { return m[Row][Column]; }
  1491. FLOAT& operator() (UINT Row, UINT Column) { return m[Row][Column]; }
  1492. XMFLOAT4X4A& operator= (CONST XMFLOAT4X4A& Float4x4);
  1493. };
  1494. #else
  1495. typedef __declspec(align(16)) XMFLOAT4X4 XMFLOAT4X4A;
  1496. #endif // __cplusplus
  1497. #if !defined(_XM_X86_) && !defined(_XM_X64_)
  1498. #pragma bitfield_order(pop)
  1499. #endif // !_XM_X86_ && !_XM_X64_
  1500. #pragma warning(pop)
  1501. /****************************************************************************
  1502. *
  1503. * Data conversion operations
  1504. *
  1505. ****************************************************************************/
  1506. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_VMX128_INTRINSICS_)
  1507. #else
  1508. XMVECTOR XMConvertVectorIntToFloat(FXMVECTOR VInt, UINT DivExponent);
  1509. XMVECTOR XMConvertVectorFloatToInt(FXMVECTOR VFloat, UINT MulExponent);
  1510. XMVECTOR XMConvertVectorUIntToFloat(FXMVECTOR VUInt, UINT DivExponent);
  1511. XMVECTOR XMConvertVectorFloatToUInt(FXMVECTOR VFloat, UINT MulExponent);
  1512. #endif
  1513. FLOAT XMConvertHalfToFloat(HALF Value);
  1514. FLOAT* XMConvertHalfToFloatStream(_Out_bytecap_x_(sizeof(FLOAT)+OutputStride*(HalfCount-1)) FLOAT* pOutputStream,
  1515. _In_ UINT OutputStride,
  1516. _In_bytecount_x_(sizeof(HALF)+InputStride*(HalfCount-1)) CONST HALF* pInputStream,
  1517. _In_ UINT InputStride, _In_ UINT HalfCount);
  1518. HALF XMConvertFloatToHalf(FLOAT Value);
  1519. HALF* XMConvertFloatToHalfStream(_Out_bytecap_x_(sizeof(HALF)+OutputStride*(FloatCount-1)) HALF* pOutputStream,
  1520. _In_ UINT OutputStride,
  1521. _In_bytecount_x_(sizeof(FLOAT)+InputStride*(FloatCount-1)) CONST FLOAT* pInputStream,
  1522. _In_ UINT InputStride, _In_ UINT FloatCount);
  1523. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_VMX128_INTRINSICS_)
  1524. #else
  1525. XMVECTOR XMVectorSetBinaryConstant(UINT C0, UINT C1, UINT C2, UINT C3);
  1526. XMVECTOR XMVectorSplatConstant(INT IntConstant, UINT DivExponent);
  1527. XMVECTOR XMVectorSplatConstantInt(INT IntConstant);
  1528. #endif
  1529. /****************************************************************************
  1530. *
  1531. * Load operations
  1532. *
  1533. ****************************************************************************/
  1534. XMVECTOR XMLoadInt(_In_ CONST UINT* pSource);
  1535. XMVECTOR XMLoadFloat(_In_ CONST FLOAT* pSource);
  1536. XMVECTOR XMLoadInt2(_In_count_c_(2) CONST UINT* pSource);
  1537. XMVECTOR XMLoadInt2A(_In_count_c_(2) CONST UINT* PSource);
  1538. XMVECTOR XMLoadFloat2(_In_ CONST XMFLOAT2* pSource);
  1539. XMVECTOR XMLoadFloat2A(_In_ CONST XMFLOAT2A* pSource);
  1540. XMVECTOR XMLoadHalf2(_In_ CONST XMHALF2* pSource);
  1541. XMVECTOR XMLoadShortN2(_In_ CONST XMSHORTN2* pSource);
  1542. XMVECTOR XMLoadShort2(_In_ CONST XMSHORT2* pSource);
  1543. XMVECTOR XMLoadUShortN2(_In_ CONST XMUSHORTN2* pSource);
  1544. XMVECTOR XMLoadUShort2(_In_ CONST XMUSHORT2* pSource);
  1545. XMVECTOR XMLoadInt3(_In_count_c_(3) CONST UINT* pSource);
  1546. XMVECTOR XMLoadInt3A(_In_count_c_(3) CONST UINT* pSource);
  1547. XMVECTOR XMLoadFloat3(_In_ CONST XMFLOAT3* pSource);
  1548. XMVECTOR XMLoadFloat3A(_In_ CONST XMFLOAT3A* pSource);
  1549. XMVECTOR XMLoadHenDN3(_In_ CONST XMHENDN3* pSource);
  1550. XMVECTOR XMLoadHenD3(_In_ CONST XMHEND3* pSource);
  1551. XMVECTOR XMLoadUHenDN3(_In_ CONST XMUHENDN3* pSource);
  1552. XMVECTOR XMLoadUHenD3(_In_ CONST XMUHEND3* pSource);
  1553. XMVECTOR XMLoadDHenN3(_In_ CONST XMDHENN3* pSource);
  1554. XMVECTOR XMLoadDHen3(_In_ CONST XMDHEN3* pSource);
  1555. XMVECTOR XMLoadUDHenN3(_In_ CONST XMUDHENN3* pSource);
  1556. XMVECTOR XMLoadUDHen3(_In_ CONST XMUDHEN3* pSource);
  1557. XMVECTOR XMLoadU565(_In_ CONST XMU565* pSource);
  1558. XMVECTOR XMLoadFloat3PK(_In_ CONST XMFLOAT3PK* pSource);
  1559. XMVECTOR XMLoadFloat3SE(_In_ CONST XMFLOAT3SE* pSource);
  1560. XMVECTOR XMLoadInt4(_In_count_c_(4) CONST UINT* pSource);
  1561. XMVECTOR XMLoadInt4A(_In_count_c_(4) CONST UINT* pSource);
  1562. XMVECTOR XMLoadFloat4(_In_ CONST XMFLOAT4* pSource);
  1563. XMVECTOR XMLoadFloat4A(_In_ CONST XMFLOAT4A* pSource);
  1564. XMVECTOR XMLoadHalf4(_In_ CONST XMHALF4* pSource);
  1565. XMVECTOR XMLoadShortN4(_In_ CONST XMSHORTN4* pSource);
  1566. XMVECTOR XMLoadShort4(_In_ CONST XMSHORT4* pSource);
  1567. XMVECTOR XMLoadUShortN4(_In_ CONST XMUSHORTN4* pSource);
  1568. XMVECTOR XMLoadUShort4(_In_ CONST XMUSHORT4* pSource);
  1569. XMVECTOR XMLoadXIcoN4(_In_ CONST XMXICON4* pSource);
  1570. XMVECTOR XMLoadXIco4(_In_ CONST XMXICO4* pSource);
  1571. XMVECTOR XMLoadIcoN4(_In_ CONST XMICON4* pSource);
  1572. XMVECTOR XMLoadIco4(_In_ CONST XMICO4* pSource);
  1573. XMVECTOR XMLoadUIcoN4(_In_ CONST XMUICON4* pSource);
  1574. XMVECTOR XMLoadUIco4(_In_ CONST XMUICO4* pSource);
  1575. XMVECTOR XMLoadXDecN4(_In_ CONST XMXDECN4* pSource);
  1576. XMVECTOR XMLoadXDec4(_In_ CONST XMXDEC4* pSource);
  1577. XMVECTOR XMLoadDecN4(_In_ CONST XMDECN4* pSource);
  1578. XMVECTOR XMLoadDec4(_In_ CONST XMDEC4* pSource);
  1579. XMVECTOR XMLoadUDecN4(_In_ CONST XMUDECN4* pSource);
  1580. XMVECTOR XMLoadUDec4(_In_ CONST XMUDEC4* pSource);
  1581. XMVECTOR XMLoadByteN4(_In_ CONST XMBYTEN4* pSource);
  1582. XMVECTOR XMLoadByte4(_In_ CONST XMBYTE4* pSource);
  1583. XMVECTOR XMLoadUByteN4(_In_ CONST XMUBYTEN4* pSource);
  1584. XMVECTOR XMLoadUByte4(_In_ CONST XMUBYTE4* pSource);
  1585. XMVECTOR XMLoadUNibble4(_In_ CONST XMUNIBBLE4* pSource);
  1586. XMVECTOR XMLoadU555(_In_ CONST XMU555* pSource);
  1587. XMVECTOR XMLoadColor(_In_ CONST XMCOLOR* pSource);
  1588. XMMATRIX XMLoadFloat3x3(_In_ CONST XMFLOAT3X3* pSource);
  1589. XMMATRIX XMLoadFloat4x3(_In_ CONST XMFLOAT4X3* pSource);
  1590. XMMATRIX XMLoadFloat4x3A(_In_ CONST XMFLOAT4X3A* pSource);
  1591. XMMATRIX XMLoadFloat4x4(_In_ CONST XMFLOAT4X4* pSource);
  1592. XMMATRIX XMLoadFloat4x4A(_In_ CONST XMFLOAT4X4A* pSource);
  1593. /****************************************************************************
  1594. *
  1595. * Store operations
  1596. *
  1597. ****************************************************************************/
  1598. VOID XMStoreInt(_Out_ UINT* pDestination, FXMVECTOR V);
  1599. VOID XMStoreFloat(_Out_ FLOAT* pDestination, FXMVECTOR V);
  1600. VOID XMStoreInt2(_Out_cap_c_(2) UINT* pDestination, FXMVECTOR V);
  1601. VOID XMStoreInt2A(_Out_cap_c_(2) UINT* pDestination, FXMVECTOR V);
  1602. VOID XMStoreFloat2(_Out_ XMFLOAT2* pDestination, FXMVECTOR V);
  1603. VOID XMStoreFloat2A(_Out_ XMFLOAT2A* pDestination, FXMVECTOR V);
  1604. VOID XMStoreHalf2(_Out_ XMHALF2* pDestination, FXMVECTOR V);
  1605. VOID XMStoreShortN2(_Out_ XMSHORTN2* pDestination, FXMVECTOR V);
  1606. VOID XMStoreShort2(_Out_ XMSHORT2* pDestination, FXMVECTOR V);
  1607. VOID XMStoreUShortN2(_Out_ XMUSHORTN2* pDestination, FXMVECTOR V);
  1608. VOID XMStoreUShort2(_Out_ XMUSHORT2* pDestination, FXMVECTOR V);
  1609. VOID XMStoreInt3(_Out_cap_c_(3) UINT* pDestination, FXMVECTOR V);
  1610. VOID XMStoreInt3A(_Out_cap_c_(3) UINT* pDestination, FXMVECTOR V);
  1611. VOID XMStoreFloat3(_Out_ XMFLOAT3* pDestination, FXMVECTOR V);
  1612. VOID XMStoreFloat3A(_Out_ XMFLOAT3A* pDestination, FXMVECTOR V);
  1613. VOID XMStoreHenDN3(_Out_ XMHENDN3* pDestination, FXMVECTOR V);
  1614. VOID XMStoreHenD3(_Out_ XMHEND3* pDestination, FXMVECTOR V);
  1615. VOID XMStoreUHenDN3(_Out_ XMUHENDN3* pDestination, FXMVECTOR V);
  1616. VOID XMStoreUHenD3(_Out_ XMUHEND3* pDestination, FXMVECTOR V);
  1617. VOID XMStoreDHenN3(_Out_ XMDHENN3* pDestination, FXMVECTOR V);
  1618. VOID XMStoreDHen3(_Out_ XMDHEN3* pDestination, FXMVECTOR V);
  1619. VOID XMStoreUDHenN3(_Out_ XMUDHENN3* pDestination, FXMVECTOR V);
  1620. VOID XMStoreUDHen3(_Out_ XMUDHEN3* pDestination, FXMVECTOR V);
  1621. VOID XMStoreU565(_Out_ XMU565* pDestination, FXMVECTOR V);
  1622. VOID XMStoreFloat3PK(_Out_ XMFLOAT3PK* pDestination, FXMVECTOR V);
  1623. VOID XMStoreFloat3SE(_Out_ XMFLOAT3SE* pDestination, FXMVECTOR V);
  1624. VOID XMStoreInt4(_Out_cap_c_(4) UINT* pDestination, FXMVECTOR V);
  1625. VOID XMStoreInt4A(_Out_cap_c_(4) UINT* pDestination, FXMVECTOR V);
  1626. VOID XMStoreInt4NC(_Out_ UINT* pDestination, FXMVECTOR V);
  1627. VOID XMStoreFloat4(_Out_ XMFLOAT4* pDestination, FXMVECTOR V);
  1628. VOID XMStoreFloat4A(_Out_ XMFLOAT4A* pDestination, FXMVECTOR V);
  1629. VOID XMStoreFloat4NC(_Out_ XMFLOAT4* pDestination, FXMVECTOR V);
  1630. VOID XMStoreHalf4(_Out_ XMHALF4* pDestination, FXMVECTOR V);
  1631. VOID XMStoreShortN4(_Out_ XMSHORTN4* pDestination, FXMVECTOR V);
  1632. VOID XMStoreShort4(_Out_ XMSHORT4* pDestination, FXMVECTOR V);
  1633. VOID XMStoreUShortN4(_Out_ XMUSHORTN4* pDestination, FXMVECTOR V);
  1634. VOID XMStoreUShort4(_Out_ XMUSHORT4* pDestination, FXMVECTOR V);
  1635. VOID XMStoreXIcoN4(_Out_ XMXICON4* pDestination, FXMVECTOR V);
  1636. VOID XMStoreXIco4(_Out_ XMXICO4* pDestination, FXMVECTOR V);
  1637. VOID XMStoreIcoN4(_Out_ XMICON4* pDestination, FXMVECTOR V);
  1638. VOID XMStoreIco4(_Out_ XMICO4* pDestination, FXMVECTOR V);
  1639. VOID XMStoreUIcoN4(_Out_ XMUICON4* pDestination, FXMVECTOR V);
  1640. VOID XMStoreUIco4(_Out_ XMUICO4* pDestination, FXMVECTOR V);
  1641. VOID XMStoreXDecN4(_Out_ XMXDECN4* pDestination, FXMVECTOR V);
  1642. VOID XMStoreXDec4(_Out_ XMXDEC4* pDestination, FXMVECTOR V);
  1643. VOID XMStoreDecN4(_Out_ XMDECN4* pDestination, FXMVECTOR V);
  1644. VOID XMStoreDec4(_Out_ XMDEC4* pDestination, FXMVECTOR V);
  1645. VOID XMStoreUDecN4(_Out_ XMUDECN4* pDestination, FXMVECTOR V);
  1646. VOID XMStoreUDec4(_Out_ XMUDEC4* pDestination, FXMVECTOR V);
  1647. VOID XMStoreByteN4(_Out_ XMBYTEN4* pDestination, FXMVECTOR V);
  1648. VOID XMStoreByte4(_Out_ XMBYTE4* pDestination, FXMVECTOR V);
  1649. VOID XMStoreUByteN4(_Out_ XMUBYTEN4* pDestination, FXMVECTOR V);
  1650. VOID XMStoreUByte4(_Out_ XMUBYTE4* pDestination, FXMVECTOR V);
  1651. VOID XMStoreUNibble4(_Out_ XMUNIBBLE4* pDestination, FXMVECTOR V);
  1652. VOID XMStoreU555(_Out_ XMU555* pDestination, FXMVECTOR V);
  1653. VOID XMStoreColor(_Out_ XMCOLOR* pDestination, FXMVECTOR V);
  1654. VOID XMStoreFloat3x3(_Out_ XMFLOAT3X3* pDestination, CXMMATRIX M);
  1655. VOID XMStoreFloat3x3NC(_Out_ XMFLOAT3X3* pDestination, CXMMATRIX M);
  1656. VOID XMStoreFloat4x3(_Out_ XMFLOAT4X3* pDestination, CXMMATRIX M);
  1657. VOID XMStoreFloat4x3A(_Out_ XMFLOAT4X3A* pDestination, CXMMATRIX M);
  1658. VOID XMStoreFloat4x3NC(_Out_ XMFLOAT4X3* pDestination, CXMMATRIX M);
  1659. VOID XMStoreFloat4x4(_Out_ XMFLOAT4X4* pDestination, CXMMATRIX M);
  1660. VOID XMStoreFloat4x4A(_Out_ XMFLOAT4X4A* pDestination, CXMMATRIX M);
  1661. VOID XMStoreFloat4x4NC(_Out_ XMFLOAT4X4* pDestination, CXMMATRIX M);
  1662. /****************************************************************************
  1663. *
  1664. * General vector operations
  1665. *
  1666. ****************************************************************************/
  1667. XMVECTOR XMVectorZero();
  1668. XMVECTOR XMVectorSet(FLOAT x, FLOAT y, FLOAT z, FLOAT w);
  1669. XMVECTOR XMVectorSetInt(UINT x, UINT y, UINT z, UINT w);
  1670. XMVECTOR XMVectorReplicate(FLOAT Value);
  1671. XMVECTOR XMVectorReplicatePtr(_In_ CONST FLOAT *pValue);
  1672. XMVECTOR XMVectorReplicateInt(UINT Value);
  1673. XMVECTOR XMVectorReplicateIntPtr(_In_ CONST UINT *pValue);
  1674. XMVECTOR XMVectorTrueInt();
  1675. XMVECTOR XMVectorFalseInt();
  1676. XMVECTOR XMVectorSplatX(FXMVECTOR V);
  1677. XMVECTOR XMVectorSplatY(FXMVECTOR V);
  1678. XMVECTOR XMVectorSplatZ(FXMVECTOR V);
  1679. XMVECTOR XMVectorSplatW(FXMVECTOR V);
  1680. XMVECTOR XMVectorSplatOne();
  1681. XMVECTOR XMVectorSplatInfinity();
  1682. XMVECTOR XMVectorSplatQNaN();
  1683. XMVECTOR XMVectorSplatEpsilon();
  1684. XMVECTOR XMVectorSplatSignMask();
  1685. FLOAT XMVectorGetByIndex(FXMVECTOR V,UINT i);
  1686. FLOAT XMVectorGetX(FXMVECTOR V);
  1687. FLOAT XMVectorGetY(FXMVECTOR V);
  1688. FLOAT XMVectorGetZ(FXMVECTOR V);
  1689. FLOAT XMVectorGetW(FXMVECTOR V);
  1690. VOID XMVectorGetByIndexPtr(_Out_ FLOAT *f, FXMVECTOR V, UINT i);
  1691. VOID XMVectorGetXPtr(_Out_ FLOAT *x, FXMVECTOR V);
  1692. VOID XMVectorGetYPtr(_Out_ FLOAT *y, FXMVECTOR V);
  1693. VOID XMVectorGetZPtr(_Out_ FLOAT *z, FXMVECTOR V);
  1694. VOID XMVectorGetWPtr(_Out_ FLOAT *w, FXMVECTOR V);
  1695. UINT XMVectorGetIntByIndex(FXMVECTOR V,UINT i);
  1696. UINT XMVectorGetIntX(FXMVECTOR V);
  1697. UINT XMVectorGetIntY(FXMVECTOR V);
  1698. UINT XMVectorGetIntZ(FXMVECTOR V);
  1699. UINT XMVectorGetIntW(FXMVECTOR V);
  1700. VOID XMVectorGetIntByIndexPtr(_Out_ UINT *x,FXMVECTOR V, UINT i);
  1701. VOID XMVectorGetIntXPtr(_Out_ UINT *x, FXMVECTOR V);
  1702. VOID XMVectorGetIntYPtr(_Out_ UINT *y, FXMVECTOR V);
  1703. VOID XMVectorGetIntZPtr(_Out_ UINT *z, FXMVECTOR V);
  1704. VOID XMVectorGetIntWPtr(_Out_ UINT *w, FXMVECTOR V);
  1705. XMVECTOR XMVectorSetByIndex(FXMVECTOR V,FLOAT f,UINT i);
  1706. XMVECTOR XMVectorSetX(FXMVECTOR V, FLOAT x);
  1707. XMVECTOR XMVectorSetY(FXMVECTOR V, FLOAT y);
  1708. XMVECTOR XMVectorSetZ(FXMVECTOR V, FLOAT z);
  1709. XMVECTOR XMVectorSetW(FXMVECTOR V, FLOAT w);
  1710. XMVECTOR XMVectorSetByIndexPtr(FXMVECTOR V, _In_ CONST FLOAT *f, UINT i);
  1711. XMVECTOR XMVectorSetXPtr(FXMVECTOR V, _In_ CONST FLOAT *x);
  1712. XMVECTOR XMVectorSetYPtr(FXMVECTOR V, _In_ CONST FLOAT *y);
  1713. XMVECTOR XMVectorSetZPtr(FXMVECTOR V, _In_ CONST FLOAT *z);
  1714. XMVECTOR XMVectorSetWPtr(FXMVECTOR V, _In_ CONST FLOAT *w);
  1715. XMVECTOR XMVectorSetIntByIndex(FXMVECTOR V, UINT x,UINT i);
  1716. XMVECTOR XMVectorSetIntX(FXMVECTOR V, UINT x);
  1717. XMVECTOR XMVectorSetIntY(FXMVECTOR V, UINT y);
  1718. XMVECTOR XMVectorSetIntZ(FXMVECTOR V, UINT z);
  1719. XMVECTOR XMVectorSetIntW(FXMVECTOR V, UINT w);
  1720. XMVECTOR XMVectorSetIntByIndexPtr(FXMVECTOR V, _In_ CONST UINT *x, UINT i);
  1721. XMVECTOR XMVectorSetIntXPtr(FXMVECTOR V, _In_ CONST UINT *x);
  1722. XMVECTOR XMVectorSetIntYPtr(FXMVECTOR V, _In_ CONST UINT *y);
  1723. XMVECTOR XMVectorSetIntZPtr(FXMVECTOR V, _In_ CONST UINT *z);
  1724. XMVECTOR XMVectorSetIntWPtr(FXMVECTOR V, _In_ CONST UINT *w);
  1725. XMVECTOR XMVectorPermuteControl(UINT ElementIndex0, UINT ElementIndex1, UINT ElementIndex2, UINT ElementIndex3);
  1726. XMVECTOR XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Control);
  1727. XMVECTOR XMVectorSelectControl(UINT VectorIndex0, UINT VectorIndex1, UINT VectorIndex2, UINT VectorIndex3);
  1728. XMVECTOR XMVectorSelect(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Control);
  1729. XMVECTOR XMVectorMergeXY(FXMVECTOR V1, FXMVECTOR V2);
  1730. XMVECTOR XMVectorMergeZW(FXMVECTOR V1, FXMVECTOR V2);
  1731. #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_VMX128_INTRINSICS_)
  1732. #else
  1733. XMVECTOR XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, UINT Elements);
  1734. XMVECTOR XMVectorRotateLeft(FXMVECTOR V, UINT Elements);
  1735. XMVECTOR XMVectorRotateRight(FXMVECTOR V, UINT Elements);
  1736. XMVECTOR XMVectorSwizzle(FXMVECTOR V, UINT E0, UINT E1, UINT E2, UINT E3);
  1737. XMVECTOR XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS, UINT VSLeftRotateElements,
  1738. UINT Select0, UINT Select1, UINT Select2, UINT Select3);
  1739. #endif
  1740. XMVECTOR XMVectorEqual(FXMVECTOR V1, FXMVECTOR V2);
  1741. XMVECTOR XMVectorEqualR(_Out_ UINT* pCR, FXMVECTOR V1, FXMVECTOR V2);
  1742. XMVECTOR XMVectorEqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1743. XMVECTOR XMVectorEqualIntR(_Out_ UINT* pCR, FXMVECTOR V, FXMVECTOR V2);
  1744. XMVECTOR XMVectorNearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
  1745. XMVECTOR XMVectorNotEqual(FXMVECTOR V1, FXMVECTOR V2);
  1746. XMVECTOR XMVectorNotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1747. XMVECTOR XMVectorGreater(FXMVECTOR V1, FXMVECTOR V2);
  1748. XMVECTOR XMVectorGreaterR(_Out_ UINT* pCR, FXMVECTOR V1, FXMVECTOR V2);
  1749. XMVECTOR XMVectorGreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1750. XMVECTOR XMVectorGreaterOrEqualR(_Out_ UINT* pCR, FXMVECTOR V1, FXMVECTOR V2);
  1751. XMVECTOR XMVectorLess(FXMVECTOR V1, FXMVECTOR V2);
  1752. XMVECTOR XMVectorLessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1753. XMVECTOR XMVectorInBounds(FXMVECTOR V, FXMVECTOR Bounds);
  1754. XMVECTOR XMVectorInBoundsR(_Out_ UINT* pCR, FXMVECTOR V, FXMVECTOR Bounds);
  1755. XMVECTOR XMVectorIsNaN(FXMVECTOR V);
  1756. XMVECTOR XMVectorIsInfinite(FXMVECTOR V);
  1757. XMVECTOR XMVectorMin(FXMVECTOR V1,FXMVECTOR V2);
  1758. XMVECTOR XMVectorMax(FXMVECTOR V1, FXMVECTOR V2);
  1759. XMVECTOR XMVectorRound(FXMVECTOR V);
  1760. XMVECTOR XMVectorTruncate(FXMVECTOR V);
  1761. XMVECTOR XMVectorFloor(FXMVECTOR V);
  1762. XMVECTOR XMVectorCeiling(FXMVECTOR V);
  1763. XMVECTOR XMVectorClamp(FXMVECTOR V, FXMVECTOR Min, FXMVECTOR Max);
  1764. XMVECTOR XMVectorSaturate(FXMVECTOR V);
  1765. XMVECTOR XMVectorAndInt(FXMVECTOR V1, FXMVECTOR V2);
  1766. XMVECTOR XMVectorAndCInt(FXMVECTOR V1, FXMVECTOR V2);
  1767. XMVECTOR XMVectorOrInt(FXMVECTOR V1, FXMVECTOR V2);
  1768. XMVECTOR XMVectorNorInt(FXMVECTOR V1, FXMVECTOR V2);
  1769. XMVECTOR XMVectorXorInt(FXMVECTOR V1, FXMVECTOR V2);
  1770. XMVECTOR XMVectorNegate(FXMVECTOR V);
  1771. XMVECTOR XMVectorAdd(FXMVECTOR V1, FXMVECTOR V2);
  1772. XMVECTOR XMVectorAddAngles(FXMVECTOR V1, FXMVECTOR V2);
  1773. XMVECTOR XMVectorSubtract(FXMVECTOR V1, FXMVECTOR V2);
  1774. XMVECTOR XMVectorSubtractAngles(FXMVECTOR V1, FXMVECTOR V2);
  1775. XMVECTOR XMVectorMultiply(FXMVECTOR V1, FXMVECTOR V2);
  1776. XMVECTOR XMVectorMultiplyAdd(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3);
  1777. XMVECTOR XMVectorDivide(FXMVECTOR V1, FXMVECTOR V2);
  1778. XMVECTOR XMVectorNegativeMultiplySubtract(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3);
  1779. XMVECTOR XMVectorScale(FXMVECTOR V, FLOAT ScaleFactor);
  1780. XMVECTOR XMVectorReciprocalEst(FXMVECTOR V);
  1781. XMVECTOR XMVectorReciprocal(FXMVECTOR V);
  1782. XMVECTOR XMVectorSqrtEst(FXMVECTOR V);
  1783. XMVECTOR XMVectorSqrt(FXMVECTOR V);
  1784. XMVECTOR XMVectorReciprocalSqrtEst(FXMVECTOR V);
  1785. XMVECTOR XMVectorReciprocalSqrt(FXMVECTOR V);
  1786. XMVECTOR XMVectorExpEst(FXMVECTOR V);
  1787. XMVECTOR XMVectorExp(FXMVECTOR V);
  1788. XMVECTOR XMVectorLogEst(FXMVECTOR V);
  1789. XMVECTOR XMVectorLog(FXMVECTOR V);
  1790. XMVECTOR XMVectorPowEst(FXMVECTOR V1, FXMVECTOR V2);
  1791. XMVECTOR XMVectorPow(FXMVECTOR V1, FXMVECTOR V2);
  1792. XMVECTOR XMVectorAbs(FXMVECTOR V);
  1793. XMVECTOR XMVectorMod(FXMVECTOR V1, FXMVECTOR V2);
  1794. XMVECTOR XMVectorModAngles(FXMVECTOR Angles);
  1795. XMVECTOR XMVectorSin(FXMVECTOR V);
  1796. XMVECTOR XMVectorSinEst(FXMVECTOR V);
  1797. XMVECTOR XMVectorCos(FXMVECTOR V);
  1798. XMVECTOR XMVectorCosEst(FXMVECTOR V);
  1799. VOID XMVectorSinCos(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, FXMVECTOR V);
  1800. VOID XMVectorSinCosEst(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, FXMVECTOR V);
  1801. XMVECTOR XMVectorTan(FXMVECTOR V);
  1802. XMVECTOR XMVectorTanEst(FXMVECTOR V);
  1803. XMVECTOR XMVectorSinH(FXMVECTOR V);
  1804. XMVECTOR XMVectorSinHEst(FXMVECTOR V);
  1805. XMVECTOR XMVectorCosH(FXMVECTOR V);
  1806. XMVECTOR XMVectorCosHEst(FXMVECTOR V);
  1807. XMVECTOR XMVectorTanH(FXMVECTOR V);
  1808. XMVECTOR XMVectorTanHEst(FXMVECTOR V);
  1809. XMVECTOR XMVectorASin(FXMVECTOR V);
  1810. XMVECTOR XMVectorASinEst(FXMVECTOR V);
  1811. XMVECTOR XMVectorACos(FXMVECTOR V);
  1812. XMVECTOR XMVectorACosEst(FXMVECTOR V);
  1813. XMVECTOR XMVectorATan(FXMVECTOR V);
  1814. XMVECTOR XMVectorATanEst(FXMVECTOR V);
  1815. XMVECTOR XMVectorATan2(FXMVECTOR Y, FXMVECTOR X);
  1816. XMVECTOR XMVectorATan2Est(FXMVECTOR Y, FXMVECTOR X);
  1817. XMVECTOR XMVectorLerp(FXMVECTOR V0, FXMVECTOR V1, FLOAT t);
  1818. XMVECTOR XMVectorLerpV(FXMVECTOR V0, FXMVECTOR V1, FXMVECTOR T);
  1819. XMVECTOR XMVectorHermite(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, CXMVECTOR Tangent1, FLOAT t);
  1820. XMVECTOR XMVectorHermiteV(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, CXMVECTOR Tangent1, CXMVECTOR T);
  1821. XMVECTOR XMVectorCatmullRom(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, CXMVECTOR Position3, FLOAT t);
  1822. XMVECTOR XMVectorCatmullRomV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, CXMVECTOR Position3, CXMVECTOR T);
  1823. XMVECTOR XMVectorBaryCentric(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, FLOAT f, FLOAT g);
  1824. XMVECTOR XMVectorBaryCentricV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, CXMVECTOR F, CXMVECTOR G);
  1825. /****************************************************************************
  1826. *
  1827. * 2D vector operations
  1828. *
  1829. ****************************************************************************/
  1830. BOOL XMVector2Equal(FXMVECTOR V1, FXMVECTOR V2);
  1831. UINT XMVector2EqualR(FXMVECTOR V1, FXMVECTOR V2);
  1832. BOOL XMVector2EqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1833. UINT XMVector2EqualIntR(FXMVECTOR V1, FXMVECTOR V2);
  1834. BOOL XMVector2NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
  1835. BOOL XMVector2NotEqual(FXMVECTOR V1, FXMVECTOR V2);
  1836. BOOL XMVector2NotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1837. BOOL XMVector2Greater(FXMVECTOR V1, FXMVECTOR V2);
  1838. UINT XMVector2GreaterR(FXMVECTOR V1, FXMVECTOR V2);
  1839. BOOL XMVector2GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1840. UINT XMVector2GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2);
  1841. BOOL XMVector2Less(FXMVECTOR V1, FXMVECTOR V2);
  1842. BOOL XMVector2LessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1843. BOOL XMVector2InBounds(FXMVECTOR V, FXMVECTOR Bounds);
  1844. UINT XMVector2InBoundsR(FXMVECTOR V, FXMVECTOR Bounds);
  1845. BOOL XMVector2IsNaN(FXMVECTOR V);
  1846. BOOL XMVector2IsInfinite(FXMVECTOR V);
  1847. XMVECTOR XMVector2Dot(FXMVECTOR V1, FXMVECTOR V2);
  1848. XMVECTOR XMVector2Cross(FXMVECTOR V1, FXMVECTOR V2);
  1849. XMVECTOR XMVector2LengthSq(FXMVECTOR V);
  1850. XMVECTOR XMVector2ReciprocalLengthEst(FXMVECTOR V);
  1851. XMVECTOR XMVector2ReciprocalLength(FXMVECTOR V);
  1852. XMVECTOR XMVector2LengthEst(FXMVECTOR V);
  1853. XMVECTOR XMVector2Length(FXMVECTOR V);
  1854. XMVECTOR XMVector2NormalizeEst(FXMVECTOR V);
  1855. XMVECTOR XMVector2Normalize(FXMVECTOR V);
  1856. XMVECTOR XMVector2ClampLength(FXMVECTOR V, FLOAT LengthMin, FLOAT LengthMax);
  1857. XMVECTOR XMVector2ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax);
  1858. XMVECTOR XMVector2Reflect(FXMVECTOR Incident, FXMVECTOR Normal);
  1859. XMVECTOR XMVector2Refract(FXMVECTOR Incident, FXMVECTOR Normal, FLOAT RefractionIndex);
  1860. XMVECTOR XMVector2RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex);
  1861. XMVECTOR XMVector2Orthogonal(FXMVECTOR V);
  1862. XMVECTOR XMVector2AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2);
  1863. XMVECTOR XMVector2AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2);
  1864. XMVECTOR XMVector2AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2);
  1865. XMVECTOR XMVector2LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point);
  1866. XMVECTOR XMVector2IntersectLine(FXMVECTOR Line1Point1, FXMVECTOR Line1Point2, FXMVECTOR Line2Point1, CXMVECTOR Line2Point2);
  1867. XMVECTOR XMVector2Transform(FXMVECTOR V, CXMMATRIX M);
  1868. XMFLOAT4* XMVector2TransformStream(_Out_bytecap_x_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
  1869. _In_ UINT OutputStride,
  1870. _In_bytecount_x_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) CONST XMFLOAT2* pInputStream,
  1871. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1872. XMFLOAT4* XMVector2TransformStreamNC(_Out_bytecap_x_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
  1873. _In_ UINT OutputStride,
  1874. _In_bytecount_x_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) CONST XMFLOAT2* pInputStream,
  1875. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1876. XMVECTOR XMVector2TransformCoord(FXMVECTOR V, CXMMATRIX M);
  1877. XMFLOAT2* XMVector2TransformCoordStream(_Out_bytecap_x_(sizeof(XMFLOAT2)+OutputStride*(VectorCount-1)) XMFLOAT2* pOutputStream,
  1878. _In_ UINT OutputStride,
  1879. _In_bytecount_x_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) CONST XMFLOAT2* pInputStream,
  1880. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1881. XMVECTOR XMVector2TransformNormal(FXMVECTOR V, CXMMATRIX M);
  1882. XMFLOAT2* XMVector2TransformNormalStream(_Out_bytecap_x_(sizeof(XMFLOAT2)+OutputStride*(VectorCount-1)) XMFLOAT2* pOutputStream,
  1883. _In_ UINT OutputStride,
  1884. _In_bytecount_x_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) CONST XMFLOAT2* pInputStream,
  1885. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1886. /****************************************************************************
  1887. *
  1888. * 3D vector operations
  1889. *
  1890. ****************************************************************************/
  1891. BOOL XMVector3Equal(FXMVECTOR V1, FXMVECTOR V2);
  1892. UINT XMVector3EqualR(FXMVECTOR V1, FXMVECTOR V2);
  1893. BOOL XMVector3EqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1894. UINT XMVector3EqualIntR(FXMVECTOR V1, FXMVECTOR V2);
  1895. BOOL XMVector3NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
  1896. BOOL XMVector3NotEqual(FXMVECTOR V1, FXMVECTOR V2);
  1897. BOOL XMVector3NotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1898. BOOL XMVector3Greater(FXMVECTOR V1, FXMVECTOR V2);
  1899. UINT XMVector3GreaterR(FXMVECTOR V1, FXMVECTOR V2);
  1900. BOOL XMVector3GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1901. UINT XMVector3GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2);
  1902. BOOL XMVector3Less(FXMVECTOR V1, FXMVECTOR V2);
  1903. BOOL XMVector3LessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1904. BOOL XMVector3InBounds(FXMVECTOR V, FXMVECTOR Bounds);
  1905. UINT XMVector3InBoundsR(FXMVECTOR V, FXMVECTOR Bounds);
  1906. BOOL XMVector3IsNaN(FXMVECTOR V);
  1907. BOOL XMVector3IsInfinite(FXMVECTOR V);
  1908. XMVECTOR XMVector3Dot(FXMVECTOR V1, FXMVECTOR V2);
  1909. XMVECTOR XMVector3Cross(FXMVECTOR V1, FXMVECTOR V2);
  1910. XMVECTOR XMVector3LengthSq(FXMVECTOR V);
  1911. XMVECTOR XMVector3ReciprocalLengthEst(FXMVECTOR V);
  1912. XMVECTOR XMVector3ReciprocalLength(FXMVECTOR V);
  1913. XMVECTOR XMVector3LengthEst(FXMVECTOR V);
  1914. XMVECTOR XMVector3Length(FXMVECTOR V);
  1915. XMVECTOR XMVector3NormalizeEst(FXMVECTOR V);
  1916. XMVECTOR XMVector3Normalize(FXMVECTOR V);
  1917. XMVECTOR XMVector3ClampLength(FXMVECTOR V, FLOAT LengthMin, FLOAT LengthMax);
  1918. XMVECTOR XMVector3ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax);
  1919. XMVECTOR XMVector3Reflect(FXMVECTOR Incident, FXMVECTOR Normal);
  1920. XMVECTOR XMVector3Refract(FXMVECTOR Incident, FXMVECTOR Normal, FLOAT RefractionIndex);
  1921. XMVECTOR XMVector3RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex);
  1922. XMVECTOR XMVector3Orthogonal(FXMVECTOR V);
  1923. XMVECTOR XMVector3AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2);
  1924. XMVECTOR XMVector3AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2);
  1925. XMVECTOR XMVector3AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2);
  1926. XMVECTOR XMVector3LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point);
  1927. VOID XMVector3ComponentsFromNormal(_Out_ XMVECTOR* pParallel, _Out_ XMVECTOR* pPerpendicular, FXMVECTOR V, FXMVECTOR Normal);
  1928. XMVECTOR XMVector3Rotate(FXMVECTOR V, FXMVECTOR RotationQuaternion);
  1929. XMVECTOR XMVector3InverseRotate(FXMVECTOR V, FXMVECTOR RotationQuaternion);
  1930. XMVECTOR XMVector3Transform(FXMVECTOR V, CXMMATRIX M);
  1931. XMFLOAT4* XMVector3TransformStream(_Out_bytecap_x_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
  1932. _In_ UINT OutputStride,
  1933. _In_bytecount_x_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) CONST XMFLOAT3* pInputStream,
  1934. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1935. XMFLOAT4* XMVector3TransformStreamNC(_Out_bytecap_x_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
  1936. _In_ UINT OutputStride,
  1937. _In_bytecount_x_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) CONST XMFLOAT3* pInputStream,
  1938. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1939. XMVECTOR XMVector3TransformCoord(FXMVECTOR V, CXMMATRIX M);
  1940. XMFLOAT3* XMVector3TransformCoordStream(_Out_bytecap_x_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
  1941. _In_ UINT OutputStride,
  1942. _In_bytecount_x_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) CONST XMFLOAT3* pInputStream,
  1943. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1944. XMVECTOR XMVector3TransformNormal(FXMVECTOR V, CXMMATRIX M);
  1945. XMFLOAT3* XMVector3TransformNormalStream(_Out_bytecap_x_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
  1946. _In_ UINT OutputStride,
  1947. _In_bytecount_x_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) CONST XMFLOAT3* pInputStream,
  1948. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  1949. XMVECTOR XMVector3Project(FXMVECTOR V, FLOAT ViewportX, FLOAT ViewportY, FLOAT ViewportWidth, FLOAT ViewportHeight, FLOAT ViewportMinZ, FLOAT ViewportMaxZ,
  1950. CXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World);
  1951. XMFLOAT3* XMVector3ProjectStream(_Out_bytecap_x_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
  1952. _In_ UINT OutputStride,
  1953. _In_bytecount_x_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) CONST XMFLOAT3* pInputStream,
  1954. _In_ UINT InputStride, _In_ UINT VectorCount,
  1955. FLOAT ViewportX, FLOAT ViewportY, FLOAT ViewportWidth, FLOAT ViewportHeight, FLOAT ViewportMinZ, FLOAT ViewportMaxZ,
  1956. CXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World);
  1957. XMVECTOR XMVector3Unproject(FXMVECTOR V, FLOAT ViewportX, FLOAT ViewportY, FLOAT ViewportWidth, FLOAT ViewportHeight, FLOAT ViewportMinZ, FLOAT ViewportMaxZ,
  1958. CXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World);
  1959. XMFLOAT3* XMVector3UnprojectStream(_Out_bytecap_x_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
  1960. _In_ UINT OutputStride,
  1961. _In_bytecount_x_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) CONST XMFLOAT3* pInputStream,
  1962. _In_ UINT InputStride, _In_ UINT VectorCount,
  1963. FLOAT ViewportX, FLOAT ViewportY, FLOAT ViewportWidth, FLOAT ViewportHeight, FLOAT ViewportMinZ, FLOAT ViewportMaxZ,
  1964. CXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World);
  1965. /****************************************************************************
  1966. *
  1967. * 4D vector operations
  1968. *
  1969. ****************************************************************************/
  1970. BOOL XMVector4Equal(FXMVECTOR V1, FXMVECTOR V2);
  1971. UINT XMVector4EqualR(FXMVECTOR V1, FXMVECTOR V2);
  1972. BOOL XMVector4EqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1973. UINT XMVector4EqualIntR(FXMVECTOR V1, FXMVECTOR V2);
  1974. BOOL XMVector4NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
  1975. BOOL XMVector4NotEqual(FXMVECTOR V1, FXMVECTOR V2);
  1976. BOOL XMVector4NotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
  1977. BOOL XMVector4Greater(FXMVECTOR V1, FXMVECTOR V2);
  1978. UINT XMVector4GreaterR(FXMVECTOR V1, FXMVECTOR V2);
  1979. BOOL XMVector4GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1980. UINT XMVector4GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2);
  1981. BOOL XMVector4Less(FXMVECTOR V1, FXMVECTOR V2);
  1982. BOOL XMVector4LessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
  1983. BOOL XMVector4InBounds(FXMVECTOR V, FXMVECTOR Bounds);
  1984. UINT XMVector4InBoundsR(FXMVECTOR V, FXMVECTOR Bounds);
  1985. BOOL XMVector4IsNaN(FXMVECTOR V);
  1986. BOOL XMVector4IsInfinite(FXMVECTOR V);
  1987. XMVECTOR XMVector4Dot(FXMVECTOR V1, FXMVECTOR V2);
  1988. XMVECTOR XMVector4Cross(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3);
  1989. XMVECTOR XMVector4LengthSq(FXMVECTOR V);
  1990. XMVECTOR XMVector4ReciprocalLengthEst(FXMVECTOR V);
  1991. XMVECTOR XMVector4ReciprocalLength(FXMVECTOR V);
  1992. XMVECTOR XMVector4LengthEst(FXMVECTOR V);
  1993. XMVECTOR XMVector4Length(FXMVECTOR V);
  1994. XMVECTOR XMVector4NormalizeEst(FXMVECTOR V);
  1995. XMVECTOR XMVector4Normalize(FXMVECTOR V);
  1996. XMVECTOR XMVector4ClampLength(FXMVECTOR V, FLOAT LengthMin, FLOAT LengthMax);
  1997. XMVECTOR XMVector4ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax);
  1998. XMVECTOR XMVector4Reflect(FXMVECTOR Incident, FXMVECTOR Normal);
  1999. XMVECTOR XMVector4Refract(FXMVECTOR Incident, FXMVECTOR Normal, FLOAT RefractionIndex);
  2000. XMVECTOR XMVector4RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex);
  2001. XMVECTOR XMVector4Orthogonal(FXMVECTOR V);
  2002. XMVECTOR XMVector4AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2);
  2003. XMVECTOR XMVector4AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2);
  2004. XMVECTOR XMVector4AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2);
  2005. XMVECTOR XMVector4Transform(FXMVECTOR V, CXMMATRIX M);
  2006. XMFLOAT4* XMVector4TransformStream(_Out_bytecap_x_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
  2007. _In_ UINT OutputStride,
  2008. _In_bytecount_x_(sizeof(XMFLOAT4)+InputStride*(VectorCount-1)) CONST XMFLOAT4* pInputStream,
  2009. _In_ UINT InputStride, _In_ UINT VectorCount, CXMMATRIX M);
  2010. /****************************************************************************
  2011. *
  2012. * Matrix operations
  2013. *
  2014. ****************************************************************************/
  2015. BOOL XMMatrixIsNaN(CXMMATRIX M);
  2016. BOOL XMMatrixIsInfinite(CXMMATRIX M);
  2017. BOOL XMMatrixIsIdentity(CXMMATRIX M);
  2018. XMMATRIX XMMatrixMultiply(CXMMATRIX M1, CXMMATRIX M2);
  2019. XMMATRIX XMMatrixMultiplyTranspose(CXMMATRIX M1, CXMMATRIX M2);
  2020. XMMATRIX XMMatrixTranspose(CXMMATRIX M);
  2021. XMMATRIX XMMatrixInverse(_Out_ XMVECTOR* pDeterminant, CXMMATRIX M);
  2022. XMVECTOR XMMatrixDeterminant(CXMMATRIX M);
  2023. BOOL XMMatrixDecompose(_Out_ XMVECTOR *outScale, _Out_ XMVECTOR *outRotQuat, _Out_ XMVECTOR *outTrans, CXMMATRIX M);
  2024. XMMATRIX XMMatrixIdentity();
  2025. XMMATRIX XMMatrixSet(FLOAT m00, FLOAT m01, FLOAT m02, FLOAT m03,
  2026. FLOAT m10, FLOAT m11, FLOAT m12, FLOAT m13,
  2027. FLOAT m20, FLOAT m21, FLOAT m22, FLOAT m23,
  2028. FLOAT m30, FLOAT m31, FLOAT m32, FLOAT m33);
  2029. XMMATRIX XMMatrixTranslation(FLOAT OffsetX, FLOAT OffsetY, FLOAT OffsetZ);
  2030. XMMATRIX XMMatrixTranslationFromVector(FXMVECTOR Offset);
  2031. XMMATRIX XMMatrixScaling(FLOAT ScaleX, FLOAT ScaleY, FLOAT ScaleZ);
  2032. XMMATRIX XMMatrixScalingFromVector(FXMVECTOR Scale);
  2033. XMMATRIX XMMatrixRotationX(FLOAT Angle);
  2034. XMMATRIX XMMatrixRotationY(FLOAT Angle);
  2035. XMMATRIX XMMatrixRotationZ(FLOAT Angle);
  2036. XMMATRIX XMMatrixRotationRollPitchYaw(FLOAT Pitch, FLOAT Yaw, FLOAT Roll);
  2037. XMMATRIX XMMatrixRotationRollPitchYawFromVector(FXMVECTOR Angles);
  2038. XMMATRIX XMMatrixRotationNormal(FXMVECTOR NormalAxis, FLOAT Angle);
  2039. XMMATRIX XMMatrixRotationAxis(FXMVECTOR Axis, FLOAT Angle);
  2040. XMMATRIX XMMatrixRotationQuaternion(FXMVECTOR Quaternion);
  2041. XMMATRIX XMMatrixTransformation2D(FXMVECTOR ScalingOrigin, FLOAT ScalingOrientation, FXMVECTOR Scaling,
  2042. FXMVECTOR RotationOrigin, FLOAT Rotation, CXMVECTOR Translation);
  2043. XMMATRIX XMMatrixTransformation(FXMVECTOR ScalingOrigin, FXMVECTOR ScalingOrientationQuaternion, FXMVECTOR Scaling,
  2044. CXMVECTOR RotationOrigin, CXMVECTOR RotationQuaternion, CXMVECTOR Translation);
  2045. XMMATRIX XMMatrixAffineTransformation2D(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, FLOAT Rotation, FXMVECTOR Translation);
  2046. XMMATRIX XMMatrixAffineTransformation(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, FXMVECTOR RotationQuaternion, CXMVECTOR Translation);
  2047. XMMATRIX XMMatrixReflect(FXMVECTOR ReflectionPlane);
  2048. XMMATRIX XMMatrixShadow(FXMVECTOR ShadowPlane, FXMVECTOR LightPosition);
  2049. XMMATRIX XMMatrixLookAtLH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection);
  2050. XMMATRIX XMMatrixLookAtRH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection);
  2051. XMMATRIX XMMatrixLookToLH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection);
  2052. XMMATRIX XMMatrixLookToRH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection);
  2053. XMMATRIX XMMatrixPerspectiveLH(FLOAT ViewWidth, FLOAT ViewHeight, FLOAT NearZ, FLOAT FarZ);
  2054. XMMATRIX XMMatrixPerspectiveRH(FLOAT ViewWidth, FLOAT ViewHeight, FLOAT NearZ, FLOAT FarZ);
  2055. XMMATRIX XMMatrixPerspectiveFovLH(FLOAT FovAngleY, FLOAT AspectHByW, FLOAT NearZ, FLOAT FarZ);
  2056. XMMATRIX XMMatrixPerspectiveFovRH(FLOAT FovAngleY, FLOAT AspectHByW, FLOAT NearZ, FLOAT FarZ);
  2057. XMMATRIX XMMatrixPerspectiveOffCenterLH(FLOAT ViewLeft, FLOAT ViewRight, FLOAT ViewBottom, FLOAT ViewTop, FLOAT NearZ, FLOAT FarZ);
  2058. XMMATRIX XMMatrixPerspectiveOffCenterRH(FLOAT ViewLeft, FLOAT ViewRight, FLOAT ViewBottom, FLOAT ViewTop, FLOAT NearZ, FLOAT FarZ);
  2059. XMMATRIX XMMatrixOrthographicLH(FLOAT ViewWidth, FLOAT ViewHeight, FLOAT NearZ, FLOAT FarZ);
  2060. XMMATRIX XMMatrixOrthographicRH(FLOAT ViewWidth, FLOAT ViewHeight, FLOAT NearZ, FLOAT FarZ);
  2061. XMMATRIX XMMatrixOrthographicOffCenterLH(FLOAT ViewLeft, FLOAT ViewRight, FLOAT ViewBottom, FLOAT ViewTop, FLOAT NearZ, FLOAT FarZ);
  2062. XMMATRIX XMMatrixOrthographicOffCenterRH(FLOAT ViewLeft, FLOAT ViewRight, FLOAT ViewBottom, FLOAT ViewTop, FLOAT NearZ, FLOAT FarZ);
  2063. /****************************************************************************
  2064. *
  2065. * Quaternion operations
  2066. *
  2067. ****************************************************************************/
  2068. BOOL XMQuaternionEqual(FXMVECTOR Q1, FXMVECTOR Q2);
  2069. BOOL XMQuaternionNotEqual(FXMVECTOR Q1, FXMVECTOR Q2);
  2070. BOOL XMQuaternionIsNaN(FXMVECTOR Q);
  2071. BOOL XMQuaternionIsInfinite(FXMVECTOR Q);
  2072. BOOL XMQuaternionIsIdentity(FXMVECTOR Q);
  2073. XMVECTOR XMQuaternionDot(FXMVECTOR Q1, FXMVECTOR Q2);
  2074. XMVECTOR XMQuaternionMultiply(FXMVECTOR Q1, FXMVECTOR Q2);
  2075. XMVECTOR XMQuaternionLengthSq(FXMVECTOR Q);
  2076. XMVECTOR XMQuaternionReciprocalLength(FXMVECTOR Q);
  2077. XMVECTOR XMQuaternionLength(FXMVECTOR Q);
  2078. XMVECTOR XMQuaternionNormalizeEst(FXMVECTOR Q);
  2079. XMVECTOR XMQuaternionNormalize(FXMVECTOR Q);
  2080. XMVECTOR XMQuaternionConjugate(FXMVECTOR Q);
  2081. XMVECTOR XMQuaternionInverse(FXMVECTOR Q);
  2082. XMVECTOR XMQuaternionLn(FXMVECTOR Q);
  2083. XMVECTOR XMQuaternionExp(FXMVECTOR Q);
  2084. XMVECTOR XMQuaternionSlerp(FXMVECTOR Q0, FXMVECTOR Q1, FLOAT t);
  2085. XMVECTOR XMQuaternionSlerpV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR T);
  2086. XMVECTOR XMQuaternionSquad(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, CXMVECTOR Q3, FLOAT t);
  2087. XMVECTOR XMQuaternionSquadV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, CXMVECTOR Q3, CXMVECTOR T);
  2088. VOID XMQuaternionSquadSetup(_Out_ XMVECTOR* pA, _Out_ XMVECTOR* pB, _Out_ XMVECTOR* pC, FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, CXMVECTOR Q3);
  2089. XMVECTOR XMQuaternionBaryCentric(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, FLOAT f, FLOAT g);
  2090. XMVECTOR XMQuaternionBaryCentricV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, CXMVECTOR F, CXMVECTOR G);
  2091. XMVECTOR XMQuaternionIdentity();
  2092. XMVECTOR XMQuaternionRotationRollPitchYaw(FLOAT Pitch, FLOAT Yaw, FLOAT Roll);
  2093. XMVECTOR XMQuaternionRotationRollPitchYawFromVector(FXMVECTOR Angles);
  2094. XMVECTOR XMQuaternionRotationNormal(FXMVECTOR NormalAxis, FLOAT Angle);
  2095. XMVECTOR XMQuaternionRotationAxis(FXMVECTOR Axis, FLOAT Angle);
  2096. XMVECTOR XMQuaternionRotationMatrix(CXMMATRIX M);
  2097. VOID XMQuaternionToAxisAngle(_Out_ XMVECTOR* pAxis, _Out_ FLOAT* pAngle, FXMVECTOR Q);
  2098. /****************************************************************************
  2099. *
  2100. * Plane operations
  2101. *
  2102. ****************************************************************************/
  2103. BOOL XMPlaneEqual(FXMVECTOR P1, FXMVECTOR P2);
  2104. BOOL XMPlaneNearEqual(FXMVECTOR P1, FXMVECTOR P2, FXMVECTOR Epsilon);
  2105. BOOL XMPlaneNotEqual(FXMVECTOR P1, FXMVECTOR P2);
  2106. BOOL XMPlaneIsNaN(FXMVECTOR P);
  2107. BOOL XMPlaneIsInfinite(FXMVECTOR P);
  2108. XMVECTOR XMPlaneDot(FXMVECTOR P, FXMVECTOR V);
  2109. XMVECTOR XMPlaneDotCoord(FXMVECTOR P, FXMVECTOR V);
  2110. XMVECTOR XMPlaneDotNormal(FXMVECTOR P, FXMVECTOR V);
  2111. XMVECTOR XMPlaneNormalizeEst(FXMVECTOR P);
  2112. XMVECTOR XMPlaneNormalize(FXMVECTOR P);
  2113. XMVECTOR XMPlaneIntersectLine(FXMVECTOR P, FXMVECTOR LinePoint1, FXMVECTOR LinePoint2);
  2114. VOID XMPlaneIntersectPlane(_Out_ XMVECTOR* pLinePoint1, _Out_ XMVECTOR* pLinePoint2, FXMVECTOR P1, FXMVECTOR P2);
  2115. XMVECTOR XMPlaneTransform(FXMVECTOR P, CXMMATRIX M);
  2116. XMFLOAT4* XMPlaneTransformStream(_Out_bytecap_x_(sizeof(XMFLOAT4)+OutputStride*(PlaneCount-1)) XMFLOAT4* pOutputStream,
  2117. _In_ UINT OutputStride,
  2118. _In_bytecount_x_(sizeof(XMFLOAT4)+InputStride*(PlaneCount-1)) CONST XMFLOAT4* pInputStream,
  2119. _In_ UINT InputStride, _In_ UINT PlaneCount, CXMMATRIX M);
  2120. XMVECTOR XMPlaneFromPointNormal(FXMVECTOR Point, FXMVECTOR Normal);
  2121. XMVECTOR XMPlaneFromPoints(FXMVECTOR Point1, FXMVECTOR Point2, FXMVECTOR Point3);
  2122. /****************************************************************************
  2123. *
  2124. * Color operations
  2125. *
  2126. ****************************************************************************/
  2127. BOOL XMColorEqual(FXMVECTOR C1, FXMVECTOR C2);
  2128. BOOL XMColorNotEqual(FXMVECTOR C1, FXMVECTOR C2);
  2129. BOOL XMColorGreater(FXMVECTOR C1, FXMVECTOR C2);
  2130. BOOL XMColorGreaterOrEqual(FXMVECTOR C1, FXMVECTOR C2);
  2131. BOOL XMColorLess(FXMVECTOR C1, FXMVECTOR C2);
  2132. BOOL XMColorLessOrEqual(FXMVECTOR C1, FXMVECTOR C2);
  2133. BOOL XMColorIsNaN(FXMVECTOR C);
  2134. BOOL XMColorIsInfinite(FXMVECTOR C);
  2135. XMVECTOR XMColorNegative(FXMVECTOR C);
  2136. XMVECTOR XMColorModulate(FXMVECTOR C1, FXMVECTOR C2);
  2137. XMVECTOR XMColorAdjustSaturation(FXMVECTOR C, FLOAT Saturation);
  2138. XMVECTOR XMColorAdjustContrast(FXMVECTOR C, FLOAT Contrast);
  2139. /****************************************************************************
  2140. *
  2141. * Miscellaneous operations
  2142. *
  2143. ****************************************************************************/
  2144. BOOL XMVerifyCPUSupport();
  2145. VOID XMAssert(_In_z_ CONST CHAR* pExpression, _In_z_ CONST CHAR* pFileName, UINT LineNumber);
  2146. XMVECTOR XMFresnelTerm(FXMVECTOR CosIncidentAngle, FXMVECTOR RefractionIndex);
  2147. BOOL XMScalarNearEqual(FLOAT S1, FLOAT S2, FLOAT Epsilon);
  2148. FLOAT XMScalarModAngle(FLOAT Value);
  2149. FLOAT XMScalarSin(FLOAT Value);
  2150. FLOAT XMScalarCos(FLOAT Value);
  2151. VOID XMScalarSinCos(_Out_ FLOAT* pSin, _Out_ FLOAT* pCos, FLOAT Value);
  2152. FLOAT XMScalarASin(FLOAT Value);
  2153. FLOAT XMScalarACos(FLOAT Value);
  2154. FLOAT XMScalarSinEst(FLOAT Value);
  2155. FLOAT XMScalarCosEst(FLOAT Value);
  2156. VOID XMScalarSinCosEst(_Out_ FLOAT* pSin, _Out_ FLOAT* pCos, FLOAT Value);
  2157. FLOAT XMScalarASinEst(FLOAT Value);
  2158. FLOAT XMScalarACosEst(FLOAT Value);
  2159. /****************************************************************************
  2160. *
  2161. * Globals
  2162. *
  2163. ****************************************************************************/
  2164. // The purpose of the following global constants is to prevent redundant
  2165. // reloading of the constants when they are referenced by more than one
  2166. // separate inline math routine called within the same function. Declaring
  2167. // a constant locally within a routine is sufficient to prevent redundant
  2168. // reloads of that constant when that single routine is called multiple
  2169. // times in a function, but if the constant is used (and declared) in a
  2170. // separate math routine it would be reloaded.
// Each global constant is declared extern const with __declspec(selectany) so
// that it can be defined here in the header yet folded to a single instance
// by the linker across all translation units that include this file.
#define XMGLOBALCONST extern CONST __declspec(selectany)
// --- Polynomial coefficients for the transcendental approximations ---------
// Taylor/minimax series terms consumed four at a time by the vector
// sin/cos/tan/asin/atan routines (the *Est variants use shorter series).
XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients0 = {1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f};
XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients1 = {2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f};
XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients2 = {2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f};
XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients0 = {1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f};
XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients1 = {2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f};
XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients2 = {4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f};
XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients0 = {1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f};
XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients1 = {2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f};
XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients2 = {5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f};
XMGLOBALCONST XMVECTORF32 g_XMASinCoefficients0 = {-0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f};
XMGLOBALCONST XMVECTORF32 g_XMASinCoefficients1 = {0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f};
XMGLOBALCONST XMVECTORF32 g_XMASinCoefficients2 = {-1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 5.48179257935713f};
XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients0 = {1.0f, 0.333333334f, 0.2f, 0.142857143f};
XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients1 = {1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f};
XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients2 = {5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f};
XMGLOBALCONST XMVECTORF32 g_XMSinEstCoefficients = {1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f};
XMGLOBALCONST XMVECTORF32 g_XMCosEstCoefficients = {1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f};
XMGLOBALCONST XMVECTORF32 g_XMTanEstCoefficients = {2.484f, -1.954923183e-1f, 2.467401101f, XM_1DIVPI};
XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients = {7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, XM_PIDIV2};
XMGLOBALCONST XMVECTORF32 g_XMASinEstCoefficients = {-1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f};
XMGLOBALCONST XMVECTORF32 g_XMASinEstConstants = {1.00000011921f, XM_PIDIV2, 0.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMPiConstants0 = {XM_PI, XM_2PI, XM_1DIVPI, XM_1DIV2PI};
// --- Identity matrix rows and their negations ------------------------------
XMGLOBALCONST XMVECTORF32 g_XMIdentityR0 = {1.0f, 0.0f, 0.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMIdentityR1 = {0.0f, 1.0f, 0.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMIdentityR2 = {0.0f, 0.0f, 1.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMIdentityR3 = {0.0f, 0.0f, 0.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR0 = {-1.0f,0.0f, 0.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR1 = {0.0f,-1.0f, 0.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR2 = {0.0f, 0.0f,-1.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR3 = {0.0f, 0.0f, 0.0f,-1.0f};
// --- Bitwise masks: sign-bit flips and per-lane selection ------------------
XMGLOBALCONST XMVECTORI32 g_XMNegativeZero = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
XMGLOBALCONST XMVECTORI32 g_XMNegate3 = {0x80000000, 0x80000000, 0x80000000, 0x00000000};
XMGLOBALCONST XMVECTORI32 g_XMMask3 = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000};
XMGLOBALCONST XMVECTORI32 g_XMMaskX = {0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000};
XMGLOBALCONST XMVECTORI32 g_XMMaskY = {0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000};
XMGLOBALCONST XMVECTORI32 g_XMMaskZ = {0x00000000, 0x00000000, 0xFFFFFFFF, 0x00000000};
XMGLOBALCONST XMVECTORI32 g_XMMaskW = {0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF};
// --- Common scalar splats --------------------------------------------------
XMGLOBALCONST XMVECTORF32 g_XMOne = { 1.0f, 1.0f, 1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 g_XMOne3 = { 1.0f, 1.0f, 1.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMZero = { 0.0f, 0.0f, 0.0f, 0.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegativeOne = {-1.0f,-1.0f,-1.0f,-1.0f};
XMGLOBALCONST XMVECTORF32 g_XMOneHalf = { 0.5f, 0.5f, 0.5f, 0.5f};
XMGLOBALCONST XMVECTORF32 g_XMNegativeOneHalf = {-0.5f,-0.5f,-0.5f,-0.5f};
// --- Angle-range constants used for range reduction ------------------------
XMGLOBALCONST XMVECTORF32 g_XMNegativeTwoPi = {-XM_2PI, -XM_2PI, -XM_2PI, -XM_2PI};
XMGLOBALCONST XMVECTORF32 g_XMNegativePi = {-XM_PI, -XM_PI, -XM_PI, -XM_PI};
XMGLOBALCONST XMVECTORF32 g_XMHalfPi = {XM_PIDIV2, XM_PIDIV2, XM_PIDIV2, XM_PIDIV2};
XMGLOBALCONST XMVECTORF32 g_XMPi = {XM_PI, XM_PI, XM_PI, XM_PI};
XMGLOBALCONST XMVECTORF32 g_XMReciprocalPi = {XM_1DIVPI, XM_1DIVPI, XM_1DIVPI, XM_1DIVPI};
XMGLOBALCONST XMVECTORF32 g_XMTwoPi = {XM_2PI, XM_2PI, XM_2PI, XM_2PI};
XMGLOBALCONST XMVECTORF32 g_XMReciprocalTwoPi = {XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI};
// --- IEEE-754 single-precision limits and special-value bit patterns -------
XMGLOBALCONST XMVECTORF32 g_XMEpsilon = {1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f};
XMGLOBALCONST XMVECTORI32 g_XMInfinity = {0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
XMGLOBALCONST XMVECTORI32 g_XMQNaN = {0x7FC00000, 0x7FC00000, 0x7FC00000, 0x7FC00000};
XMGLOBALCONST XMVECTORI32 g_XMQNaNTest = {0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
XMGLOBALCONST XMVECTORI32 g_XMAbsMask = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
XMGLOBALCONST XMVECTORI32 g_XMFltMin = {0x00800000, 0x00800000, 0x00800000, 0x00800000};
XMGLOBALCONST XMVECTORI32 g_XMFltMax = {0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF};
XMGLOBALCONST XMVECTORI32 g_XMNegOneMask = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
// --- Packed-format conversion constants ------------------------------------
// For each packed pixel/vertex format: a field mask, a sign-flip pattern,
// an additive bias (Fix/Add), and a per-component scale (Normalize/Mul).
XMGLOBALCONST XMVECTORI32 g_XMMaskA8R8G8B8 = {0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000};
XMGLOBALCONST XMVECTORI32 g_XMFlipA8R8G8B8 = {0x00000000, 0x00000000, 0x00000000, 0x80000000};
XMGLOBALCONST XMVECTORF32 g_XMFixAA8R8G8B8 = {0.0f,0.0f,0.0f,(float)(0x80000000U)};
XMGLOBALCONST XMVECTORF32 g_XMNormalizeA8R8G8B8 = {1.0f/(255.0f*(float)(0x10000)),1.0f/(255.0f*(float)(0x100)),1.0f/255.0f,1.0f/(255.0f*(float)(0x1000000))};
XMGLOBALCONST XMVECTORI32 g_XMMaskA2B10G10R10 = {0x000003FF, 0x000FFC00, 0x3FF00000, 0xC0000000};
XMGLOBALCONST XMVECTORI32 g_XMFlipA2B10G10R10 = {0x00000200, 0x00080000, 0x20000000, 0x80000000};
XMGLOBALCONST XMVECTORF32 g_XMFixAA2B10G10R10 = {-512.0f,-512.0f*(float)(0x400),-512.0f*(float)(0x100000),(float)(0x80000000U)};
XMGLOBALCONST XMVECTORF32 g_XMNormalizeA2B10G10R10 = {1.0f/511.0f,1.0f/(511.0f*(float)(0x400)),1.0f/(511.0f*(float)(0x100000)),1.0f/(3.0f*(float)(0x40000000))};
XMGLOBALCONST XMVECTORI32 g_XMMaskX16Y16 = {0x0000FFFF, 0xFFFF0000, 0x00000000, 0x00000000};
XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16 = {0x00008000, 0x00000000, 0x00000000, 0x00000000};
XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16 = {-32768.0f,0.0f,0.0f,0.0f};
XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16 = {1.0f/32767.0f,1.0f/(32767.0f*65536.0f),0.0f,0.0f};
XMGLOBALCONST XMVECTORI32 g_XMMaskX16Y16Z16W16 = {0x0000FFFF, 0x0000FFFF, 0xFFFF0000, 0xFFFF0000};
XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16Z16W16 = {0x00008000, 0x00008000, 0x00000000, 0x00000000};
XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16Z16W16 = {-32768.0f,-32768.0f,0.0f,0.0f};
XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16Z16W16 = {1.0f/32767.0f,1.0f/32767.0f,1.0f/(32767.0f*65536.0f),1.0f/(32767.0f*65536.0f)};
// 8388608.0f == 2^23: adding/subtracting it strips the fraction of a float.
XMGLOBALCONST XMVECTORF32 g_XMNoFraction = {8388608.0f,8388608.0f,8388608.0f,8388608.0f};
XMGLOBALCONST XMVECTORI32 g_XMMaskByte = {0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF};
// --- Per-lane negation multipliers -----------------------------------------
XMGLOBALCONST XMVECTORF32 g_XMNegateX = {-1.0f, 1.0f, 1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegateY = { 1.0f,-1.0f, 1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegateZ = { 1.0f, 1.0f,-1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 g_XMNegateW = { 1.0f, 1.0f, 1.0f,-1.0f};
// --- Select and permute control vectors ------------------------------------
XMGLOBALCONST XMVECTORI32 g_XMSelect0101 = {XM_SELECT_0, XM_SELECT_1, XM_SELECT_0, XM_SELECT_1};
XMGLOBALCONST XMVECTORI32 g_XMSelect1010 = {XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_0};
XMGLOBALCONST XMVECTORI32 g_XMOneHalfMinusEpsilon = { 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD};
XMGLOBALCONST XMVECTORI32 g_XMSelect1000 = {XM_SELECT_1, XM_SELECT_0, XM_SELECT_0, XM_SELECT_0};
XMGLOBALCONST XMVECTORI32 g_XMSelect1100 = {XM_SELECT_1, XM_SELECT_1, XM_SELECT_0, XM_SELECT_0};
XMGLOBALCONST XMVECTORI32 g_XMSelect1110 = {XM_SELECT_1, XM_SELECT_1, XM_SELECT_1, XM_SELECT_0};
XMGLOBALCONST XMVECTORI32 g_XMSwizzleXYXY = {XM_PERMUTE_0X, XM_PERMUTE_0Y, XM_PERMUTE_0X, XM_PERMUTE_0Y};
XMGLOBALCONST XMVECTORI32 g_XMSwizzleXYZX = {XM_PERMUTE_0X, XM_PERMUTE_0Y, XM_PERMUTE_0Z, XM_PERMUTE_0X};
XMGLOBALCONST XMVECTORI32 g_XMSwizzleYXZW = {XM_PERMUTE_0Y, XM_PERMUTE_0X, XM_PERMUTE_0Z, XM_PERMUTE_0W};
XMGLOBALCONST XMVECTORI32 g_XMSwizzleYZXW = {XM_PERMUTE_0Y, XM_PERMUTE_0Z, XM_PERMUTE_0X, XM_PERMUTE_0W};
XMGLOBALCONST XMVECTORI32 g_XMSwizzleZXYW = {XM_PERMUTE_0Z, XM_PERMUTE_0X, XM_PERMUTE_0Y, XM_PERMUTE_0W};
XMGLOBALCONST XMVECTORI32 g_XMPermute0X0Y1X1Y = {XM_PERMUTE_0X, XM_PERMUTE_0Y, XM_PERMUTE_1X, XM_PERMUTE_1Y};
XMGLOBALCONST XMVECTORI32 g_XMPermute0Z0W1Z1W = {XM_PERMUTE_0Z, XM_PERMUTE_0W, XM_PERMUTE_1Z, XM_PERMUTE_1W};
// --- Additional packed-format helpers (16-bit, HenD3/DHen3, Ico4, Dec4, Byte4)
XMGLOBALCONST XMVECTORF32 g_XMFixupY16 = {1.0f,1.0f/65536.0f,0.0f,0.0f};
XMGLOBALCONST XMVECTORF32 g_XMFixupY16W16 = {1.0f,1.0f,1.0f/65536.0f,1.0f/65536.0f};
XMGLOBALCONST XMVECTORI32 g_XMFlipY = {0,0x80000000,0,0};
XMGLOBALCONST XMVECTORI32 g_XMFlipZ = {0,0,0x80000000,0};
XMGLOBALCONST XMVECTORI32 g_XMFlipW = {0,0,0,0x80000000};
XMGLOBALCONST XMVECTORI32 g_XMFlipYZ = {0,0x80000000,0x80000000,0};
XMGLOBALCONST XMVECTORI32 g_XMFlipZW = {0,0,0x80000000,0x80000000};
XMGLOBALCONST XMVECTORI32 g_XMFlipYW = {0,0x80000000,0,0x80000000};
XMGLOBALCONST XMVECTORI32 g_XMMaskHenD3 = {0x7FF,0x7ff<<11,0x3FF<<22,0};
XMGLOBALCONST XMVECTORI32 g_XMMaskDHen3 = {0x3FF,0x7ff<<10,0x7FF<<21,0};
XMGLOBALCONST XMVECTORF32 g_XMAddUHenD3 = {0,0,32768.0f*65536.0f,0};
XMGLOBALCONST XMVECTORF32 g_XMAddHenD3 = {-1024.0f,-1024.0f*2048.0f,0,0};
XMGLOBALCONST XMVECTORF32 g_XMAddDHen3 = {-512.0f,-1024.0f*1024.0f,0,0};
XMGLOBALCONST XMVECTORF32 g_XMMulHenD3 = {1.0f,1.0f/2048.0f,1.0f/(2048.0f*2048.0f),0};
XMGLOBALCONST XMVECTORF32 g_XMMulDHen3 = {1.0f,1.0f/1024.0f,1.0f/(1024.0f*2048.0f),0};
XMGLOBALCONST XMVECTORI32 g_XMXorHenD3 = {0x400,0x400<<11,0,0};
XMGLOBALCONST XMVECTORI32 g_XMXorDHen3 = {0x200,0x400<<10,0,0};
XMGLOBALCONST XMVECTORI32 g_XMMaskIco4 = {0xFFFFF,0xFFFFF000,0xFFFFF,0xF0000000};
XMGLOBALCONST XMVECTORI32 g_XMXorXIco4 = {0x80000,0,0x80000,0x80000000};
XMGLOBALCONST XMVECTORI32 g_XMXorIco4 = {0x80000,0,0x80000,0};
XMGLOBALCONST XMVECTORF32 g_XMAddXIco4 = {-8.0f*65536.0f,0,-8.0f*65536.0f,32768.0f*65536.0f};
XMGLOBALCONST XMVECTORF32 g_XMAddUIco4 = {0,32768.0f*65536.0f,0,32768.0f*65536.0f};
XMGLOBALCONST XMVECTORF32 g_XMAddIco4 = {-8.0f*65536.0f,0,-8.0f*65536.0f,0};
XMGLOBALCONST XMVECTORF32 g_XMMulIco4 = {1.0f,1.0f/4096.0f,1.0f,1.0f/(4096.0f*65536.0f)};
XMGLOBALCONST XMVECTORI32 g_XMMaskDec4 = {0x3FF,0x3FF<<10,0x3FF<<20,0x3<<30};
XMGLOBALCONST XMVECTORI32 g_XMXorDec4 = {0x200,0x200<<10,0x200<<20,0};
XMGLOBALCONST XMVECTORF32 g_XMAddUDec4 = {0,0,0,32768.0f*65536.0f};
XMGLOBALCONST XMVECTORF32 g_XMAddDec4 = {-512.0f,-512.0f*1024.0f,-512.0f*1024.0f*1024.0f,0};
XMGLOBALCONST XMVECTORF32 g_XMMulDec4 = {1.0f,1.0f/1024.0f,1.0f/(1024.0f*1024.0f),1.0f/(1024.0f*1024.0f*1024.0f)};
XMGLOBALCONST XMVECTORI32 g_XMMaskByte4 = {0xFF,0xFF00,0xFF0000,0xFF000000};
XMGLOBALCONST XMVECTORI32 g_XMXorByte4 = {0x80,0x8000,0x800000,0x00000000};
XMGLOBALCONST XMVECTORF32 g_XMAddByte4 = {-128.0f,-128.0f*256.0f,-128.0f*65536.0f,0};
  2297. /****************************************************************************
  2298. *
  2299. * Implementation
  2300. *
  2301. ****************************************************************************/
  2302. #pragma warning(push)
  2303. #pragma warning(disable:4214 4204 4365 4616 6001)
  2304. #if !defined(__cplusplus) && !defined(_XBOX) && defined(_XM_ISVS2005_)
  2305. /* Work around VC 2005 bug where math.h defines logf with a semicolon at the end.
  2306. * Note this is fixed as of Visual Studio 2005 Service Pack 1
  2307. */
  2308. #undef logf
  2309. #define logf(x) ((float)log((double)(x)))
  2310. #endif // !defined(__cplusplus) && !defined(_XBOX) && defined(_XM_ISVS2005_)
  2311. //------------------------------------------------------------------------------
  2312. #if defined(_XM_NO_INTRINSICS_) || defined(_XM_SSE_INTRINSICS_)
// Build an XMVECTOR whose lanes are 1.0f where the low bit of the matching
// argument (C0..C3) is set and 0.0f where it is clear. Only bit 0 of each
// argument is examined.
XMFINLINE XMVECTOR XMVectorSetBinaryConstant(UINT C0, UINT C1, UINT C2, UINT C3)
{
#if defined(_XM_NO_INTRINSICS_)
XMVECTORU32 vResult;
// (0-(C&1)) is 0xFFFFFFFF when the bit is set, 0 otherwise; ANDing with
// 0x3F800000 (the IEEE-754 bit pattern of 1.0f) selects 1.0f or 0.0f.
vResult.u[0] = (0-(C0&1)) & 0x3F800000;
vResult.u[1] = (0-(C1&1)) & 0x3F800000;
vResult.u[2] = (0-(C2&1)) & 0x3F800000;
vResult.u[3] = (0-(C3&1)) & 0x3F800000;
return vResult.v;
#else // _XM_SSE_INTRINSICS_
static const XMVECTORU32 g_vMask1 = {1,1,1,1};
// Move the parms to a vector
__m128i vTemp = _mm_set_epi32(C3,C2,C1,C0);
// Mask off the low bits
vTemp = _mm_and_si128(vTemp,g_vMask1);
// 0xFFFFFFFF on true bits
vTemp = _mm_cmpeq_epi32(vTemp,g_vMask1);
// 0xFFFFFFFF -> 1.0f, 0x00000000 -> 0.0f
vTemp = _mm_and_si128(vTemp,g_XMOne);
return reinterpret_cast<const __m128 *>(&vTemp)[0];
#endif
}
  2335. //------------------------------------------------------------------------------
// Replicate a small signed constant (-16..15) into all four lanes, convert
// to float, and divide by 2^DivExponent (fixed-point to float conversion).
XMFINLINE XMVECTOR XMVectorSplatConstant(INT IntConstant, UINT DivExponent)
{
#if defined(_XM_NO_INTRINSICS_)
XMASSERT( IntConstant >= -16 && IntConstant <= 15 );
XMASSERT(DivExponent<32);
{
XMVECTORI32 V = { IntConstant, IntConstant, IntConstant, IntConstant };
return XMConvertVectorIntToFloat( V.v, DivExponent);
}
#else // _XM_SSE_INTRINSICS_
XMASSERT( IntConstant >= -16 && IntConstant <= 15 );
XMASSERT(DivExponent<32);
// Splat the int
__m128i vScale = _mm_set1_epi32(IntConstant);
// Convert to a float
XMVECTOR vResult = _mm_cvtepi32_ps(vScale);
// Convert DivExponent into 1.0f/(1<<DivExponent)
// 0x3F800000 is the bit pattern of 1.0f; subtracting DivExponent from the
// biased exponent field constructs the float 2^-DivExponent without a divide.
UINT uScale = 0x3F800000U - (DivExponent << 23);
// Splat the scalar value (It's really a float)
vScale = _mm_set1_epi32(uScale);
// Multiply by the reciprocal (Perform a right shift by DivExponent)
vResult = _mm_mul_ps(vResult,reinterpret_cast<const __m128 *>(&vScale)[0]);
return vResult;
#endif
}
  2361. //------------------------------------------------------------------------------
  2362. XMFINLINE XMVECTOR XMVectorSplatConstantInt(INT IntConstant)
  2363. {
  2364. #if defined(_XM_NO_INTRINSICS_)
  2365. XMASSERT( IntConstant >= -16 && IntConstant <= 15 );
  2366. {
  2367. XMVECTORI32 V = { IntConstant, IntConstant, IntConstant, IntConstant };
  2368. return V.v;
  2369. }
  2370. #else // XM_SSE_INTRINSICS_
  2371. XMASSERT( IntConstant >= -16 && IntConstant <= 15 );
  2372. __m128i V = _mm_set1_epi32( IntConstant );
  2373. return reinterpret_cast<__m128 *>(&V)[0];
  2374. #endif
  2375. }
  2376. //------------------------------------------------------------------------------
  2377. XMFINLINE XMVECTOR XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, UINT Elements)
  2378. {
  2379. return XMVectorPermute(V1, V2, XMVectorPermuteControl((Elements), ((Elements) + 1), ((Elements) + 2), ((Elements) + 3)));
  2380. }
  2381. //------------------------------------------------------------------------------
  2382. XMFINLINE XMVECTOR XMVectorRotateLeft(FXMVECTOR V, UINT Elements)
  2383. {
  2384. #if defined(_XM_NO_INTRINSICS_)
  2385. XMASSERT( Elements < 4 );
  2386. {
  2387. XMVECTORF32 vResult = { V.vector4_f32[Elements & 3], V.vector4_f32[(Elements + 1) & 3],
  2388. V.vector4_f32[(Elements + 2) & 3], V.vector4_f32[(Elements + 3) & 3] };
  2389. return vResult.v;
  2390. }
  2391. #else // XM_SSE_INTRINSICS_
  2392. FLOAT fx = XMVectorGetByIndex(V,(Elements) & 3);
  2393. FLOAT fy = XMVectorGetByIndex(V,((Elements) + 1) & 3);
  2394. FLOAT fz = XMVectorGetByIndex(V,((Elements) + 2) & 3);
  2395. FLOAT fw = XMVectorGetByIndex(V,((Elements) + 3) & 3);
  2396. return _mm_set_ps( fw, fz, fy, fx );
  2397. #endif
  2398. }
  2399. //------------------------------------------------------------------------------
  2400. XMFINLINE XMVECTOR XMVectorRotateRight(FXMVECTOR V, UINT Elements)
  2401. {
  2402. #if defined(_XM_NO_INTRINSICS_)
  2403. XMASSERT( Elements < 4 );
  2404. {
  2405. XMVECTORF32 vResult = { V.vector4_f32[(4 - (Elements)) & 3], V.vector4_f32[(5 - (Elements)) & 3],
  2406. V.vector4_f32[(6 - (Elements)) & 3], V.vector4_f32[(7 - (Elements)) & 3] };
  2407. return vResult.v;
  2408. }
  2409. #else // XM_SSE_INTRINSICS_
  2410. FLOAT fx = XMVectorGetByIndex(V,(4 - (Elements)) & 3);
  2411. FLOAT fy = XMVectorGetByIndex(V,(5 - (Elements)) & 3);
  2412. FLOAT fz = XMVectorGetByIndex(V,(6 - (Elements)) & 3);
  2413. FLOAT fw = XMVectorGetByIndex(V,(7 - (Elements)) & 3);
  2414. return _mm_set_ps( fw, fz, fy, fx );
  2415. #endif
  2416. }
  2417. //------------------------------------------------------------------------------
  2418. XMFINLINE XMVECTOR XMVectorSwizzle(FXMVECTOR V, UINT E0, UINT E1, UINT E2, UINT E3)
  2419. {
  2420. #if defined(_XM_NO_INTRINSICS_)
  2421. XMASSERT( (E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4) );
  2422. {
  2423. XMVECTORF32 vResult = { V.vector4_f32[E0], V.vector4_f32[E1], V.vector4_f32[E2], V.vector4_f32[E3] };
  2424. return vResult.v;
  2425. }
  2426. #else // XM_SSE_INTRINSICS_
  2427. FLOAT fx = XMVectorGetByIndex(V,E0);
  2428. FLOAT fy = XMVectorGetByIndex(V,E1);
  2429. FLOAT fz = XMVectorGetByIndex(V,E2);
  2430. FLOAT fw = XMVectorGetByIndex(V,E3);
  2431. return _mm_set_ps( fw, fz, fy, fx );
  2432. #endif
  2433. }
  2434. //------------------------------------------------------------------------------
  2435. XMFINLINE XMVECTOR XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS, UINT VSLeftRotateElements,
  2436. UINT Select0, UINT Select1, UINT Select2, UINT Select3)
  2437. {
  2438. XMVECTOR Control = XMVectorSelectControl(Select0&1, Select1&1, Select2&1, Select3&1);
  2439. return XMVectorSelect( VD, XMVectorRotateLeft(VS, VSLeftRotateElements), Control );
  2440. }
// Implemented for VMX128 intrinsics as #defines above
#endif // _XM_NO_INTRINSICS_ || _XM_SSE_INTRINSICS_
  2443. //------------------------------------------------------------------------------
  2444. #include "xnamathconvert.inl"
  2445. #include "xnamathvector.inl"
  2446. #include "xnamathmatrix.inl"
  2447. #include "xnamathmisc.inl"
  2448. #pragma warning(pop)
  2449. #endif // __XNAMATH_H__