Vec4.inl
1// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
2// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
3// SPDX-License-Identifier: MIT
4
6#include <Jolt/Math/Vec3.h>
7#include <Jolt/Math/UVec4.h>
8
9JPH_NAMESPACE_BEGIN
10
11// Constructor
12Vec4::Vec4(Vec4Arg inRHS) :
13 mValue(inRHS.mValue)
14{
15}
16
17Vec4::Vec4(Vec3Arg inRHS, float inW)
18{
19#if defined(JPH_USE_SSE4_1)
20 mValue = _mm_blend_ps(inRHS.mValue, _mm_set1_ps(inW), 8);
21#elif defined(JPH_USE_NEON)
22 mValue = vsetq_lane_f32(inW, inRHS.mValue, 3);
23#else
24 for (int i = 0; i < 3; i++)
25 mF32[i] = inRHS.mF32[i];
26 mF32[3] = inW;
27#endif
28}
29
30Vec4::Vec4(float inX, float inY, float inZ, float inW)
31{
32#if defined(JPH_USE_SSE)
33 mValue = _mm_set_ps(inW, inZ, inY, inX);
34#elif defined(JPH_USE_NEON)
35 uint32x2_t xy = vcreate_u32(static_cast<uint64>(BitCast<uint32>(inX)) | (static_cast<uint64>(BitCast<uint32>(inY)) << 32));
36 uint32x2_t zw = vcreate_u32(static_cast<uint64>(BitCast<uint32>(inZ)) | (static_cast<uint64>(BitCast<uint32>(inW)) << 32));
37 mValue = vreinterpretq_f32_u32(vcombine_u32(xy, zw));
38#else
39 mF32[0] = inX;
40 mF32[1] = inY;
41 mF32[2] = inZ;
42 mF32[3] = inW;
43#endif
44}
45
46template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW>
47Vec4 Vec4::Swizzle() const
48{
49 static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
50 static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
51 static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");
52 static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range");
53
54#if defined(JPH_USE_SSE)
55 return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX));
56#elif defined(JPH_USE_NEON)
57 return JPH_NEON_SHUFFLE_F32x4(mValue, mValue, SwizzleX, SwizzleY, SwizzleZ, SwizzleW);
58#else
59 return Vec4(mF32[SwizzleX], mF32[SwizzleY], mF32[SwizzleZ], mF32[SwizzleW]);
60#endif
61}
62
63Vec4 Vec4::sZero()
64{
65#if defined(JPH_USE_SSE)
66 return _mm_setzero_ps();
67#elif defined(JPH_USE_NEON)
68 return vdupq_n_f32(0);
69#else
70 return Vec4(0, 0, 0, 0);
71#endif
72}
73
74Vec4 Vec4::sReplicate(float inV)
75{
76#if defined(JPH_USE_SSE)
77 return _mm_set1_ps(inV);
78#elif defined(JPH_USE_NEON)
79 return vdupq_n_f32(inV);
80#else
81 return Vec4(inV, inV, inV, inV);
82#endif
83}
84
85Vec4 Vec4::sOne()
86{
87 return sReplicate(1.0f);
88}
89
90Vec4 Vec4::sNaN()
91{
92 return sReplicate(numeric_limits<float>::quiet_NaN());
93}
94
95Vec4 Vec4::sLoadFloat4(const Float4 *inV)
96{
97#if defined(JPH_USE_SSE)
98 return _mm_loadu_ps(&inV->x);
99#elif defined(JPH_USE_NEON)
100 return vld1q_f32(&inV->x);
101#else
102 return Vec4(inV->x, inV->y, inV->z, inV->w);
103#endif
104}
105
106Vec4 Vec4::sLoadFloat4Aligned(const Float4 *inV)
107{
108#if defined(JPH_USE_SSE)
109 return _mm_load_ps(&inV->x);
110#elif defined(JPH_USE_NEON)
111 return vld1q_f32(&inV->x);
112#else
113 return Vec4(inV->x, inV->y, inV->z, inV->w);
114#endif
115}
116
117template <const int Scale>
118Vec4 Vec4::sGatherFloat4(const float *inBase, UVec4Arg inOffsets)
119{
120#if defined(JPH_USE_SSE)
121 #ifdef JPH_USE_AVX2
122 return _mm_i32gather_ps(inBase, inOffsets.mValue, Scale);
123 #else
124 const uint8 *base = reinterpret_cast<const uint8 *>(inBase);
125 Type x = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetX() * Scale));
126 Type y = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetY() * Scale));
127 Type xy = _mm_unpacklo_ps(x, y);
128 Type z = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetZ() * Scale));
129 Type w = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetW() * Scale));
130 Type zw = _mm_unpacklo_ps(z, w);
131 return _mm_movelh_ps(xy, zw);
132 #endif
133#else
134 const uint8 *base = reinterpret_cast<const uint8 *>(inBase);
135 float x = *reinterpret_cast<const float *>(base + inOffsets.GetX() * Scale);
136 float y = *reinterpret_cast<const float *>(base + inOffsets.GetY() * Scale);
137 float z = *reinterpret_cast<const float *>(base + inOffsets.GetZ() * Scale);
138 float w = *reinterpret_cast<const float *>(base + inOffsets.GetW() * Scale);
139 return Vec4(x, y, z, w);
140#endif
141}
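The following usage sketch is editorial and not part of Vec4.inl; it assumes the Jolt headers are included and the JPH namespace is in scope. With a Scale of 4 bytes the offsets act as indices into an array of floats:
float data[16] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f };
UVec4 offsets(0, 4, 8, 12); // element indices, multiplied by Scale = 4 bytes inside the gather
Vec4 v = Vec4::sGatherFloat4<4>(data, offsets); // reads data[0], data[4], data[8], data[12], giving (0, 4, 8, 12)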
142
143Vec4 Vec4::sMin(Vec4Arg inV1, Vec4Arg inV2)
144{
145#if defined(JPH_USE_SSE)
146 return _mm_min_ps(inV1.mValue, inV2.mValue);
147#elif defined(JPH_USE_NEON)
148 return vminq_f32(inV1.mValue, inV2.mValue);
149#else
150 return Vec4(min(inV1.mF32[0], inV2.mF32[0]),
151 min(inV1.mF32[1], inV2.mF32[1]),
152 min(inV1.mF32[2], inV2.mF32[2]),
153 min(inV1.mF32[3], inV2.mF32[3]));
154#endif
155}
156
157Vec4 Vec4::sMax(Vec4Arg inV1, Vec4Arg inV2)
158{
159#if defined(JPH_USE_SSE)
160 return _mm_max_ps(inV1.mValue, inV2.mValue);
161#elif defined(JPH_USE_NEON)
162 return vmaxq_f32(inV1.mValue, inV2.mValue);
163#else
164 return Vec4(max(inV1.mF32[0], inV2.mF32[0]),
165 max(inV1.mF32[1], inV2.mF32[1]),
166 max(inV1.mF32[2], inV2.mF32[2]),
167 max(inV1.mF32[3], inV2.mF32[3]));
168#endif
169}
170
171UVec4 Vec4::sEquals(Vec4Arg inV1, Vec4Arg inV2)
172{
173#if defined(JPH_USE_SSE)
174 return _mm_castps_si128(_mm_cmpeq_ps(inV1.mValue, inV2.mValue));
175#elif defined(JPH_USE_NEON)
176 return vceqq_f32(inV1.mValue, inV2.mValue);
177#else
178 return UVec4(inV1.mF32[0] == inV2.mF32[0]? 0xffffffffu : 0,
179 inV1.mF32[1] == inV2.mF32[1]? 0xffffffffu : 0,
180 inV1.mF32[2] == inV2.mF32[2]? 0xffffffffu : 0,
181 inV1.mF32[3] == inV2.mF32[3]? 0xffffffffu : 0);
182#endif
183}
184
185UVec4 Vec4::sLess(Vec4Arg inV1, Vec4Arg inV2)
186{
187#if defined(JPH_USE_SSE)
188 return _mm_castps_si128(_mm_cmplt_ps(inV1.mValue, inV2.mValue));
189#elif defined(JPH_USE_NEON)
190 return vcltq_f32(inV1.mValue, inV2.mValue);
191#else
192 return UVec4(inV1.mF32[0] < inV2.mF32[0]? 0xffffffffu : 0,
193 inV1.mF32[1] < inV2.mF32[1]? 0xffffffffu : 0,
194 inV1.mF32[2] < inV2.mF32[2]? 0xffffffffu : 0,
195 inV1.mF32[3] < inV2.mF32[3]? 0xffffffffu : 0);
196#endif
197}
198
199UVec4 Vec4::sLessOrEqual(Vec4Arg inV1, Vec4Arg inV2)
200{
201#if defined(JPH_USE_SSE)
202 return _mm_castps_si128(_mm_cmple_ps(inV1.mValue, inV2.mValue));
203#elif defined(JPH_USE_NEON)
204 return vcleq_f32(inV1.mValue, inV2.mValue);
205#else
206 return UVec4(inV1.mF32[0] <= inV2.mF32[0]? 0xffffffffu : 0,
207 inV1.mF32[1] <= inV2.mF32[1]? 0xffffffffu : 0,
208 inV1.mF32[2] <= inV2.mF32[2]? 0xffffffffu : 0,
209 inV1.mF32[3] <= inV2.mF32[3]? 0xffffffffu : 0);
210#endif
211}
212
213UVec4 Vec4::sGreater(Vec4Arg inV1, Vec4Arg inV2)
214{
215#if defined(JPH_USE_SSE)
216 return _mm_castps_si128(_mm_cmpgt_ps(inV1.mValue, inV2.mValue));
217#elif defined(JPH_USE_NEON)
218 return vcgtq_f32(inV1.mValue, inV2.mValue);
219#else
220 return UVec4(inV1.mF32[0] > inV2.mF32[0]? 0xffffffffu : 0,
221 inV1.mF32[1] > inV2.mF32[1]? 0xffffffffu : 0,
222 inV1.mF32[2] > inV2.mF32[2]? 0xffffffffu : 0,
223 inV1.mF32[3] > inV2.mF32[3]? 0xffffffffu : 0);
224#endif
225}
226
227UVec4 Vec4::sGreaterOrEqual(Vec4Arg inV1, Vec4Arg inV2)
228{
229#if defined(JPH_USE_SSE)
230 return _mm_castps_si128(_mm_cmpge_ps(inV1.mValue, inV2.mValue));
231#elif defined(JPH_USE_NEON)
232 return vcgeq_f32(inV1.mValue, inV2.mValue);
233#else
234 return UVec4(inV1.mF32[0] >= inV2.mF32[0]? 0xffffffffu : 0,
235 inV1.mF32[1] >= inV2.mF32[1]? 0xffffffffu : 0,
236 inV1.mF32[2] >= inV2.mF32[2]? 0xffffffffu : 0,
237 inV1.mF32[3] >= inV2.mF32[3]? 0xffffffffu : 0);
238#endif
239}
240
241Vec4 Vec4::sFusedMultiplyAdd(Vec4Arg inMul1, Vec4Arg inMul2, Vec4Arg inAdd)
242{
243#if defined(JPH_USE_SSE)
244 #ifdef JPH_USE_FMADD
245 return _mm_fmadd_ps(inMul1.mValue, inMul2.mValue, inAdd.mValue);
246 #else
247 return _mm_add_ps(_mm_mul_ps(inMul1.mValue, inMul2.mValue), inAdd.mValue);
248 #endif
249#elif defined(JPH_USE_NEON)
250 return vmlaq_f32(inAdd.mValue, inMul1.mValue, inMul2.mValue);
251#else
252 return Vec4(inMul1.mF32[0] * inMul2.mF32[0] + inAdd.mF32[0],
253 inMul1.mF32[1] * inMul2.mF32[1] + inAdd.mF32[1],
254 inMul1.mF32[2] * inMul2.mF32[2] + inAdd.mF32[2],
255 inMul1.mF32[3] * inMul2.mF32[3] + inAdd.mF32[3]);
256#endif
257}
258
259Vec4 Vec4::sSelect(Vec4Arg inNotSet, Vec4Arg inSet, UVec4Arg inControl)
260{
261#if defined(JPH_USE_SSE4_1) && !defined(JPH_PLATFORM_WASM) // _mm_blendv_ps has problems on FireFox
262 return _mm_blendv_ps(inNotSet.mValue, inSet.mValue, _mm_castsi128_ps(inControl.mValue));
263#elif defined(JPH_USE_SSE)
264 __m128 is_set = _mm_castsi128_ps(_mm_srai_epi32(inControl.mValue, 31));
265 return _mm_or_ps(_mm_and_ps(is_set, inSet.mValue), _mm_andnot_ps(is_set, inNotSet.mValue));
266#elif defined(JPH_USE_NEON)
267 return vbslq_f32(vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_u32(inControl.mValue), 31)), inSet.mValue, inNotSet.mValue);
268#else
269 Vec4 result;
270 for (int i = 0; i < 4; i++)
271 result.mF32[i] = (inControl.mU32[i] & 0x80000000u) ? inSet.mF32[i] : inNotSet.mF32[i];
272 return result;
273#endif
274}
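An editorial usage sketch (not part of Vec4.inl, assumes the Jolt headers and the JPH namespace): sSelect keys off the highest bit of each lane of inControl, so it combines naturally with the comparison functions above, which return per-lane all-ones masks:
Vec4 v(-1.0f, 2.0f, -3.0f, 4.0f);
UVec4 is_negative = Vec4::sLess(v, Vec4::sZero()); // (0xffffffff, 0, 0xffffffff, 0)
Vec4 clamped = Vec4::sSelect(v, Vec4::sZero(), is_negative); // negative lanes replaced by zero: (0, 2, 0, 4)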
275
276Vec4 Vec4::sOr(Vec4Arg inV1, Vec4Arg inV2)
277{
278#if defined(JPH_USE_SSE)
279 return _mm_or_ps(inV1.mValue, inV2.mValue);
280#elif defined(JPH_USE_NEON)
281 return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(inV1.mValue), vreinterpretq_u32_f32(inV2.mValue)));
282#else
283 return UVec4::sOr(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat();
284#endif
285}
286
287Vec4 Vec4::sXor(Vec4Arg inV1, Vec4Arg inV2)
288{
289#if defined(JPH_USE_SSE)
290 return _mm_xor_ps(inV1.mValue, inV2.mValue);
291#elif defined(JPH_USE_NEON)
292 return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(inV1.mValue), vreinterpretq_u32_f32(inV2.mValue)));
293#else
294 return UVec4::sXor(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat();
295#endif
296}
297
298Vec4 Vec4::sAnd(Vec4Arg inV1, Vec4Arg inV2)
299{
300#if defined(JPH_USE_SSE)
301 return _mm_and_ps(inV1.mValue, inV2.mValue);
302#elif defined(JPH_USE_NEON)
303 return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(inV1.mValue), vreinterpretq_u32_f32(inV2.mValue)));
304#else
305 return UVec4::sAnd(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat();
306#endif
307}
308
309void Vec4::sSort4(Vec4 &ioValue, UVec4 &ioIndex)
310{
311 // Pass 1, test 1st vs 3rd, 2nd vs 4th
312 Vec4 v1 = ioValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>();
313 UVec4 i1 = ioIndex.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>();
314 UVec4 c1 = sLess(ioValue, v1).Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W>();
315 ioValue = sSelect(ioValue, v1, c1);
316 ioIndex = UVec4::sSelect(ioIndex, i1, c1);
317
318 // Pass 2, test 1st vs 2nd, 3rd vs 4th
319 Vec4 v2 = ioValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>();
320 UVec4 i2 = ioIndex.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>();
321 UVec4 c2 = sLess(ioValue, v2).Swizzle<SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_W, SWIZZLE_W>();
322 ioValue = sSelect(ioValue, v2, c2);
323 ioIndex = UVec4::sSelect(ioIndex, i2, c2);
324
325 // Pass 3, test 2nd vs 3rd component
326 Vec4 v3 = ioValue.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>();
327 UVec4 i3 = ioIndex.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>();
328 UVec4 c3 = sLess(ioValue, v3).Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_W>();
329 ioValue = sSelect(ioValue, v3, c3);
330 ioIndex = UVec4::sSelect(ioIndex, i3, c3);
331}
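An editorial sketch (not part of Vec4.inl, assumes the Jolt headers and the JPH namespace) of the three-pass sorting network above: the values end up in ascending order and ioIndex is permuted the same way, so each lane of the index records which original lane the sorted value came from:
Vec4 value(3.0f, 1.0f, 4.0f, 2.0f);
UVec4 index(0, 1, 2, 3);
Vec4::sSort4(value, index);
// value is now (1, 2, 3, 4) and index is (1, 3, 0, 2): the smallest element came from lane 1, the largest from lane 2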
332
333void Vec4::sSort4Reverse(Vec4 &ioValue, UVec4 &ioIndex)
334{
335 // Pass 1, test 1st vs 3rd, 2nd vs 4th
336 Vec4 v1 = ioValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>();
337 UVec4 i1 = ioIndex.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>();
338 UVec4 c1 = sGreater(ioValue, v1).Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W>();
339 ioValue = sSelect(ioValue, v1, c1);
340 ioIndex = UVec4::sSelect(ioIndex, i1, c1);
341
342 // Pass 2, test 1st vs 2nd, 3rd vs 4th
343 Vec4 v2 = ioValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>();
344 UVec4 i2 = ioIndex.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>();
345 UVec4 c2 = sGreater(ioValue, v2).Swizzle<SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_W, SWIZZLE_W>();
346 ioValue = sSelect(ioValue, v2, c2);
347 ioIndex = UVec4::sSelect(ioIndex, i2, c2);
348
349 // Pass 3, test 2nd vs 3rd component
350 Vec4 v3 = ioValue.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>();
351 UVec4 i3 = ioIndex.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>();
352 UVec4 c3 = sGreater(ioValue, v3).Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_W>();
353 ioValue = sSelect(ioValue, v3, c3);
354 ioIndex = UVec4::sSelect(ioIndex, i3, c3);
355}
356
357bool Vec4::operator == (Vec4Arg inV2) const
358{
359 return sEquals(*this, inV2).TestAllTrue();
360}
361
362bool Vec4::IsClose(Vec4Arg inV2, float inMaxDistSq) const
363{
364 return (inV2 - *this).LengthSq() <= inMaxDistSq;
365}
366
367bool Vec4::IsNearZero(float inMaxDistSq) const
368{
369 return LengthSq() <= inMaxDistSq;
370}
371
372bool Vec4::IsNormalized(float inTolerance) const
373{
374 return abs(LengthSq() - 1.0f) <= inTolerance;
375}
376
377bool Vec4::IsNaN() const
378{
379#if defined(JPH_USE_AVX512)
380 return _mm_fpclass_ps_mask(mValue, 0b10000001) != 0;
381#elif defined(JPH_USE_SSE)
382 return _mm_movemask_ps(_mm_cmpunord_ps(mValue, mValue)) != 0;
383#elif defined(JPH_USE_NEON)
384 uint32x4_t is_equal = vceqq_f32(mValue, mValue); // If a number is not equal to itself it's a NaN
385 return vaddvq_u32(vshrq_n_u32(is_equal, 31)) != 4;
386#else
387 return isnan(mF32[0]) || isnan(mF32[1]) || isnan(mF32[2]) || isnan(mF32[3]);
388#endif
389}
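A small editorial check (not part of Vec4.inl, assumes the Jolt headers and the JPH namespace) of the NaN test above, which relies on a NaN comparing unequal to itself:
bool any_nan = Vec4::sNaN().IsNaN(); // true, every component is a quiet NaN
bool no_nan = Vec4(0.0f, 1.0f, 2.0f, 3.0f).IsNaN(); // false, every component compares equal to itself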
390
391Vec4 Vec4::operator * (Vec4Arg inV2) const
392{
393#if defined(JPH_USE_SSE)
394 return _mm_mul_ps(mValue, inV2.mValue);
395#elif defined(JPH_USE_NEON)
396 return vmulq_f32(mValue, inV2.mValue);
397#else
398 return Vec4(mF32[0] * inV2.mF32[0],
399 mF32[1] * inV2.mF32[1],
400 mF32[2] * inV2.mF32[2],
401 mF32[3] * inV2.mF32[3]);
402#endif
403}
404
405Vec4 Vec4::operator * (float inV2) const
406{
407#if defined(JPH_USE_SSE)
408 return _mm_mul_ps(mValue, _mm_set1_ps(inV2));
409#elif defined(JPH_USE_NEON)
410 return vmulq_n_f32(mValue, inV2);
411#else
412 return Vec4(mF32[0] * inV2, mF32[1] * inV2, mF32[2] * inV2, mF32[3] * inV2);
413#endif
414}
415
417Vec4 operator * (float inV1, Vec4Arg inV2)
418{
419#if defined(JPH_USE_SSE)
420 return _mm_mul_ps(_mm_set1_ps(inV1), inV2.mValue);
421#elif defined(JPH_USE_NEON)
422 return vmulq_n_f32(inV2.mValue, inV1);
423#else
424 return Vec4(inV1 * inV2.mF32[0],
425 inV1 * inV2.mF32[1],
426 inV1 * inV2.mF32[2],
427 inV1 * inV2.mF32[3]);
428#endif
429}
430
431Vec4 Vec4::operator / (float inV2) const
432{
433#if defined(JPH_USE_SSE)
434 return _mm_div_ps(mValue, _mm_set1_ps(inV2));
435#elif defined(JPH_USE_NEON)
436 return vdivq_f32(mValue, vdupq_n_f32(inV2));
437#else
438 return Vec4(mF32[0] / inV2, mF32[1] / inV2, mF32[2] / inV2, mF32[3] / inV2);
439#endif
440}
441
442Vec4 &Vec4::operator *= (float inV2)
443{
444#if defined(JPH_USE_SSE)
445 mValue = _mm_mul_ps(mValue, _mm_set1_ps(inV2));
446#elif defined(JPH_USE_NEON)
447 mValue = vmulq_n_f32(mValue, inV2);
448#else
449 for (int i = 0; i < 4; ++i)
450 mF32[i] *= inV2;
451#endif
452 return *this;
453}
454
455Vec4 &Vec4::operator *= (Vec4Arg inV2)
456{
457#if defined(JPH_USE_SSE)
458 mValue = _mm_mul_ps(mValue, inV2.mValue);
459#elif defined(JPH_USE_NEON)
460 mValue = vmulq_f32(mValue, inV2.mValue);
461#else
462 for (int i = 0; i < 4; ++i)
463 mF32[i] *= inV2.mF32[i];
464#endif
465 return *this;
466}
467
468Vec4 &Vec4::operator /= (float inV2)
469{
470#if defined(JPH_USE_SSE)
471 mValue = _mm_div_ps(mValue, _mm_set1_ps(inV2));
472#elif defined(JPH_USE_NEON)
473 mValue = vdivq_f32(mValue, vdupq_n_f32(inV2));
474#else
475 for (int i = 0; i < 4; ++i)
476 mF32[i] /= inV2;
477#endif
478 return *this;
479}
480
481Vec4 Vec4::operator + (Vec4Arg inV2) const
482{
483#if defined(JPH_USE_SSE)
484 return _mm_add_ps(mValue, inV2.mValue);
485#elif defined(JPH_USE_NEON)
486 return vaddq_f32(mValue, inV2.mValue);
487#else
488 return Vec4(mF32[0] + inV2.mF32[0],
489 mF32[1] + inV2.mF32[1],
490 mF32[2] + inV2.mF32[2],
491 mF32[3] + inV2.mF32[3]);
492#endif
493}
494
495Vec4 &Vec4::operator += (Vec4Arg inV2)
496{
497#if defined(JPH_USE_SSE)
498 mValue = _mm_add_ps(mValue, inV2.mValue);
499#elif defined(JPH_USE_NEON)
500 mValue = vaddq_f32(mValue, inV2.mValue);
501#else
502 for (int i = 0; i < 4; ++i)
503 mF32[i] += inV2.mF32[i];
504#endif
505 return *this;
506}
507
508Vec4 Vec4::operator - () const
509{
510#if defined(JPH_USE_SSE)
511 return _mm_sub_ps(_mm_setzero_ps(), mValue);
512#elif defined(JPH_USE_NEON)
513 #ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
514 return vsubq_f32(vdupq_n_f32(0), mValue);
515 #else
516 return vnegq_f32(mValue);
517 #endif
518#else
519 #ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
520 return Vec4(0.0f - mF32[0], 0.0f - mF32[1], 0.0f - mF32[2], 0.0f - mF32[3]);
521 #else
522 return Vec4(-mF32[0], -mF32[1], -mF32[2], -mF32[3]);
523 #endif
524#endif
525}
526
527Vec4 Vec4::operator - (Vec4Arg inV2) const
528{
529#if defined(JPH_USE_SSE)
530 return _mm_sub_ps(mValue, inV2.mValue);
531#elif defined(JPH_USE_NEON)
532 return vsubq_f32(mValue, inV2.mValue);
533#else
534 return Vec4(mF32[0] - inV2.mF32[0],
535 mF32[1] - inV2.mF32[1],
536 mF32[2] - inV2.mF32[2],
537 mF32[3] - inV2.mF32[3]);
538#endif
539}
540
541Vec4 &Vec4::operator -= (Vec4Arg inV2)
542{
543#if defined(JPH_USE_SSE)
544 mValue = _mm_sub_ps(mValue, inV2.mValue);
545#elif defined(JPH_USE_NEON)
546 mValue = vsubq_f32(mValue, inV2.mValue);
547#else
548 for (int i = 0; i < 4; ++i)
549 mF32[i] -= inV2.mF32[i];
550#endif
551 return *this;
552}
553
554Vec4 Vec4::operator / (Vec4Arg inV2) const
555{
556#if defined(JPH_USE_SSE)
557 return _mm_div_ps(mValue, inV2.mValue);
558#elif defined(JPH_USE_NEON)
559 return vdivq_f32(mValue, inV2.mValue);
560#else
561 return Vec4(mF32[0] / inV2.mF32[0],
562 mF32[1] / inV2.mF32[1],
563 mF32[2] / inV2.mF32[2],
564 mF32[3] / inV2.mF32[3]);
565#endif
566}
567
568Vec4 Vec4::SplatX() const
569{
570#if defined(JPH_USE_SSE)
571 return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(0, 0, 0, 0));
572#elif defined(JPH_USE_NEON)
573 return vdupq_laneq_f32(mValue, 0);
574#else
575 return Vec4(mF32[0], mF32[0], mF32[0], mF32[0]);
576#endif
577}
578
579Vec4 Vec4::SplatY() const
580{
581#if defined(JPH_USE_SSE)
582 return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(1, 1, 1, 1));
583#elif defined(JPH_USE_NEON)
584 return vdupq_laneq_f32(mValue, 1);
585#else
586 return Vec4(mF32[1], mF32[1], mF32[1], mF32[1]);
587#endif
588}
589
590Vec4 Vec4::SplatZ() const
591{
592#if defined(JPH_USE_SSE)
593 return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(2, 2, 2, 2));
594#elif defined(JPH_USE_NEON)
595 return vdupq_laneq_f32(mValue, 2);
596#else
597 return Vec4(mF32[2], mF32[2], mF32[2], mF32[2]);
598#endif
599}
600
601Vec4 Vec4::SplatW() const
602{
603#if defined(JPH_USE_SSE)
604 return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(3, 3, 3, 3));
605#elif defined(JPH_USE_NEON)
606 return vdupq_laneq_f32(mValue, 3);
607#else
608 return Vec4(mF32[3], mF32[3], mF32[3], mF32[3]);
609#endif
610}
611
612Vec4 Vec4::Abs() const
613{
614#if defined(JPH_USE_AVX512)
615 return _mm_range_ps(mValue, mValue, 0b1000);
616#elif defined(JPH_USE_SSE)
617 return _mm_max_ps(_mm_sub_ps(_mm_setzero_ps(), mValue), mValue);
618#elif defined(JPH_USE_NEON)
619 return vabsq_f32(mValue);
620#else
621 return Vec4(abs(mF32[0]), abs(mF32[1]), abs(mF32[2]), abs(mF32[3]));
622#endif
623}
624
625Vec4 Vec4::Reciprocal() const
626{
627 return sOne() / mValue;
628}
629
630Vec4 Vec4::DotV(Vec4Arg inV2) const
631{
632#if defined(JPH_USE_SSE4_1)
633 return _mm_dp_ps(mValue, inV2.mValue, 0xff);
634#elif defined(JPH_USE_NEON)
635 float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
636 return vdupq_n_f32(vaddvq_f32(mul));
637#else
638 // Brackets placed so that the order is consistent with the vectorized version
639 return Vec4::sReplicate((mF32[0] * inV2.mF32[0] + mF32[1] * inV2.mF32[1]) + (mF32[2] * inV2.mF32[2] + mF32[3] * inV2.mF32[3]));
640#endif
641}
642
643float Vec4::Dot(Vec4Arg inV2) const
644{
645#if defined(JPH_USE_SSE4_1)
646 return _mm_cvtss_f32(_mm_dp_ps(mValue, inV2.mValue, 0xff));
647#elif defined(JPH_USE_NEON)
648 float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
649 return vaddvq_f32(mul);
650#else
651 // Brackets placed so that the order is consistent with the vectorized version
652 return (mF32[0] * inV2.mF32[0] + mF32[1] * inV2.mF32[1]) + (mF32[2] * inV2.mF32[2] + mF32[3] * inV2.mF32[3]);
653#endif
654}
655
656float Vec4::LengthSq() const
657{
658#if defined(JPH_USE_SSE4_1)
659 return _mm_cvtss_f32(_mm_dp_ps(mValue, mValue, 0xff));
660#elif defined(JPH_USE_NEON)
661 float32x4_t mul = vmulq_f32(mValue, mValue);
662 return vaddvq_f32(mul);
663#else
664 // Brackets placed so that the order is consistent with the vectorized version
665 return (mF32[0] * mF32[0] + mF32[1] * mF32[1]) + (mF32[2] * mF32[2] + mF32[3] * mF32[3]);
666#endif
667}
668
669float Vec4::Length() const
670{
671#if defined(JPH_USE_SSE4_1)
672 return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(mValue, mValue, 0xff)));
673#elif defined(JPH_USE_NEON)
674 float32x4_t mul = vmulq_f32(mValue, mValue);
675 float32x2_t sum = vdup_n_f32(vaddvq_f32(mul));
676 return vget_lane_f32(vsqrt_f32(sum), 0);
677#else
678 // Brackets placed so that the order is consistent with the vectorized version
679 return sqrt((mF32[0] * mF32[0] + mF32[1] * mF32[1]) + (mF32[2] * mF32[2] + mF32[3] * mF32[3]));
680#endif
681}
682
683Vec4 Vec4::Sqrt() const
684{
685#if defined(JPH_USE_SSE)
686 return _mm_sqrt_ps(mValue);
687#elif defined(JPH_USE_NEON)
688 return vsqrtq_f32(mValue);
689#else
690 return Vec4(sqrt(mF32[0]), sqrt(mF32[1]), sqrt(mF32[2]), sqrt(mF32[3]));
691#endif
692}
693
694
695Vec4 Vec4::GetSign() const
696{
697#if defined(JPH_USE_AVX512)
698 return _mm_fixupimm_ps(mValue, mValue, _mm_set1_epi32(0xA9A90A00), 0);
699#elif defined(JPH_USE_SSE)
700 Type minus_one = _mm_set1_ps(-1.0f);
701 Type one = _mm_set1_ps(1.0f);
702 return _mm_or_ps(_mm_and_ps(mValue, minus_one), one);
703#elif defined(JPH_USE_NEON)
704 Type minus_one = vdupq_n_f32(-1.0f);
705 Type one = vdupq_n_f32(1.0f);
706 return vreinterpretq_f32_u32(vorrq_u32(vandq_u32(vreinterpretq_u32_f32(mValue), vreinterpretq_u32_f32(minus_one)), vreinterpretq_u32_f32(one)));
707#else
708 return Vec4(std::signbit(mF32[0])? -1.0f : 1.0f,
709 std::signbit(mF32[1])? -1.0f : 1.0f,
710 std::signbit(mF32[2])? -1.0f : 1.0f,
711 std::signbit(mF32[3])? -1.0f : 1.0f);
712#endif
713}
714
715Vec4 Vec4::Normalized() const
716{
717#if defined(JPH_USE_SSE4_1)
718 return _mm_div_ps(mValue, _mm_sqrt_ps(_mm_dp_ps(mValue, mValue, 0xff)));
719#elif defined(JPH_USE_NEON)
720 float32x4_t mul = vmulq_f32(mValue, mValue);
721 float32x4_t sum = vdupq_n_f32(vaddvq_f32(mul));
722 return vdivq_f32(mValue, vsqrtq_f32(sum));
723#else
724 return *this / Length();
725#endif
726}
727
728void Vec4::StoreFloat4(Float4 *outV) const
729{
730#if defined(JPH_USE_SSE)
731 _mm_storeu_ps(&outV->x, mValue);
732#elif defined(JPH_USE_NEON)
733 vst1q_f32(&outV->x, mValue);
734#else
735 for (int i = 0; i < 4; ++i)
736 (&outV->x)[i] = mF32[i];
737#endif
738}
739
740UVec4 Vec4::ToInt() const
741{
742#if defined(JPH_USE_SSE)
743 return _mm_cvttps_epi32(mValue);
744#elif defined(JPH_USE_NEON)
745 return vcvtq_u32_f32(mValue);
746#else
747 return UVec4(uint32(mF32[0]), uint32(mF32[1]), uint32(mF32[2]), uint32(mF32[3]));
748#endif
749}
750
751UVec4 Vec4::ReinterpretAsInt() const
752{
753#if defined(JPH_USE_SSE)
754 return UVec4(_mm_castps_si128(mValue));
755#elif defined(JPH_USE_NEON)
756 return vreinterpretq_u32_f32(mValue);
757#else
758 return *reinterpret_cast<const UVec4 *>(this);
759#endif
760}
761
762int Vec4::GetSignBits() const
763{
764#if defined(JPH_USE_SSE)
765 return _mm_movemask_ps(mValue);
766#elif defined(JPH_USE_NEON)
767 int32x4_t shift = JPH_NEON_INT32x4(0, 1, 2, 3);
768 return vaddvq_u32(vshlq_u32(vshrq_n_u32(vreinterpretq_u32_f32(mValue), 31), shift));
769#else
770 return (std::signbit(mF32[0])? 1 : 0) | (std::signbit(mF32[1])? 2 : 0) | (std::signbit(mF32[2])? 4 : 0) | (std::signbit(mF32[3])? 8 : 0);
771#endif
772}
773
774float Vec4::ReduceMin() const
775{
776 Vec4 v = sMin(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_W, SWIZZLE_UNUSED>());
777 v = sMin(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED, SWIZZLE_UNUSED>());
778 return v.GetX();
779}
780
781float Vec4::ReduceMax() const
782{
783 Vec4 v = sMax(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_W, SWIZZLE_UNUSED>());
784 v = sMax(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED, SWIZZLE_UNUSED>());
785 return v.GetX();
786}
787
788void Vec4::SinCos(Vec4 &outSin, Vec4 &outCos) const
789{
790 // Implementation based on sinf.c from the cephes library, combines sinf and cosf in a single function, changes octants to quadrants and vectorizes it
791 // Original implementation by Stephen L. Moshier (See: http://www.moshier.net/)
792
793 // Make argument positive and remember sign for sin only since cos is symmetric around x (highest bit of a float is the sign bit)
794 UVec4 sin_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U));
795 Vec4 x = Vec4::sXor(*this, sin_sign.ReinterpretAsFloat());
796
797 // x / (PI / 2) rounded to nearest int gives us the quadrant closest to x
798 UVec4 quadrant = (0.6366197723675814f * x + Vec4::sReplicate(0.5f)).ToInt();
799
800 // Make x relative to the closest quadrant.
801 // This does x = x - quadrant * PI / 2 using a two step Cody-Waite argument reduction.
802 // This improves the accuracy of the result by avoiding loss of significant bits in the subtraction.
803 // We start with x = x - quadrant * PI / 2, PI / 2 in hexadecimal notation is 0x3fc90fdb, we remove the lowest 16 bits to
804 // get 0x3fc90000 (= 1.5703125) this means we can now multiply with a number of up to 2^16 without losing any bits.
805 // This leaves us with: x = (x - quadrant * 1.5703125) - quadrant * (PI / 2 - 1.5703125).
806 // PI / 2 - 1.5703125 in hexadecimal is 0x39fdaa22, stripping the lowest 12 bits we get 0x39fda000 (= 0.0004837512969970703125)
807 // This leaves us with: x = ((x - quadrant * 1.5703125) - quadrant * 0.0004837512969970703125) - quadrant * (PI / 2 - 1.5703125 - 0.0004837512969970703125)
808 // See: https://stackoverflow.com/questions/42455143/sine-cosine-modular-extended-precision-arithmetic
809 // After this we have x in the range [-PI / 4, PI / 4].
810 Vec4 float_quadrant = quadrant.ToFloat();
811 x = ((x - float_quadrant * 1.5703125f) - float_quadrant * 0.0004837512969970703125f) - float_quadrant * 7.549789948768648e-8f;
812
813 // Calculate x2 = x^2
814 Vec4 x2 = x * x;
815
816 // Taylor expansion:
817 // Cos(x) = 1 - x^2/2! + x^4/4! - x^6/6! + x^8/8! + ... = (((x2/8!- 1/6!) * x2 + 1/4!) * x2 - 1/2!) * x2 + 1
818 Vec4 taylor_cos = ((2.443315711809948e-5f * x2 - Vec4::sReplicate(1.388731625493765e-3f)) * x2 + Vec4::sReplicate(4.166664568298827e-2f)) * x2 * x2 - 0.5f * x2 + Vec4::sOne();
819 // Sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ... = ((-x2/7! + 1/5!) * x2 - 1/3!) * x2 * x + x
820 Vec4 taylor_sin = ((-1.9515295891e-4f * x2 + Vec4::sReplicate(8.3321608736e-3f)) * x2 - Vec4::sReplicate(1.6666654611e-1f)) * x2 * x + x;
821
822 // The lowest 2 bits of quadrant indicate the quadrant that we are in.
823 // Let x be the original input value and x' our value that has been mapped to the range [-PI / 4, PI / 4].
824 // since cos(x) = sin(x + PI / 2) and since we want to use the Taylor expansion as close as possible to 0,
825 // we can alternate between using the Taylor expansion for sin and cos according to the following table:
826 //
827 // quadrant sin(x) cos(x)
828 // XXX00b sin(x') cos(x')
829 // XXX01b cos(x') -sin(x')
830 // XXX10b -sin(x') -cos(x')
831 // XXX11b -cos(x') sin(x')
832 //
833 // So: sin_sign = bit2, cos_sign = bit1 ^ bit2, bit1 determines if we use sin or cos Taylor expansion
834 UVec4 bit1 = quadrant.LogicalShiftLeft<31>();
835 UVec4 bit2 = UVec4::sAnd(quadrant.LogicalShiftLeft<30>(), UVec4::sReplicate(0x80000000U));
836
837 // Select which one of the results is sin and which one is cos
838 Vec4 s = Vec4::sSelect(taylor_sin, taylor_cos, bit1);
839 Vec4 c = Vec4::sSelect(taylor_cos, taylor_sin, bit1);
840
841 // Update the signs
842 sin_sign = UVec4::sXor(sin_sign, bit2);
843 UVec4 cos_sign = UVec4::sXor(bit1, bit2);
844
845 // Correct the signs
846 outSin = Vec4::sXor(s, sin_sign.ReinterpretAsFloat());
847 outCos = Vec4::sXor(c, cos_sign.ReinterpretAsFloat());
848}
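An editorial usage sketch (not part of Vec4.inl, assumes the Jolt headers and the JPH namespace): SinCos evaluates both functions for four angles at once, with the accuracy of the reduced-range polynomials above:
Vec4 angles(0.0f, 0.25f * JPH_PI, 0.5f * JPH_PI, JPH_PI);
Vec4 s, c;
angles.SinCos(s, c); // s is approximately (0, 0.7071, 1, 0), c is approximately (1, 0.7071, 0, -1)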
849
850Vec4 Vec4::Tan() const
851{
852 // Implementation based on tanf.c from the cephes library, see Vec4::SinCos for further details
853 // Original implementation by Stephen L. Moshier (See: http://www.moshier.net/)
854
855 // Make argument positive
856 UVec4 tan_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U));
857 Vec4 x = Vec4::sXor(*this, tan_sign.ReinterpretAsFloat());
858
859 // x / (PI / 2) rounded to nearest int gives us the quadrant closest to x
860 UVec4 quadrant = (0.6366197723675814f * x + Vec4::sReplicate(0.5f)).ToInt();
861
862 // Remap x to range [-PI / 4, PI / 4], see Vec4::SinCos
863 Vec4 float_quadrant = quadrant.ToFloat();
864 x = ((x - float_quadrant * 1.5703125f) - float_quadrant * 0.0004837512969970703125f) - float_quadrant * 7.549789948768648e-8f;
865
866 // Calculate x2 = x^2
867 Vec4 x2 = x * x;
868
869 // Roughly equivalent to the Taylor expansion:
870 // Tan(x) = x + x^3/3 + 2*x^5/15 + 17*x^7/315 + 62*x^9/2835 + ...
871 Vec4 tan =
872 (((((9.38540185543e-3f * x2 + Vec4::sReplicate(3.11992232697e-3f)) * x2 + Vec4::sReplicate(2.44301354525e-2f)) * x2
873 + Vec4::sReplicate(5.34112807005e-2f)) * x2 + Vec4::sReplicate(1.33387994085e-1f)) * x2 + Vec4::sReplicate(3.33331568548e-1f)) * x2 * x + x;
874
875 // For the 2nd and 4th quadrant we need to invert the value
876 UVec4 bit1 = quadrant.LogicalShiftLeft<31>();
877 tan = Vec4::sSelect(tan, Vec4::sReplicate(-1.0f) / (tan JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(+ Vec4::sReplicate(FLT_MIN))), bit1); // Add small epsilon to prevent div by zero, works because tan is always positive
878
879 // Put the sign back
880 return Vec4::sXor(tan, tan_sign.ReinterpretAsFloat());
881}
882
883Vec4 Vec4::ASin() const
884{
885 // Implementation based on asinf.c from the cephes library
886 // Original implementation by Stephen L. Moshier (See: http://www.moshier.net/)
887
888 // Make argument positive
889 UVec4 asin_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U));
890 Vec4 a = Vec4::sXor(*this, asin_sign.ReinterpretAsFloat());
891
892 // ASin is not defined outside the range [-1, 1] but it often happens that a value is slightly above 1 so we just clamp here
893 a = Vec4::sMin(a, Vec4::sOne());
894
895 // When |x| <= 0.5 we use the asin approximation as is
896 Vec4 z1 = a * a;
897 Vec4 x1 = a;
898
899 // When |x| > 0.5 we use the identity asin(x) = PI / 2 - 2 * asin(sqrt((1 - x) / 2))
900 Vec4 z2 = 0.5f * (Vec4::sOne() - a);
901 Vec4 x2 = z2.Sqrt();
902
903 // Select which of the two situations we have
904 UVec4 greater = Vec4::sGreater(a, Vec4::sReplicate(0.5f));
905 Vec4 z = Vec4::sSelect(z1, z2, greater);
906 Vec4 x = Vec4::sSelect(x1, x2, greater);
907
908 // Polynomial approximation of asin
909 z = ((((4.2163199048e-2f * z + Vec4::sReplicate(2.4181311049e-2f)) * z + Vec4::sReplicate(4.5470025998e-2f)) * z + Vec4::sReplicate(7.4953002686e-2f)) * z + Vec4::sReplicate(1.6666752422e-1f)) * z * x + x;
910
911 // If |x| > 0.5 we need to apply the remainder of the identity above
912 z = Vec4::sSelect(z, Vec4::sReplicate(0.5f * JPH_PI) - (z + z), greater);
913
914 // Put the sign back
915 return Vec4::sXor(z, asin_sign.ReinterpretAsFloat());
916}
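A worked editorial check (not part of Vec4.inl) of the |x| > 0.5 identity used above, asin(x) = PI / 2 - 2 * asin(sqrt((1 - x) / 2)), for x = 0.8: sqrt((1 - 0.8) / 2) = 0.3162, asin(0.3162) = 0.3217, and PI / 2 - 2 * 0.3217 = 0.9273, which matches asin(0.8). Assuming the Jolt headers and the JPH namespace:
Vec4 a = Vec4(-1.0f, -0.5f, 0.5f, 0.8f).ASin(); // approximately (-PI / 2, -0.5236, 0.5236, 0.9273)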
917
918Vec4 Vec4::ACos() const
919{
920 // Not the most accurate, but simple
921 return Vec4::sReplicate(0.5f * JPH_PI) - ASin();
922}
923
924Vec4 Vec4::ATan() const
925{
926 // Implementation based on atanf.c from the cephes library
927 // Original implementation by Stephen L. Moshier (See: http://www.moshier.net/)
928
929 // Make argument positive
930 UVec4 atan_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U));
931 Vec4 x = Vec4::sXor(*this, atan_sign.ReinterpretAsFloat());
932 Vec4 y = Vec4::sZero();
933
934 // If x > Tan(PI / 8)
935 UVec4 greater1 = Vec4::sGreater(x, Vec4::sReplicate(0.4142135623730950f));
936 Vec4 x1 = (x - Vec4::sOne()) / (x + Vec4::sOne());
937
938 // If x > Tan(3 * PI / 8)
939 UVec4 greater2 = Vec4::sGreater(x, Vec4::sReplicate(2.414213562373095f));
940 Vec4 x2 = Vec4::sReplicate(-1.0f) / (x JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(+ Vec4::sReplicate(FLT_MIN))); // Add small epsilon to prevent div by zero, works because x is always positive
941
942 // Apply first if
943 x = Vec4::sSelect(x, x1, greater1);
944 y = Vec4::sSelect(y, Vec4::sReplicate(0.25f * JPH_PI), greater1);
945
946 // Apply second if
947 x = Vec4::sSelect(x, x2, greater2);
948 y = Vec4::sSelect(y, Vec4::sReplicate(0.5f * JPH_PI), greater2);
949
950 // Polynomial approximation
951 Vec4 z = x * x;
952 y += (((8.05374449538e-2f * z - Vec4::sReplicate(1.38776856032e-1f)) * z + Vec4::sReplicate(1.99777106478e-1f)) * z - Vec4::sReplicate(3.33329491539e-1f)) * z * x + x;
953
954 // Put the sign back
955 return Vec4::sXor(y, atan_sign.ReinterpretAsFloat());
956}
957
958Vec4 Vec4::sATan2(Vec4Arg inY, Vec4Arg inX)
959{
960 UVec4 sign_mask = UVec4::sReplicate(0x80000000U);
961
962 // Determine absolute value and sign of y
963 UVec4 y_sign = UVec4::sAnd(inY.ReinterpretAsInt(), sign_mask);
964 Vec4 y_abs = Vec4::sXor(inY, y_sign.ReinterpretAsFloat());
965
966 // Determine absolute value and sign of x
967 UVec4 x_sign = UVec4::sAnd(inX.ReinterpretAsInt(), sign_mask);
968 Vec4 x_abs = Vec4::sXor(inX, x_sign.ReinterpretAsFloat());
969
970 // Always divide smallest / largest to avoid dividing by zero
971 UVec4 x_is_numerator = Vec4::sLess(x_abs, y_abs);
972 Vec4 numerator = Vec4::sSelect(y_abs, x_abs, x_is_numerator);
973 Vec4 denominator = Vec4::sSelect(x_abs, y_abs, x_is_numerator);
974 Vec4 atan = (numerator / denominator).ATan();
975
976 // If we calculated x / y instead of y / x the result is PI / 2 - result (note that this is true because we know the result is positive because the input was positive)
977 atan = Vec4::sSelect(atan, Vec4::sReplicate(0.5f * JPH_PI) - atan, x_is_numerator);
978
979 // Now we need to map to the correct quadrant
980 // x_sign y_sign result
981 // +1 +1 atan
982 // -1 +1 -atan + PI
983 // -1 -1 atan - PI
984 // +1 -1 -atan
985 // This can be written as: x_sign * y_sign * (atan - (x_sign < 0? PI : 0))
986 atan -= Vec4::sAnd(x_sign.ArithmeticShiftRight<31>().ReinterpretAsFloat(), Vec4::sReplicate(JPH_PI));
987 atan = Vec4::sXor(atan, UVec4::sXor(x_sign, y_sign).ReinterpretAsFloat());
988 return atan;
989}
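An editorial sketch (not part of Vec4.inl, assumes the Jolt headers and the JPH namespace) exercising one input per quadrant of the mapping table above:
Vec4 y(1.0f, 1.0f, -1.0f, -1.0f);
Vec4 x(1.0f, -1.0f, -1.0f, 1.0f);
Vec4 a = Vec4::sATan2(y, x); // approximately (PI / 4, 3 * PI / 4, -3 * PI / 4, -PI / 4)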
990
991JPH_NAMESPACE_END
std::uint8_t uint8
Definition Core.h:488
std::uint64_t uint64
Definition Core.h:491
#define JPH_NAMESPACE_END
Definition Core.h:419
std::uint32_t uint32
Definition Core.h:490
#define JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(...)
Definition Core.h:555
#define JPH_NAMESPACE_BEGIN
Definition Core.h:413
JPH_INLINE To BitCast(const From &inValue)
Definition Math.h:192
@ SWIZZLE_Z
Use the Z component.
Definition Swizzle.h:14
@ SWIZZLE_W
Use the W component.
Definition Swizzle.h:15
@ SWIZZLE_X
Use the X component.
Definition Swizzle.h:12
@ SWIZZLE_UNUSED
We always use the Z component when we don't specifically want to initialize a value,...
Definition Swizzle.h:16
@ SWIZZLE_Y
Use the Y component.
Definition Swizzle.h:13
Vec4 operator*(float inV1, Vec4Arg inV2)
Multiply vector with float.
Definition Vec4.inl:417
Class that holds 4 float values. Convert to Vec4 to perform calculations.
Definition Float4.h:11
float x
Definition Float4.h:25
float y
Definition Float4.h:26
float z
Definition Float4.h:27
float w
Definition Float4.h:28
UVec4
Definition UVec4.h:12
JPH_INLINE UVec4 Swizzle() const
Swizzle the elements in inV.
JPH_INLINE uint32 GetZ() const
Definition UVec4.h:104
JPH_INLINE UVec4 LogicalShiftLeft() const
Shift all components by Count bits to the left (filling with zeros from the left)
static JPH_INLINE UVec4 sSelect(UVec4Arg inNotSet, UVec4Arg inSet, UVec4Arg inControl)
Component wise select, returns inNotSet when highest bit of inControl = 0 and inSet when highest bit ...
Definition UVec4.inl:157
JPH_INLINE uint32 GetY() const
Definition UVec4.h:103
static JPH_INLINE UVec4 sReplicate(uint32 inV)
Replicate int inV across all components.
Definition UVec4.inl:56
JPH_INLINE bool TestAllTrue() const
Test if all components are true (true is when highest bit of component is set)
Definition UVec4.inl:408
static JPH_INLINE UVec4 sAnd(UVec4Arg inV1, UVec4Arg inV2)
Logical and (component wise)
Definition UVec4.inl:202
static JPH_INLINE UVec4 sOr(UVec4Arg inV1, UVec4Arg inV2)
Logical or (component wise)
Definition UVec4.inl:174
JPH_INLINE uint32 GetW() const
Definition UVec4.h:105
Type mValue
Definition UVec4.h:211
JPH_INLINE uint32 GetX() const
Get individual components.
Definition UVec4.h:102
static JPH_INLINE UVec4 sXor(UVec4Arg inV1, UVec4Arg inV2)
Logical xor (component wise)
Definition UVec4.inl:188
JPH_INLINE UVec4 ArithmeticShiftRight() const
Shift all components by Count bits to the right (shifting in the value of the highest bit)
JPH_INLINE Vec4 ToFloat() const
Convert each component from an int to a float.
Definition UVec4.inl:329
JPH_INLINE Vec4 ReinterpretAsFloat() const
Reinterpret UVec4 as a Vec4 (doesn't change the bits)
Definition UVec4.inl:340
uint32 mU32[4]
Definition UVec4.h:212
Vec3
Definition Vec3.h:17
Type mValue
Definition Vec3.h:289
float mF32[4]
Definition Vec3.h:290
Vec4
Definition Vec4.h:14
JPH_INLINE bool IsNearZero(float inMaxDistSq=1.0e-12f) const
Test if vector is near zero.
Definition Vec4.inl:367
JPH_INLINE Vec4 SplatX() const
Replicate the X component to all components.
Definition Vec4.inl:568
static JPH_INLINE void sSort4(Vec4 &ioValue, UVec4 &ioIndex)
Definition Vec4.inl:309
Vec4 ATan() const
Calculate the arc tangent for each element of this vector (returns value in the range [-PI / 2,...
Definition Vec4.inl:924
static JPH_INLINE UVec4 sGreater(Vec4Arg inV1, Vec4Arg inV2)
Greater than (component wise)
Definition Vec4.inl:213
float mF32[4]
Definition Vec4.h:281
JPH_INLINE Vec4 operator-() const
Negate.
Definition Vec4.inl:508
Vec4()=default
Constructor.
static JPH_INLINE Vec4 sAnd(Vec4Arg inV1, Vec4Arg inV2)
Logical and (component wise)
Definition Vec4.inl:298
static JPH_INLINE Vec4 sLoadFloat4Aligned(const Float4 *inV)
Load 4 floats from memory, 16 bytes aligned.
Definition Vec4.inl:106
static Vec4 sATan2(Vec4Arg inY, Vec4Arg inX)
Calculate the arc tangent of y / x using the signs of the arguments to determine the correct quadrant...
Definition Vec4.inl:958
JPH_INLINE Vec4 GetSign() const
Get vector that contains the sign of each element (returns 1.0f if positive, -1.0f if negative)
Definition Vec4.inl:695
Vec4 ASin() const
Definition Vec4.inl:883
static JPH_INLINE Vec4 sXor(Vec4Arg inV1, Vec4Arg inV2)
Logical xor (component wise)
Definition Vec4.inl:287
JPH_INLINE Vec4 Abs() const
Return the absolute value of each of the components.
Definition Vec4.inl:612
JPH_INLINE Vec4 operator/(float inV2) const
Divide vector by float.
Definition Vec4.inl:431
Vec4 Tan() const
Calculate the tangent for each element of this vector (input in radians)
Definition Vec4.inl:850
JPH_INLINE UVec4 ToInt() const
Convert each component from a float to an int.
Definition Vec4.inl:740
JPH_INLINE Vec4 & operator+=(Vec4Arg inV2)
Add two float vectors (component wise)
Definition Vec4.inl:495
static JPH_INLINE UVec4 sLessOrEqual(Vec4Arg inV1, Vec4Arg inV2)
Less than or equal (component wise)
Definition Vec4.inl:199
static JPH_INLINE UVec4 sLess(Vec4Arg inV1, Vec4Arg inV2)
Less than (component wise)
Definition Vec4.inl:185
JPH_INLINE float Length() const
Length of vector.
Definition Vec4.inl:669
static JPH_INLINE void sSort4Reverse(Vec4 &ioValue, UVec4 &ioIndex)
Definition Vec4.inl:333
static JPH_INLINE Vec4 sOne()
Vector with all ones.
Definition Vec4.inl:85
static JPH_INLINE Vec4 sFusedMultiplyAdd(Vec4Arg inMul1, Vec4Arg inMul2, Vec4Arg inAdd)
Calculates inMul1 * inMul2 + inAdd.
Definition Vec4.inl:241
JPH_INLINE Vec4 Normalized() const
Normalize vector.
Definition Vec4.inl:715
static JPH_INLINE UVec4 sEquals(Vec4Arg inV1, Vec4Arg inV2)
Equals (component wise)
Definition Vec4.inl:171
JPH_INLINE float ReduceMax() const
Get the maximum of X, Y, Z and W.
Definition Vec4.inl:781
JPH_INLINE Vec4 Reciprocal() const
Reciprocal vector (1 / value) for each of the components.
Definition Vec4.inl:625
JPH_INLINE Vec4 SplatY() const
Replicate the Y component to all components.
Definition Vec4.inl:579
JPH_INLINE UVec4 ReinterpretAsInt() const
Reinterpret Vec4 as a UVec4 (doesn't change the bits)
Definition Vec4.inl:751
static JPH_INLINE UVec4 sGreaterOrEqual(Vec4Arg inV1, Vec4Arg inV2)
Greater than or equal (component wise)
Definition Vec4.inl:227
static JPH_INLINE Vec4 sMin(Vec4Arg inV1, Vec4Arg inV2)
Return the minimum value of each of the components.
Definition Vec4.inl:143
JPH_INLINE Vec4 SplatZ() const
Replicate the Z component to all components.
Definition Vec4.inl:590
JPH_INLINE Vec4 Sqrt() const
Component wise square root.
Definition Vec4.inl:683
JPH_INLINE Vec4 & operator*=(float inV2)
Multiply vector with float.
Definition Vec4.inl:442
static JPH_INLINE Vec4 sGatherFloat4(const float *inBase, UVec4Arg inOffsets)
Gather 4 floats from memory at inBase + inOffsets[i] * Scale.
JPH_INLINE Vec4 operator+(Vec4Arg inV2) const
Add two float vectors (component wise)
Definition Vec4.inl:481
JPH_INLINE Vec4 & operator/=(float inV2)
Divide vector by float.
Definition Vec4.inl:468
JPH_INLINE bool IsNormalized(float inTolerance=1.0e-6f) const
Test if vector is normalized.
Definition Vec4.inl:372
JPH_INLINE bool operator==(Vec4Arg inV2) const
Comparison.
Definition Vec4.inl:357
JPH_INLINE Vec4 SplatW() const
Replicate the W component to all components.
Definition Vec4.inl:601
JPH_INLINE Vec4 DotV(Vec4Arg inV2) const
Dot product, returns the dot product in the X, Y, Z and W components.
Definition Vec4.inl:630
JPH_INLINE bool IsClose(Vec4Arg inV2, float inMaxDistSq=1.0e-12f) const
Test if two vectors are close.
Definition Vec4.inl:362
JPH_INLINE float GetX() const
Get individual components.
Definition Vec4.h:116
static JPH_INLINE Vec4 sLoadFloat4(const Float4 *inV)
Load 4 floats from memory.
Definition Vec4.inl:95
static JPH_INLINE Vec4 sZero()
Vector with all zeros.
Definition Vec4.inl:63
JPH_INLINE Vec4 Swizzle() const
Swizzle the elements in inV.
struct { float mData[4];} Type
Definition Vec4.h:24
static JPH_INLINE Vec4 sOr(Vec4Arg inV1, Vec4Arg inV2)
Logical or (component wise)
Definition Vec4.inl:276
JPH_INLINE float ReduceMin() const
Get the minimum of X, Y, Z and W.
Definition Vec4.inl:774
Type mValue
Definition Vec4.h:280
JPH_INLINE Vec4 & operator-=(Vec4Arg inV2)
Subtract two float vectors (component wise)
Definition Vec4.inl:541
JPH_INLINE float LengthSq() const
Squared length of vector.
Definition Vec4.inl:656
static JPH_INLINE Vec4 sMax(Vec4Arg inV1, Vec4Arg inV2)
Return the maximum of each of the components.
Definition Vec4.inl:157
JPH_INLINE float Dot(Vec4Arg inV2) const
Dot product.
Definition Vec4.inl:643
JPH_INLINE bool IsNaN() const
Test if vector contains NaN elements.
Definition Vec4.inl:377
static JPH_INLINE Vec4 sNaN()
Vector with all NaN's.
Definition Vec4.inl:90
Vec4 ACos() const
Definition Vec4.inl:918
static JPH_INLINE Vec4 sSelect(Vec4Arg inNotSet, Vec4Arg inSet, UVec4Arg inControl)
Component wise select, returns inNotSet when highest bit of inControl = 0 and inSet when highest bit ...
Definition Vec4.inl:259
JPH_INLINE int GetSignBits() const
Store if X is negative in bit 0, Y in bit 1, Z in bit 2 and W in bit 3.
Definition Vec4.inl:762
static JPH_INLINE Vec4 sReplicate(float inV)
Replicate inV across all components.
Definition Vec4.inl:74
void SinCos(Vec4 &outSin, Vec4 &outCos) const
Calculate the sine and cosine for each element of this vector (input in radians)
Definition Vec4.inl:788
JPH_INLINE void StoreFloat4(Float4 *outV) const
Store 4 floats to memory.
Definition Vec4.inl:728
friend JPH_INLINE Vec4 operator*(float inV1, Vec4Arg inV2)
Multiply vector with float.
Definition Vec4.inl:417