DVec3::DVec3(Vec3Arg inRHS)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_cvtps_pd(inRHS.mValue);
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_cvtps_pd(inRHS.mValue);
	mValue.mHigh = _mm_cvtps_pd(_mm_shuffle_ps(inRHS.mValue, inRHS.mValue, _MM_SHUFFLE(2, 2, 2, 2)));
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vcvt_f64_f32(vget_low_f32(inRHS.mValue));
	mValue.val[1] = vcvt_high_f64_f32(inRHS.mValue);
#else
	mF64[0] = (double)inRHS.GetX();
	mF64[1] = (double)inRHS.GetY();
	mF64[2] = (double)inRHS.GetZ();
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2]; // Replicate Z into W so the unused lane always holds a valid value
	#endif
#endif
}
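
// Illustrative sketch (hypothetical helper, not part of the original file):
// widening a Vec3 to a DVec3 goes through the constructor above. Every float
// is exactly representable as a double, so this direction is lossless.
static inline DVec3 ExampleWiden(Vec3Arg inV)
{
	return DVec3(inV);
}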
DVec3::DVec3(double inX, double inY, double inZ)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_set_pd(inZ, inZ, inY, inX);
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_set_pd(inY, inX);
	mValue.mHigh = _mm_set1_pd(inZ);
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vcombine_f64(vcreate_f64(BitCast<uint64>(inX)), vcreate_f64(BitCast<uint64>(inY)));
	mValue.val[1] = vdupq_n_f64(inZ);
#else
	mF64[0] = inX;
	mF64[1] = inY;
	mF64[2] = inZ;
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
}
DVec3::DVec3(const Double3 &inV)
{
#if defined(JPH_USE_AVX)
	Type x = _mm256_castpd128_pd256(_mm_load_sd(&inV.x));
	Type y = _mm256_castpd128_pd256(_mm_load_sd(&inV.y));
	Type z = _mm256_broadcast_sd(&inV.z);
	Type xy = _mm256_unpacklo_pd(x, y);
	mValue = _mm256_blend_pd(xy, z, 0b1100);
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_loadu_pd(&inV.x);
	mValue.mHigh = _mm_set1_pd(inV.z);
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vld1q_f64(&inV.x);
	mValue.val[1] = vdupq_n_f64(inV.z);
#else
	mF64[0] = inV.x;
	mF64[1] = inV.y;
	mF64[2] = inV.z;
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
}
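
// Illustrative sketch (hypothetical helper, not part of the original file):
// Double3 is the plain storage type; converting it to DVec3 as above is the
// intended way to get stored positions back into SIMD registers for math.
static inline double ExampleStoredLength(const Double3 &inStored)
{
	return DVec3(inStored).Length();
}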
void DVec3::CheckW() const
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	// W should always equal Z, so that dividing by W cannot trigger a floating point exception
	JPH_ASSERT(BitCast<uint64>(mF64[2]) == BitCast<uint64>(mF64[3]));
#endif
}
DVec3::Type DVec3::sFixW(TypeArg inValue)
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	#if defined(JPH_USE_AVX)
		return _mm256_shuffle_pd(inValue, inValue, 2);
	#elif defined(JPH_USE_SSE)
		Type value;
		value.mLow = inValue.mLow;
		value.mHigh = _mm_shuffle_pd(inValue.mHigh, inValue.mHigh, 0);
		return value;
	#elif defined(JPH_USE_NEON)
		Type value;
		value.val[0] = inValue.val[0];
		value.val[1] = vdupq_laneq_f64(inValue.val[1], 0);
		return value;
	#else
		Type value;
		value.mData[0] = inValue.mData[0];
		value.mData[1] = inValue.mData[1];
		value.mData[2] = inValue.mData[2];
		value.mData[3] = inValue.mData[2];
		return value;
	#endif
#else
	return inValue;
#endif
}
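
// Illustrative sketch (not part of the original file): why W must stay a copy
// of Z. SIMD paths operate on all four lanes at once, so with
// JPH_FLOATING_POINT_EXCEPTIONS_ENABLED an uninitialized W lane in a divisor
// could raise a spurious divide-by-zero or invalid-operation exception.
// sFixW above guarantees the lane always holds a valid value.
static inline DVec3 ExampleFullWidthDivide(DVec3Arg inNumerator, DVec3Arg inDenominator)
{
	return inNumerator / inDenominator; // also divides the W lane on SIMD paths
}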
DVec3 DVec3::sZero()
{
#if defined(JPH_USE_AVX)
	return _mm256_setzero_pd();
#elif defined(JPH_USE_SSE)
	__m128d zero = _mm_setzero_pd();
	return DVec3({ zero, zero });
#elif defined(JPH_USE_NEON)
	float64x2_t zero = vdupq_n_f64(0.0);
	return DVec3({ zero, zero });
#else
	return DVec3(0, 0, 0);
#endif
}
DVec3 DVec3::sReplicate(double inV)
{
#if defined(JPH_USE_AVX)
	return _mm256_set1_pd(inV);
#elif defined(JPH_USE_SSE)
	__m128d value = _mm_set1_pd(inV);
	return DVec3({ value, value });
#elif defined(JPH_USE_NEON)
	float64x2_t value = vdupq_n_f64(inV);
	return DVec3({ value, value });
#else
	return DVec3(inV, inV, inV);
#endif
}
DVec3 DVec3::sNaN()
{
	return sReplicate(numeric_limits<double>::quiet_NaN());
}
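
// Illustrative sketch (not part of the original file): sNaN is handy for
// poisoning a vector so that accidental use is caught later by IsNaN.
static inline bool ExampleNaNRoundTrip()
{
	DVec3 poisoned = DVec3::sNaN();
	return poisoned.IsNaN(); // always true: every component is a quiet NaN
}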
DVec3 DVec3::sLoadDouble3Unsafe(const Double3 &inV)
{
#if defined(JPH_USE_AVX)
	Type v = _mm256_loadu_pd(&inV.x);
#elif defined(JPH_USE_SSE)
	Type v;
	v.mLow = _mm_loadu_pd(&inV.x);
	v.mHigh = _mm_set1_pd(inV.z);
#elif defined(JPH_USE_NEON)
	Type v = vld1q_f64_x2(&inV.x);
#else
	Type v = { inV.x, inV.y, inV.z };
#endif
	return sFixW(v);
}
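
// Illustrative sketch (hypothetical helper, not part of the original file):
// per its documentation, sLoadDouble3Unsafe reads 64 bits beyond the Double3,
// so it is only safe when the element is not the last one in its allocation.
static inline DVec3 ExampleLoadFromArray(const Double3 *inArray, int inIndex, int inCount)
{
	return inIndex + 1 < inCount? DVec3::sLoadDouble3Unsafe(inArray[inIndex]) : DVec3(inArray[inIndex]);
}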
DVec3::operator Vec3() const
{
#if defined(JPH_USE_AVX)
	return _mm256_cvtpd_ps(mValue);
#elif defined(JPH_USE_SSE)
	__m128 low = _mm_cvtpd_ps(mValue.mLow);
	__m128 high = _mm_cvtpd_ps(mValue.mHigh);
	return _mm_shuffle_ps(low, high, _MM_SHUFFLE(1, 0, 1, 0));
#elif defined(JPH_USE_NEON)
	return vcvt_high_f32_f64(vcvtx_f32_f64(mValue.val[0]), mValue.val[1]);
#else
	return Vec3((float)GetX(), (float)GetY(), (float)GetZ());
#endif
}
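
// Illustrative sketch (not part of the original file): narrowing back to
// floats rounds each component to the nearest float, so a double -> float
// round trip can move a coordinate by up to half a float ULP.
static inline Vec3 ExampleNarrow(DVec3Arg inV)
{
	return Vec3(inV);
}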
DVec3 DVec3::sMin(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_min_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_min_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_min_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vminq_f64(inV1.mValue.val[0], inV2.mValue.val[0]), vminq_f64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
	return DVec3(min(inV1.mF64[0], inV2.mF64[0]), min(inV1.mF64[1], inV2.mF64[1]), min(inV1.mF64[2], inV2.mF64[2]));
#endif
}
DVec3 DVec3::sMax(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_max_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_max_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_max_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vmaxq_f64(inV1.mValue.val[0], inV2.mValue.val[0]), vmaxq_f64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
	return DVec3(max(inV1.mF64[0], inV2.mF64[0]), max(inV1.mF64[1], inV2.mF64[1]), max(inV1.mF64[2], inV2.mF64[2]));
#endif
}
DVec3 DVec3::sClamp(DVec3Arg inV, DVec3Arg inMin, DVec3Arg inMax)
{
	return sMax(sMin(inV, inMax), inMin);
}
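
// Illustrative sketch (hypothetical helper, not part of the original file):
// clamping a point into an axis aligned box, component wise.
static inline DVec3 ExampleClampToBox(DVec3Arg inPoint, DVec3Arg inBoxMin, DVec3Arg inBoxMax)
{
	return DVec3::sClamp(inPoint, inBoxMin, inBoxMax);
}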
DVec3 DVec3::sEquals(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_EQ_OQ);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_cmpeq_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpeq_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vceqq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_f64_u64(vceqq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
	return DVec3(inV1.mF64[0] == inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] == inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] == inV2.mF64[2]? cTrue : cFalse);
#endif
}
DVec3 DVec3::sLess(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_LT_OQ);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_cmplt_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmplt_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vcltq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_f64_u64(vcltq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
	return DVec3(inV1.mF64[0] < inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] < inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] < inV2.mF64[2]? cTrue : cFalse);
#endif
}
DVec3 DVec3::sLessOrEqual(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_LE_OQ);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_cmple_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmple_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vcleq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_f64_u64(vcleq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
	return DVec3(inV1.mF64[0] <= inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] <= inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] <= inV2.mF64[2]? cTrue : cFalse);
#endif
}
DVec3 DVec3::sGreater(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_GT_OQ);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_cmpgt_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpgt_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vcgtq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_f64_u64(vcgtq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
	return DVec3(inV1.mF64[0] > inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] > inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] > inV2.mF64[2]? cTrue : cFalse);
#endif
}
DVec3 DVec3::sGreaterOrEqual(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_GE_OQ);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_cmpge_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpge_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vcgeq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_f64_u64(vcgeq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
	return DVec3(inV1.mF64[0] >= inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] >= inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] >= inV2.mF64[2]? cTrue : cFalse);
#endif
}
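
// Illustrative sketch (not part of the original file): the comparisons above
// return per-component masks (all bits set when true); TestAnyTrue / GetTrues,
// defined further down, turn such a mask into a scalar you can branch on.
static inline bool ExampleAnyBelow(DVec3Arg inV, DVec3Arg inLimit)
{
	return DVec3::sLess(inV, inLimit).TestAnyTrue();
}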
DVec3 DVec3::sFusedMultiplyAdd(DVec3Arg inMul1, DVec3Arg inMul2, DVec3Arg inAdd)
{
#if defined(JPH_USE_AVX)
	#ifdef JPH_USE_FMADD
		return _mm256_fmadd_pd(inMul1.mValue, inMul2.mValue, inAdd.mValue);
	#else
		return _mm256_add_pd(_mm256_mul_pd(inMul1.mValue, inMul2.mValue), inAdd.mValue);
	#endif
#elif defined(JPH_USE_NEON)
	return DVec3({ vmlaq_f64(inAdd.mValue.val[0], inMul1.mValue.val[0], inMul2.mValue.val[0]), vmlaq_f64(inAdd.mValue.val[1], inMul1.mValue.val[1], inMul2.mValue.val[1]) });
#else
	return inMul1 * inMul2 + inAdd;
#endif
}
DVec3 DVec3::sSelect(DVec3Arg inNotSet, DVec3Arg inSet, DVec3Arg inControl)
{
#if defined(JPH_USE_AVX)
	return sFixW(_mm256_blendv_pd(inNotSet.mValue, inSet.mValue, inControl.mValue));
#elif defined(JPH_USE_SSE4_1)
	Type v = { _mm_blendv_pd(inNotSet.mValue.mLow, inSet.mValue.mLow, inControl.mValue.mLow), _mm_blendv_pd(inNotSet.mValue.mHigh, inSet.mValue.mHigh, inControl.mValue.mHigh) };
	return sFixW(v);
#elif defined(JPH_USE_NEON)
	Type v = { vbslq_f64(vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_f64(inControl.mValue.val[0]), 63)), inSet.mValue.val[0], inNotSet.mValue.val[0]),
			   vbslq_f64(vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_f64(inControl.mValue.val[1]), 63)), inSet.mValue.val[1], inNotSet.mValue.val[1]) };
	return sFixW(v);
#else
	DVec3 result;
	for (int i = 0; i < 3; i++)
		result.mF64[i] = (BitCast<uint64>(inControl.mF64[i]) & (uint64(1) << 63))? inSet.mF64[i] : inNotSet.mF64[i];
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		result.mF64[3] = result.mF64[2];
	#endif
	return result;
#endif
}
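
// Illustrative sketch (not part of the original file): branchless
// per-component selection. Components where inV is negative take the negated
// value, which computes a component wise absolute value via sSelect.
static inline DVec3 ExampleAbsViaSelect(DVec3Arg inV)
{
	return DVec3::sSelect(inV, -inV, DVec3::sLess(inV, DVec3::sZero()));
}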
DVec3 DVec3::sOr(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_or_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_or_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_or_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(inV1.mValue.val[0]), vreinterpretq_u64_f64(inV2.mValue.val[0]))),
				   vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(inV1.mValue.val[1]), vreinterpretq_u64_f64(inV2.mValue.val[1]))) });
#else
	return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) | BitCast<uint64>(inV2.mF64[0])),
				 BitCast<double>(BitCast<uint64>(inV1.mF64[1]) | BitCast<uint64>(inV2.mF64[1])),
				 BitCast<double>(BitCast<uint64>(inV1.mF64[2]) | BitCast<uint64>(inV2.mF64[2])));
#endif
}
DVec3 DVec3::sXor(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_xor_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_xor_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_xor_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(inV1.mValue.val[0]), vreinterpretq_u64_f64(inV2.mValue.val[0]))),
				   vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(inV1.mValue.val[1]), vreinterpretq_u64_f64(inV2.mValue.val[1]))) });
#else
	return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) ^ BitCast<uint64>(inV2.mF64[0])),
				 BitCast<double>(BitCast<uint64>(inV1.mF64[1]) ^ BitCast<uint64>(inV2.mF64[1])),
				 BitCast<double>(BitCast<uint64>(inV1.mF64[2]) ^ BitCast<uint64>(inV2.mF64[2])));
#endif
}
DVec3 DVec3::sAnd(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_and_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_and_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_and_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(inV1.mValue.val[0]), vreinterpretq_u64_f64(inV2.mValue.val[0]))),
				   vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(inV1.mValue.val[1]), vreinterpretq_u64_f64(inV2.mValue.val[1]))) });
#else
	return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) & BitCast<uint64>(inV2.mF64[0])),
				 BitCast<double>(BitCast<uint64>(inV1.mF64[1]) & BitCast<uint64>(inV2.mF64[1])),
				 BitCast<double>(BitCast<uint64>(inV1.mF64[2]) & BitCast<uint64>(inV2.mF64[2])));
#endif
}
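
// Illustrative sketch (not part of the original file): the bitwise operations
// above work on raw bit patterns, so ANDing two comparison masks yields the
// component wise logical AND of the two conditions.
static inline DVec3 ExampleInRangeMask(DVec3Arg inV, DVec3Arg inMin, DVec3Arg inMax)
{
	return DVec3::sAnd(DVec3::sGreaterOrEqual(inV, inMin), DVec3::sLessOrEqual(inV, inMax));
}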
int DVec3::GetTrues() const
{
#if defined(JPH_USE_AVX)
	return _mm256_movemask_pd(mValue) & 0x7;
#elif defined(JPH_USE_SSE)
	return (_mm_movemask_pd(mValue.mLow) + (_mm_movemask_pd(mValue.mHigh) << 2)) & 0x7;
#else
	return int((BitCast<uint64>(mF64[0]) >> 63) | ((BitCast<uint64>(mF64[1]) >> 63) << 1) | ((BitCast<uint64>(mF64[2]) >> 63) << 2));
#endif
}
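
// Illustrative sketch (not part of the original file): GetTrues packs the
// X/Y/Z mask bits into bits 0..2, so 0b111 means all three components true.
static inline bool ExampleAllNegative(DVec3Arg inV)
{
	return DVec3::sLess(inV, DVec3::sZero()).GetTrues() == 0b111;
}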
bool DVec3::IsClose(DVec3Arg inV2, double inMaxDistSq) const
{
	return (inV2 - *this).LengthSq() <= inMaxDistSq;
}
DVec3 DVec3::operator * (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_mul_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_mul_pd(mValue.mLow, inV2.mValue.mLow), _mm_mul_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vmulq_f64(mValue.val[0], inV2.mValue.val[0]), vmulq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
	return DVec3(mF64[0] * inV2.mF64[0], mF64[1] * inV2.mF64[1], mF64[2] * inV2.mF64[2]);
#endif
}
DVec3 DVec3::operator * (double inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_mul_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
	__m128d v = _mm_set1_pd(inV2);
	return DVec3({ _mm_mul_pd(mValue.mLow, v), _mm_mul_pd(mValue.mHigh, v) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vmulq_n_f64(mValue.val[0], inV2), vmulq_n_f64(mValue.val[1], inV2) });
#else
	return DVec3(mF64[0] * inV2, mF64[1] * inV2, mF64[2] * inV2);
#endif
}
DVec3 operator * (double inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	return _mm256_mul_pd(_mm256_set1_pd(inV1), inV2.mValue);
#elif defined(JPH_USE_SSE)
	__m128d v = _mm_set1_pd(inV1);
	return DVec3({ _mm_mul_pd(v, inV2.mValue.mLow), _mm_mul_pd(v, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vmulq_n_f64(inV2.mValue.val[0], inV1), vmulq_n_f64(inV2.mValue.val[1], inV1) });
#else
	return DVec3(inV1 * inV2.mF64[0], inV1 * inV2.mF64[1], inV1 * inV2.mF64[2]);
#endif
}
DVec3 DVec3::operator / (double inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_div_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
	__m128d v = _mm_set1_pd(inV2);
	return DVec3({ _mm_div_pd(mValue.mLow, v), _mm_div_pd(mValue.mHigh, v) });
#elif defined(JPH_USE_NEON)
	float64x2_t v = vdupq_n_f64(inV2);
	return DVec3({ vdivq_f64(mValue.val[0], v), vdivq_f64(mValue.val[1], v) });
#else
	return DVec3(mF64[0] / inV2, mF64[1] / inV2, mF64[2] / inV2);
#endif
}
DVec3 & DVec3::operator *= (double inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_mul_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
	__m128d v = _mm_set1_pd(inV2);
	mValue.mLow = _mm_mul_pd(mValue.mLow, v);
	mValue.mHigh = _mm_mul_pd(mValue.mHigh, v);
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vmulq_n_f64(mValue.val[0], inV2);
	mValue.val[1] = vmulq_n_f64(mValue.val[1], inV2);
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] *= inV2;
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 & DVec3::operator *= (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_mul_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_mul_pd(mValue.mLow, inV2.mValue.mLow);
	mValue.mHigh = _mm_mul_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vmulq_f64(mValue.val[0], inV2.mValue.val[0]);
	mValue.val[1] = vmulq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] *= inV2.mF64[i];
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 & DVec3::operator /= (double inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_div_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
	__m128d v = _mm_set1_pd(inV2);
	mValue.mLow = _mm_div_pd(mValue.mLow, v);
	mValue.mHigh = _mm_div_pd(mValue.mHigh, v);
#elif defined(JPH_USE_NEON)
	float64x2_t v = vdupq_n_f64(inV2);
	mValue.val[0] = vdivq_f64(mValue.val[0], v);
	mValue.val[1] = vdivq_f64(mValue.val[1], v);
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] /= inV2;
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 DVec3::operator + (Vec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_add_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_add_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue)), _mm_add_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2)))) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vaddq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue))), vaddq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue)) });
#else
	return DVec3(mF64[0] + inV2.mF32[0], mF64[1] + inV2.mF32[1], mF64[2] + inV2.mF32[2]);
#endif
}
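
// Illustrative sketch (not part of the original file): mixed double/float
// arithmetic like the operator above is the typical large-world pattern:
// positions are stored as doubles, per-step deltas stay cheap floats.
static inline DVec3 ExampleIntegratePosition(DVec3Arg inPosition, Vec3Arg inDeltaPosition)
{
	return inPosition + inDeltaPosition;
}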
DVec3 DVec3::operator + (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_add_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_add_pd(mValue.mLow, inV2.mValue.mLow), _mm_add_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vaddq_f64(mValue.val[0], inV2.mValue.val[0]), vaddq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
	return DVec3(mF64[0] + inV2.mF64[0], mF64[1] + inV2.mF64[1], mF64[2] + inV2.mF64[2]);
#endif
}
DVec3 & DVec3::operator += (Vec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_add_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_add_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue));
	mValue.mHigh = _mm_add_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vaddq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue)));
	mValue.val[1] = vaddq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue));
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] += inV2.mF32[i];
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 & DVec3::operator += (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_add_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_add_pd(mValue.mLow, inV2.mValue.mLow);
	mValue.mHigh = _mm_add_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vaddq_f64(mValue.val[0], inV2.mValue.val[0]);
	mValue.val[1] = vaddq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] += inV2.mF64[i];
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 DVec3::operator - () const
{
#if defined(JPH_USE_AVX)
	return _mm256_sub_pd(_mm256_setzero_pd(), mValue);
#elif defined(JPH_USE_SSE)
	__m128d zero = _mm_setzero_pd();
	return DVec3({ _mm_sub_pd(zero, mValue.mLow), _mm_sub_pd(zero, mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	#ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
		float64x2_t zero = vdupq_n_f64(0);
		return DVec3({ vsubq_f64(zero, mValue.val[0]), vsubq_f64(zero, mValue.val[1]) });
	#else
		return DVec3({ vnegq_f64(mValue.val[0]), vnegq_f64(mValue.val[1]) });
	#endif
#else
	#ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
		// Note: 0.0 - x yields +0.0 for x == 0.0, whereas -x would yield -0.0
		return DVec3(0.0 - mF64[0], 0.0 - mF64[1], 0.0 - mF64[2]);
	#else
		return DVec3(-mF64[0], -mF64[1], -mF64[2]);
	#endif
#endif
}
DVec3 DVec3::operator - (Vec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_sub_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_sub_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue)), _mm_sub_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2)))) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vsubq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue))), vsubq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue)) });
#else
	return DVec3(mF64[0] - inV2.mF32[0], mF64[1] - inV2.mF32[1], mF64[2] - inV2.mF32[2]);
#endif
}
DVec3 DVec3::operator - (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
	return _mm256_sub_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_sub_pd(mValue.mLow, inV2.mValue.mLow), _mm_sub_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vsubq_f64(mValue.val[0], inV2.mValue.val[0]), vsubq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
	return DVec3(mF64[0] - inV2.mF64[0], mF64[1] - inV2.mF64[1], mF64[2] - inV2.mF64[2]);
#endif
}
DVec3 & DVec3::operator -= (Vec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_sub_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_sub_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue));
	mValue.mHigh = _mm_sub_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vsubq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue)));
	mValue.val[1] = vsubq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue));
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] -= inV2.mF32[i];
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 & DVec3::operator -= (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
	mValue = _mm256_sub_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	mValue.mLow = _mm_sub_pd(mValue.mLow, inV2.mValue.mLow);
	mValue.mHigh = _mm_sub_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
	mValue.val[0] = vsubq_f64(mValue.val[0], inV2.mValue.val[0]);
	mValue.val[1] = vsubq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
	for (int i = 0; i < 3; ++i)
		mF64[i] -= inV2.mF64[i];
	#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		mF64[3] = mF64[2];
	#endif
#endif
	return *this;
}
DVec3 DVec3::operator / (DVec3Arg inV2) const
{
	inV2.CheckW(); // W must equal Z so the division of the unused lane cannot raise an exception
#if defined(JPH_USE_AVX)
	return _mm256_div_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_div_pd(mValue.mLow, inV2.mValue.mLow), _mm_div_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vdivq_f64(mValue.val[0], inV2.mValue.val[0]), vdivq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
	return DVec3(mF64[0] / inV2.mF64[0], mF64[1] / inV2.mF64[1], mF64[2] / inV2.mF64[2]);
#endif
}
DVec3 DVec3::Abs() const
{
#if defined(JPH_USE_AVX512)
	return _mm256_range_pd(mValue, mValue, 0b1000);
#elif defined(JPH_USE_AVX)
	return _mm256_max_pd(_mm256_sub_pd(_mm256_setzero_pd(), mValue), mValue);
#elif defined(JPH_USE_SSE)
	__m128d zero = _mm_setzero_pd();
	return DVec3({ _mm_max_pd(_mm_sub_pd(zero, mValue.mLow), mValue.mLow), _mm_max_pd(_mm_sub_pd(zero, mValue.mHigh), mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vabsq_f64(mValue.val[0]), vabsq_f64(mValue.val[1]) });
#else
	return DVec3(abs(mF64[0]), abs(mF64[1]), abs(mF64[2]));
#endif
}
DVec3 DVec3::Cross(DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX2)
	__m256d t1 = _mm256_permute4x64_pd(inV2.mValue, _MM_SHUFFLE(0, 0, 2, 1)); // t1 = inV2.yzxx
	t1 = _mm256_mul_pd(t1, mValue);
	__m256d t2 = _mm256_permute4x64_pd(mValue, _MM_SHUFFLE(0, 0, 2, 1)); // t2 = this.yzxx
	t2 = _mm256_mul_pd(t2, inV2.mValue);
	__m256d t3 = _mm256_sub_pd(t1, t2);
	return _mm256_permute4x64_pd(t3, _MM_SHUFFLE(0, 0, 2, 1)); // Rotate components back into place
#else
	return DVec3(mF64[1] * inV2.mF64[2] - mF64[2] * inV2.mF64[1],
				 mF64[2] * inV2.mF64[0] - mF64[0] * inV2.mF64[2],
				 mF64[0] * inV2.mF64[1] - mF64[1] * inV2.mF64[0]);
#endif
}
double DVec3::Dot(DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
	__m256d mul = _mm256_mul_pd(mValue, inV2.mValue);
	__m128d xy = _mm256_castpd256_pd128(mul);
	__m128d yx = _mm_shuffle_pd(xy, xy, 1);
	__m128d sum = _mm_add_pd(xy, yx);
	__m128d zw = _mm256_extractf128_pd(mul, 1);
	sum = _mm_add_pd(sum, zw);
	return _mm_cvtsd_f64(sum);
#elif defined(JPH_USE_SSE)
	__m128d xy = _mm_mul_pd(mValue.mLow, inV2.mValue.mLow);
	__m128d yx = _mm_shuffle_pd(xy, xy, 1);
	__m128d sum = _mm_add_pd(xy, yx);
	__m128d z = _mm_mul_sd(mValue.mHigh, inV2.mValue.mHigh);
	sum = _mm_add_pd(sum, z);
	return _mm_cvtsd_f64(sum);
#elif defined(JPH_USE_NEON)
	float64x2_t mul_low = vmulq_f64(mValue.val[0], inV2.mValue.val[0]);
	float64x2_t mul_high = vmulq_f64(mValue.val[1], inV2.mValue.val[1]);
	return vaddvq_f64(mul_low) + vgetq_lane_f64(mul_high, 0);
#else
	double dot = 0.0;
	for (int i = 0; i < 3; i++)
		dot += mF64[i] * inV2.mF64[i];
	return dot;
#endif
}
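
// Illustrative sketch (not part of the original file): Dot reduces the three
// products to one scalar, e.g. the signed distance of a point to a plane
// through the origin with unit normal inPlaneNormal.
static inline double ExampleSignedDistance(DVec3Arg inPoint, DVec3Arg inPlaneNormal)
{
	return inPoint.Dot(inPlaneNormal);
}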
DVec3 DVec3::Sqrt() const
{
#if defined(JPH_USE_AVX)
	return _mm256_sqrt_pd(mValue);
#elif defined(JPH_USE_SSE)
	return DVec3({ _mm_sqrt_pd(mValue.mLow), _mm_sqrt_pd(mValue.mHigh) });
#elif defined(JPH_USE_NEON)
	return DVec3({ vsqrtq_f64(mValue.val[0]), vsqrtq_f64(mValue.val[1]) });
#else
	return DVec3(sqrt(mF64[0]), sqrt(mF64[1]), sqrt(mF64[2]));
#endif
}
double DVec3::Length() const
{
	return sqrt(Dot(*this));
}
bool DVec3::IsNormalized(double inTolerance) const
{
	return abs(LengthSq() - 1.0) <= inTolerance;
}
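
// Illustrative sketch (not part of the original file): Normalized(), declared
// in DVec3.h, divides by Length(); IsNormalized is the cheap way to sanity
// check the result afterwards.
static inline DVec3 ExampleNormalizeChecked(DVec3Arg inV)
{
	DVec3 result = inV.Normalized();
	JPH_ASSERT(result.IsNormalized());
	return result;
}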
bool DVec3::IsNaN() const
{
#if defined(JPH_USE_AVX512)
	return (_mm256_fpclass_pd_mask(mValue, 0b10000001) & 0x7) != 0; // 0b10000001 selects QNaN and SNaN
#elif defined(JPH_USE_AVX)
	return (_mm256_movemask_pd(_mm256_cmp_pd(mValue, mValue, _CMP_UNORD_Q)) & 0x7) != 0;
#elif defined(JPH_USE_SSE)
	return ((_mm_movemask_pd(_mm_cmpunord_pd(mValue.mLow, mValue.mLow)) + (_mm_movemask_pd(_mm_cmpunord_pd(mValue.mHigh, mValue.mHigh)) << 2)) & 0x7) != 0;
#else
	return isnan(mF64[0]) || isnan(mF64[1]) || isnan(mF64[2]);
#endif
}
DVec3 DVec3::GetSign() const
{
#if defined(JPH_USE_AVX512)
	return _mm256_fixupimm_pd(mValue, mValue, _mm256_set1_epi32(0xA9A90A00), 0);
#elif defined(JPH_USE_AVX)
	// value & -1.0 keeps at most the sign bit plus exponent bits shared with 1.0; OR with 1.0 then yields exactly +/-1.0
	__m256d minus_one = _mm256_set1_pd(-1.0);
	__m256d one = _mm256_set1_pd(1.0);
	return _mm256_or_pd(_mm256_and_pd(mValue, minus_one), one);
#elif defined(JPH_USE_SSE)
	__m128d minus_one = _mm_set1_pd(-1.0);
	__m128d one = _mm_set1_pd(1.0);
	return DVec3({ _mm_or_pd(_mm_and_pd(mValue.mLow, minus_one), one), _mm_or_pd(_mm_and_pd(mValue.mHigh, minus_one), one) });
#elif defined(JPH_USE_NEON)
	uint64x2_t minus_one = vreinterpretq_u64_f64(vdupq_n_f64(-1.0f));
	uint64x2_t one = vreinterpretq_u64_f64(vdupq_n_f64(1.0f));
	return DVec3({ vreinterpretq_f64_u64(vorrq_u64(vandq_u64(vreinterpretq_u64_f64(mValue.val[0]), minus_one), one)),
				   vreinterpretq_f64_u64(vorrq_u64(vandq_u64(vreinterpretq_u64_f64(mValue.val[1]), minus_one), one)) });
#else
	return DVec3(std::signbit(mF64[0])? -1.0 : 1.0,
				 std::signbit(mF64[1])? -1.0 : 1.0,
				 std::signbit(mF64[2])? -1.0 : 1.0);
#endif
}
DVec3 DVec3::PrepareRoundToZero() const
{
	// A double has 52 mantissa bits, a float 23; the low 29 bits are discarded by the conversion
	constexpr uint64 cDoubleToFloatMantissaLoss = (1U << 29) - 1;

#if defined(JPH_USE_AVX)
	return _mm256_and_pd(mValue, _mm256_castsi256_pd(_mm256_set1_epi64x(int64_t(~cDoubleToFloatMantissaLoss))));
#elif defined(JPH_USE_SSE)
	__m128d mask = _mm_castsi128_pd(_mm_set1_epi64x(int64_t(~cDoubleToFloatMantissaLoss)));
	return DVec3({ _mm_and_pd(mValue.mLow, mask), _mm_and_pd(mValue.mHigh, mask) });
#elif defined(JPH_USE_NEON)
	uint64x2_t mask = vdupq_n_u64(~cDoubleToFloatMantissaLoss);
	return DVec3({ vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(mValue.val[0]), mask)),
				   vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(mValue.val[1]), mask)) });
#else
	double x = BitCast<double>(BitCast<uint64>(mF64[0]) & ~cDoubleToFloatMantissaLoss);
	double y = BitCast<double>(BitCast<uint64>(mF64[1]) & ~cDoubleToFloatMantissaLoss);
	double z = BitCast<double>(BitCast<uint64>(mF64[2]) & ~cDoubleToFloatMantissaLoss);

	return DVec3(x, y, z);
#endif
}
DVec3 DVec3::PrepareRoundToInf() const
{
	// A double has 52 mantissa bits, a float 23; the low 29 bits are discarded by the conversion.
	// Values that convert exactly are left alone; otherwise all lost mantissa bits are set so the conversion rounds away from zero.
	constexpr uint64 cDoubleToFloatMantissaLoss = (1U << 29) - 1;

#if defined(JPH_USE_AVX512)
	__m256i mantissa_loss = _mm256_set1_epi64x(cDoubleToFloatMantissaLoss);
	__mmask8 is_zero = _mm256_testn_epi64_mask(_mm256_castpd_si256(mValue), mantissa_loss);
	__m256d value_or_mantissa_loss = _mm256_or_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
	return _mm256_mask_blend_pd(is_zero, value_or_mantissa_loss, mValue);
#elif defined(JPH_USE_AVX)
	__m256i mantissa_loss = _mm256_set1_epi64x(cDoubleToFloatMantissaLoss);
	__m256d value_and_mantissa_loss = _mm256_and_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
	__m256d is_zero = _mm256_cmp_pd(value_and_mantissa_loss, _mm256_setzero_pd(), _CMP_EQ_OQ);
	__m256d value_or_mantissa_loss = _mm256_or_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
	return _mm256_blendv_pd(value_or_mantissa_loss, mValue, is_zero);
#elif defined(JPH_USE_SSE4_1)
	__m128i mantissa_loss = _mm_set1_epi64x(cDoubleToFloatMantissaLoss);
	__m128d zero = _mm_setzero_pd();
	__m128d value_and_mantissa_loss_low = _mm_and_pd(mValue.mLow, _mm_castsi128_pd(mantissa_loss));
	__m128d is_zero_low = _mm_cmpeq_pd(value_and_mantissa_loss_low, zero);
	__m128d value_or_mantissa_loss_low = _mm_or_pd(mValue.mLow, _mm_castsi128_pd(mantissa_loss));
	__m128d value_and_mantissa_loss_high = _mm_and_pd(mValue.mHigh, _mm_castsi128_pd(mantissa_loss));
	__m128d is_zero_high = _mm_cmpeq_pd(value_and_mantissa_loss_high, zero);
	__m128d value_or_mantissa_loss_high = _mm_or_pd(mValue.mHigh, _mm_castsi128_pd(mantissa_loss));
	return DVec3({ _mm_blendv_pd(value_or_mantissa_loss_low, mValue.mLow, is_zero_low), _mm_blendv_pd(value_or_mantissa_loss_high, mValue.mHigh, is_zero_high) });
#elif defined(JPH_USE_NEON)
	uint64x2_t mantissa_loss = vdupq_n_u64(cDoubleToFloatMantissaLoss);
	float64x2_t zero = vdupq_n_f64(0.0);
	float64x2_t value_and_mantissa_loss_low = vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(mValue.val[0]), mantissa_loss));
	uint64x2_t is_zero_low = vceqq_f64(value_and_mantissa_loss_low, zero);
	float64x2_t value_or_mantissa_loss_low = vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(mValue.val[0]), mantissa_loss));
	float64x2_t value_and_mantissa_loss_high = vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(mValue.val[1]), mantissa_loss));
	float64x2_t value_low = vbslq_f64(is_zero_low, mValue.val[0], value_or_mantissa_loss_low);
	uint64x2_t is_zero_high = vceqq_f64(value_and_mantissa_loss_high, zero);
	float64x2_t value_or_mantissa_loss_high = vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(mValue.val[1]), mantissa_loss));
	float64x2_t value_high = vbslq_f64(is_zero_high, mValue.val[1], value_or_mantissa_loss_high);
	return DVec3({ value_low, value_high });
#else
	uint64 ux = BitCast<uint64>(mF64[0]);
	uint64 uy = BitCast<uint64>(mF64[1]);
	uint64 uz = BitCast<uint64>(mF64[2]);

	double x = BitCast<double>((ux & cDoubleToFloatMantissaLoss) == 0? ux : (ux | cDoubleToFloatMantissaLoss));
	double y = BitCast<double>((uy & cDoubleToFloatMantissaLoss) == 0? uy : (uy | cDoubleToFloatMantissaLoss));
	double z = BitCast<double>((uz & cDoubleToFloatMantissaLoss) == 0? uz : (uz | cDoubleToFloatMantissaLoss));

	return DVec3(x, y, z);
#endif
}
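
// Illustrative sketch (not part of the original file): PrepareRoundToZero /
// PrepareRoundToInf bias the bits that a double-to-float conversion discards
// so that the conversion rounds in a known direction. Combined with
// ToVec3RoundDown / ToVec3RoundUp (defined just below in the original file)
// this produces conservative single precision bounds from double precision ones.
static inline void ExampleConservativeBounds(DVec3Arg inMin, DVec3Arg inMax, Vec3 &outMin, Vec3 &outMax)
{
	outMin = inMin.ToVec3RoundDown(); // never greater than the exact double minimum
	outMax = inMax.ToVec3RoundUp();   // never less than the exact double maximum
}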