Mat44.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Vec3.h>
#include <Jolt/Math/Vec4.h>
#include <Jolt/Math/Quat.h>
JPH_NAMESPACE_BEGIN

#define JPH_EL(r, c) mCol[c].mF32[r]

Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec4Arg inC4) :
	mCol { inC1, inC2, inC3, inC4 }
{
}

Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec3Arg inC4) :
	mCol { inC1, inC2, inC3, Vec4(inC4, 1.0f) }
{
}

Mat44::Mat44(Type inC1, Type inC2, Type inC3, Type inC4) :
	mCol { inC1, inC2, inC3, inC4 }
{
}

Mat44 Mat44::sZero()
{
	return Mat44(Vec4::sZero(), Vec4::sZero(), Vec4::sZero(), Vec4::sZero());
}

Mat44 Mat44::sIdentity()
{
	return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sNaN()
{
	return Mat44(Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN());
}

Mat44 Mat44::sLoadFloat4x4(const Float4 *inV)
{
	Mat44 result;
	for (int c = 0; c < 4; ++c)
		result.mCol[c] = Vec4::sLoadFloat4(inV + c);
	return result;
}

Mat44 Mat44::sLoadFloat4x4Aligned(const Float4 *inV)
{
	Mat44 result;
	for (int c = 0; c < 4; ++c)
		result.mCol[c] = Vec4::sLoadFloat4Aligned(inV + c);
	return result;
}

Mat44 Mat44::sRotationX(float inX)
{
	Vec4 sv, cv;
	Vec4::sReplicate(inX).SinCos(sv, cv);
	float s = sv.GetX(), c = cv.GetX();
	return Mat44(Vec4(1, 0, 0, 0), Vec4(0, c, s, 0), Vec4(0, -s, c, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotationY(float inY)
{
	Vec4 sv, cv;
	Vec4::sReplicate(inY).SinCos(sv, cv);
	float s = sv.GetX(), c = cv.GetX();
	return Mat44(Vec4(c, 0, -s, 0), Vec4(0, 1, 0, 0), Vec4(s, 0, c, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotationZ(float inZ)
{
	Vec4 sv, cv;
	Vec4::sReplicate(inZ).SinCos(sv, cv);
	float s = sv.GetX(), c = cv.GetX();
	return Mat44(Vec4(c, s, 0, 0), Vec4(-s, c, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1));
}

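// Usage sketch (illustrative, not part of the original file; assumes #include <Jolt/Jolt.h>
// and using namespace JPH). The rotation helpers above build column-major matrices, so
// M * v rotates v counterclockwise around the axis: rotating the X axis 90 degrees around Z
// should land on the Y axis.
static void ExampleRotationZ()
{
	Mat44 m = Mat44::sRotationZ(0.5f * JPH_PI);
	Vec3 v = m * Vec3(1, 0, 0);
	JPH_ASSERT(v.IsClose(Vec3(0, 1, 0)));
}
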
Mat44 Mat44::sRotation(QuatArg inQuat)
{
	JPH_ASSERT(inQuat.IsNormalized());

	// See: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation section 'Quaternion-derived rotation matrix'
#ifdef JPH_USE_SSE4_1
	__m128 xyzw = inQuat.mValue.mValue;
	__m128 two_xyzw = _mm_add_ps(xyzw, xyzw);
	__m128 yzxw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 0, 2, 1));
	__m128 two_yzxw = _mm_add_ps(yzxw, yzxw);
	__m128 zxyw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 1, 0, 2));
	__m128 two_zxyw = _mm_add_ps(zxyw, zxyw);
	__m128 wwww = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 3, 3, 3));
	__m128 diagonal = _mm_sub_ps(_mm_sub_ps(_mm_set1_ps(1.0f), _mm_mul_ps(two_yzxw, yzxw)), _mm_mul_ps(two_zxyw, zxyw));	// (1 - 2 y^2 - 2 z^2, 1 - 2 x^2 - 2 z^2, 1 - 2 x^2 - 2 y^2, 1 - 4 w^2)
	__m128 plus = _mm_add_ps(_mm_mul_ps(two_xyzw, zxyw), _mm_mul_ps(two_yzxw, wwww));	// 2 * (xz + yw, xy + zw, yz + xw, ww)
	__m128 minus = _mm_sub_ps(_mm_mul_ps(two_yzxw, xyzw), _mm_mul_ps(two_zxyw, wwww));	// 2 * (xy - zw, yz - xw, xz - yw, 0)

	// Workaround for compiler changing _mm_sub_ps(_mm_mul_ps(...), ...) into a fused multiply sub instruction, resulting in w not being 0
	// There doesn't appear to be a reliable way to turn this off in Clang
	minus = _mm_insert_ps(minus, minus, 0b1000);

	__m128 col0 = _mm_blend_ps(_mm_blend_ps(plus, diagonal, 0b0001), minus, 0b1100);	// (1 - 2 y^2 - 2 z^2, 2 xy + 2 zw, 2 xz - 2 yw, 0)
	__m128 col1 = _mm_blend_ps(_mm_blend_ps(diagonal, minus, 0b1001), plus, 0b0100);	// (2 xy - 2 zw, 1 - 2 x^2 - 2 z^2, 2 yz + 2 xw, 0)
	__m128 col2 = _mm_blend_ps(_mm_blend_ps(minus, plus, 0b0001), diagonal, 0b0100);	// (2 xz + 2 yw, 2 yz - 2 xw, 1 - 2 x^2 - 2 y^2, 0)
	__m128 col3 = _mm_set_ps(1, 0, 0, 0);

	return Mat44(col0, col1, col2, col3);
#else
	float x = inQuat.GetX();
	float y = inQuat.GetY();
	float z = inQuat.GetZ();
	float w = inQuat.GetW();

	float tx = x + x; // Note: Using x + x instead of 2.0f * x to force this function to return the same value as the SSE4.1 version across platforms.
	float ty = y + y;
	float tz = z + z;

	float xx = tx * x;
	float yy = ty * y;
	float zz = tz * z;
	float xy = tx * y;
	float xz = tx * z;
	float xw = tx * w;
	float yz = ty * z;
	float yw = ty * w;
	float zw = tz * w;

	return Mat44(Vec4((1.0f - yy) - zz, xy + zw, xz - yw, 0.0f), // Note: Added extra brackets to force this function to return the same value as the SSE4.1 version across platforms.
				Vec4(xy - zw, (1.0f - zz) - xx, yz + xw, 0.0f),
				Vec4(xz + yw, yz - xw, (1.0f - xx) - yy, 0.0f),
				Vec4(0.0f, 0.0f, 0.0f, 1.0f));
#endif
}

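// Sanity-check sketch (illustrative, not part of the original file; same assumed includes as
// the previous example): the quaternion-derived rotation matrix must match the explicit
// single-axis constructor for a rotation about that axis.
static void ExampleQuatRotation()
{
	float angle = 0.3f;
	Mat44 from_quat = Mat44::sRotation(Quat::sRotation(Vec3::sAxisZ(), angle));
	Mat44 from_axis = Mat44::sRotationZ(angle);
	JPH_ASSERT(from_quat.IsClose(from_axis, 1.0e-10f));
}
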
Mat44 Mat44::sRotation(Vec3Arg inAxis, float inAngle)
{
	return sRotation(Quat::sRotation(inAxis, inAngle));
}

Mat44 Mat44::sTranslation(Vec3Arg inV)
{
	return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(inV, 1));
}

Mat44 Mat44::sRotationTranslation(QuatArg inR, Vec3Arg inT)
{
	Mat44 m = sRotation(inR);
	m.SetTranslation(inT);
	return m;
}

Mat44 Mat44::sInverseRotationTranslation(QuatArg inR, Vec3Arg inT)
{
	Mat44 m = sRotation(inR.Conjugated());
	m.SetTranslation(-m.Multiply3x3(inT));
	return m;
}

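// Usage sketch (illustrative, not part of the original file): sInverseRotationTranslation(q, t)
// is the inverse of sRotationTranslation(q, t), so composing the two yields the identity.
static void ExampleInverseRotationTranslation()
{
	Quat q = Quat::sRotation(Vec3::sAxisY(), 0.25f * JPH_PI);
	Vec3 t(1, 2, 3);
	Mat44 m = Mat44::sRotationTranslation(q, t);
	Mat44 inv = Mat44::sInverseRotationTranslation(q, t);
	JPH_ASSERT((inv * m).IsClose(Mat44::sIdentity(), 1.0e-10f));
}
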
Mat44 Mat44::sScale(float inScale)
{
	return Mat44(Vec4(inScale, 0, 0, 0), Vec4(0, inScale, 0, 0), Vec4(0, 0, inScale, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sScale(Vec3Arg inV)
{
	return Mat44(Vec4(inV.GetX(), 0, 0, 0), Vec4(0, inV.GetY(), 0, 0), Vec4(0, 0, inV.GetZ(), 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sOuterProduct(Vec3Arg inV1, Vec3Arg inV2)
{
	Vec4 v1(inV1, 0);
	return Mat44(v1 * inV2.SplatX(), v1 * inV2.SplatY(), v1 * inV2.SplatZ(), Vec4(0, 0, 0, 1));
}

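// Usage sketch (illustrative, not part of the original file): the outer product matrix
// v1 v2^T maps any vector w to v1 * (v2 . w).
static void ExampleOuterProduct()
{
	Vec3 v1(1, 2, 3), v2(4, 5, 6), w(7, 8, 9);
	Vec3 result = Mat44::sOuterProduct(v1, v2) * w;
	JPH_ASSERT(result.IsClose(v2.Dot(w) * v1));
}
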
Mat44 Mat44::sCrossProduct(Vec3Arg inV)
{
#ifdef JPH_USE_SSE4_1
	// Zero out the W component
	__m128 zero = _mm_setzero_ps();
	__m128 v = _mm_blend_ps(inV.mValue, zero, 0b1000);

	// Negate
	__m128 min_v = _mm_sub_ps(zero, v);

	return Mat44(
		_mm_shuffle_ps(v, min_v, _MM_SHUFFLE(3, 1, 2, 3)),	// [0, z, -y, 0]
		_mm_shuffle_ps(min_v, v, _MM_SHUFFLE(3, 0, 3, 2)),	// [-z, 0, x, 0]
		_mm_blend_ps(_mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 1)), _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(3, 3, 0, 3)), 0b0010),	// [y, -x, 0, 0]
		Vec4(0, 0, 0, 1));
#else
	float x = inV.GetX();
	float y = inV.GetY();
	float z = inV.GetZ();

	return Mat44(
		Vec4(0, z, -y, 0),
		Vec4(-z, 0, x, 0),
		Vec4(y, -x, 0, 0),
		Vec4(0, 0, 0, 1));
#endif
}

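// Usage sketch (illustrative, not part of the original file): sCrossProduct(a) builds the
// skew-symmetric matrix [a]x, so multiplying it with a vector equals taking a cross product.
static void ExampleCrossProductMatrix()
{
	Vec3 a(1, 2, 3), b(4, 5, 6);
	JPH_ASSERT((Mat44::sCrossProduct(a) * b).IsClose(a.Cross(b)));
}
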
Mat44 Mat44::sLookAt(Vec3Arg inPos, Vec3Arg inTarget, Vec3Arg inUp)
{
	Vec3 direction = (inTarget - inPos).NormalizedOr(-Vec3::sAxisZ());
	Vec3 right = direction.Cross(inUp).NormalizedOr(Vec3::sAxisX());
	Vec3 up = right.Cross(direction);

	return Mat44(Vec4(right, 0), Vec4(up, 0), Vec4(-direction, 0), Vec4(inPos, 1)).InversedRotationTranslation();
}

Mat44 Mat44::sPerspective(float inFovY, float inAspect, float inNear, float inFar)
{
	float height = 1.0f / Tan(0.5f * inFovY);
	float width = height / inAspect;
	float range = inFar / (inNear - inFar);

	return Mat44(Vec4(width, 0.0f, 0.0f, 0.0f), Vec4(0.0f, height, 0.0f, 0.0f), Vec4(0.0f, 0.0f, range, -1.0f), Vec4(0.0f, 0.0f, range * inNear, 0.0f));
}

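// Usage sketch (illustrative, not part of the original file): the projection above is
// right-handed (the camera looks along -Z) and appears to map the near plane to depth 0 and
// the far plane to depth 1 after the perspective divide.
static void ExamplePerspective()
{
	Mat44 proj = Mat44::sPerspective(0.5f * JPH_PI, 16.0f / 9.0f, 0.1f, 100.0f);
	Vec4 on_near = proj * Vec4(0, 0, -0.1f, 1);
	Vec4 on_far = proj * Vec4(0, 0, -100.0f, 1);
	float near_depth = on_near.GetZ() / on_near.GetW();
	float far_depth = on_far.GetZ() / on_far.GetW();
	JPH_ASSERT(near_depth > -1.0e-5f && near_depth < 1.0e-5f);
	JPH_ASSERT(far_depth > 1.0f - 1.0e-5f && far_depth < 1.0f + 1.0e-5f);
}
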
bool Mat44::operator == (Mat44Arg inM2) const
{
	return UVec4::sAnd(
		UVec4::sAnd(Vec4::sEquals(mCol[0], inM2.mCol[0]), Vec4::sEquals(mCol[1], inM2.mCol[1])),
		UVec4::sAnd(Vec4::sEquals(mCol[2], inM2.mCol[2]), Vec4::sEquals(mCol[3], inM2.mCol[3]))
	).TestAllTrue();
}

bool Mat44::IsClose(Mat44Arg inM2, float inMaxDistSq) const
{
	for (int i = 0; i < 4; ++i)
		if (!mCol[i].IsClose(inM2.mCol[i], inMaxDistSq))
			return false;
	return true;
}

Mat44 Mat44::operator * (Mat44Arg inM) const
{
	Mat44 result;
#if defined(JPH_USE_SSE)
	for (int i = 0; i < 4; ++i)
	{
		__m128 c = inM.mCol[i].mValue;
		__m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
		t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
		t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
		t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3))));
		result.mCol[i].mValue = t;
	}
#elif defined(JPH_USE_NEON)
	for (int i = 0; i < 4; ++i)
	{
		Type c = inM.mCol[i].mValue;
		Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
		t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
		t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
		t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(c, 3));
		result.mCol[i].mValue = t;
	}
#else
	for (int i = 0; i < 4; ++i)
		result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2] + mCol[3] * inM.mCol[i].mF32[3];
#endif
	return result;
}

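// Usage sketch (illustrative, not part of the original file): with column vectors,
// (A * B) * v applies B first and A second.
static void ExampleComposition()
{
	Mat44 a = Mat44::sTranslation(Vec3(1, 0, 0));
	Mat44 b = Mat44::sRotationZ(0.5f * JPH_PI);
	Vec3 v = (a * b) * Vec3(1, 0, 0); // Rotate X onto Y, then translate along X
	JPH_ASSERT(v.IsClose(Vec3(1, 1, 0)));
}
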
Vec3 Mat44::operator * (Vec3Arg inV) const
{
#if defined(JPH_USE_SSE)
	__m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
	t = _mm_add_ps(t, mCol[3].mValue);
	return Vec3::sFixW(t);
#elif defined(JPH_USE_NEON)
	Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
	t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
	t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
	t = vaddq_f32(t, mCol[3].mValue); // Don't combine this with the first mul into a fused multiply add, causes precision issues
	return Vec3::sFixW(t);
#else
	return Vec3(
		mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0],
		mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1],
		mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2]);
#endif
}

Vec4 Mat44::operator * (Vec4Arg inV) const
{
#if defined(JPH_USE_SSE)
	__m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(3, 3, 3, 3))));
	return t;
#elif defined(JPH_USE_NEON)
	Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
	t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
	t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
	t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(inV.mValue, 3));
	return t;
#else
	return Vec4(
		mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0] * inV.mF32[3],
		mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1] * inV.mF32[3],
		mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2] * inV.mF32[3],
		mCol[0].mF32[3] * inV.mF32[0] + mCol[1].mF32[3] * inV.mF32[1] + mCol[2].mF32[3] * inV.mF32[2] + mCol[3].mF32[3] * inV.mF32[3]);
#endif
}

Vec3 Mat44::Multiply3x3(Vec3Arg inV) const
{
#if defined(JPH_USE_SSE)
	__m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
	t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
	return Vec3::sFixW(t);
#elif defined(JPH_USE_NEON)
	Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
	t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
	t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
	return Vec3::sFixW(t);
#else
	return Vec3(
		mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2],
		mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2],
		mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2]);
#endif
}

Vec3 Mat44::Multiply3x3Transposed(Vec3Arg inV) const
{
#if defined(JPH_USE_SSE4_1)
	__m128 x = _mm_dp_ps(mCol[0].mValue, inV.mValue, 0x7f);
	__m128 y = _mm_dp_ps(mCol[1].mValue, inV.mValue, 0x7f);
	__m128 xy = _mm_blend_ps(x, y, 0b0010);
	__m128 z = _mm_dp_ps(mCol[2].mValue, inV.mValue, 0x7f);
	__m128 xyzz = _mm_blend_ps(xy, z, 0b1100);
	return xyzz;
#else
	return Transposed3x3().Multiply3x3(inV);
#endif
}

Mat44 Mat44::Multiply3x3(Mat44Arg inM) const
{
	JPH_ASSERT(mCol[0][3] == 0.0f);
	JPH_ASSERT(mCol[1][3] == 0.0f);
	JPH_ASSERT(mCol[2][3] == 0.0f);

	Mat44 result;
#if defined(JPH_USE_SSE)
	for (int i = 0; i < 3; ++i)
	{
		__m128 c = inM.mCol[i].mValue;
		__m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
		t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
		t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
		result.mCol[i].mValue = t;
	}
#elif defined(JPH_USE_NEON)
	for (int i = 0; i < 3; ++i)
	{
		Type c = inM.mCol[i].mValue;
		Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
		t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
		t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
		result.mCol[i].mValue = t;
	}
#else
	for (int i = 0; i < 3; ++i)
		result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2];
#endif
	result.mCol[3] = Vec4(0, 0, 0, 1);
	return result;
}

Mat44 Mat44::Multiply3x3LeftTransposed(Mat44Arg inM) const
{
	// Transpose left hand side
	Mat44 trans = Transposed3x3();

	// Do 3x3 matrix multiply
	Mat44 result;
	result.mCol[0] = trans.mCol[0] * inM.mCol[0].SplatX() + trans.mCol[1] * inM.mCol[0].SplatY() + trans.mCol[2] * inM.mCol[0].SplatZ();
	result.mCol[1] = trans.mCol[0] * inM.mCol[1].SplatX() + trans.mCol[1] * inM.mCol[1].SplatY() + trans.mCol[2] * inM.mCol[1].SplatZ();
	result.mCol[2] = trans.mCol[0] * inM.mCol[2].SplatX() + trans.mCol[1] * inM.mCol[2].SplatY() + trans.mCol[2] * inM.mCol[2].SplatZ();
	result.mCol[3] = Vec4(0, 0, 0, 1);
	return result;
}

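// Usage sketch (illustrative, not part of the original file): Multiply3x3LeftTransposed
// computes this^T * inM on the 3x3 part; for a pure rotation R this gives R^T R = I.
static void ExampleLeftTransposed()
{
	Mat44 r = Mat44::sRotation(Quat::sRotation(Vec3::sAxisZ(), 1.0f));
	JPH_ASSERT(r.Multiply3x3LeftTransposed(r).IsClose(Mat44::sIdentity(), 1.0e-10f));
}
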
Mat44 Mat44::Multiply3x3RightTransposed(Mat44Arg inM) const
{
	JPH_ASSERT(mCol[0][3] == 0.0f);
	JPH_ASSERT(mCol[1][3] == 0.0f);
	JPH_ASSERT(mCol[2][3] == 0.0f);

	Mat44 result;
	result.mCol[0] = mCol[0] * inM.mCol[0].SplatX() + mCol[1] * inM.mCol[1].SplatX() + mCol[2] * inM.mCol[2].SplatX();
	result.mCol[1] = mCol[0] * inM.mCol[0].SplatY() + mCol[1] * inM.mCol[1].SplatY() + mCol[2] * inM.mCol[2].SplatY();
	result.mCol[2] = mCol[0] * inM.mCol[0].SplatZ() + mCol[1] * inM.mCol[1].SplatZ() + mCol[2] * inM.mCol[2].SplatZ();
	result.mCol[3] = Vec4(0, 0, 0, 1);
	return result;
}

Mat44 Mat44::operator * (float inV) const
{
	Vec4 multiplier = Vec4::sReplicate(inV);

	Mat44 result;
	for (int c = 0; c < 4; ++c)
		result.mCol[c] = mCol[c] * multiplier;
	return result;
}

Mat44 &Mat44::operator *= (float inV)
{
	for (int c = 0; c < 4; ++c)
		mCol[c] *= inV;

	return *this;
}

Mat44 Mat44::operator + (Mat44Arg inM) const
{
	Mat44 result;
	for (int i = 0; i < 4; ++i)
		result.mCol[i] = mCol[i] + inM.mCol[i];
	return result;
}

Mat44 Mat44::operator - () const
{
	Mat44 result;
	for (int i = 0; i < 4; ++i)
		result.mCol[i] = -mCol[i];
	return result;
}

Mat44 Mat44::operator - (Mat44Arg inM) const
{
	Mat44 result;
	for (int i = 0; i < 4; ++i)
		result.mCol[i] = mCol[i] - inM.mCol[i];
	return result;
}

Mat44 &Mat44::operator += (Mat44Arg inM)
{
	for (int c = 0; c < 4; ++c)
		mCol[c] += inM.mCol[c];

	return *this;
}

void Mat44::StoreFloat4x4(Float4 *outV) const
{
	for (int c = 0; c < 4; ++c)
		mCol[c].StoreFloat4(outV + c);
}

Mat44 Mat44::Transposed() const
{
#if defined(JPH_USE_SSE)
	__m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
	__m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
	__m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0));
	__m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2));

	Mat44 result;
	result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
	result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
	result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0));
	result.mCol[3].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(3, 1, 3, 1));
	return result;
#elif defined(JPH_USE_NEON)
	float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue);
	float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, mCol[3].mValue);
	float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]);
	float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]);

	Mat44 result;
	result.mCol[0].mValue = tmp3.val[0];
	result.mCol[1].mValue = tmp3.val[1];
	result.mCol[2].mValue = tmp4.val[0];
	result.mCol[3].mValue = tmp4.val[1];
	return result;
#else
	Mat44 result;
	for (int c = 0; c < 4; ++c)
		for (int r = 0; r < 4; ++r)
			result.mCol[r].mF32[c] = mCol[c].mF32[r];
	return result;
#endif
}

Mat44 Mat44::Transposed3x3() const
{
#if defined(JPH_USE_SSE)
	__m128 zero = _mm_setzero_ps();
	__m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
	__m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
	__m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(1, 0, 1, 0));
	__m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(3, 2, 3, 2));

	Mat44 result;
	result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
	result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
	result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0));
#elif defined(JPH_USE_NEON)
	float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue);
	float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, vdupq_n_f32(0));
	float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]);
	float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]);

	Mat44 result;
	result.mCol[0].mValue = tmp3.val[0];
	result.mCol[1].mValue = tmp3.val[1];
	result.mCol[2].mValue = tmp4.val[0];
#else
	Mat44 result;
	for (int c = 0; c < 3; ++c)
	{
		for (int r = 0; r < 3; ++r)
			result.mCol[c].mF32[r] = mCol[r].mF32[c];
		result.mCol[c].mF32[3] = 0;
	}
#endif
	result.mCol[3] = Vec4(0, 0, 0, 1);
	return result;
}

Mat44 Mat44::Inversed() const
{
#if defined(JPH_USE_SSE)
	// Algorithm from: http://download.intel.com/design/PentiumIII/sml/24504301.pdf
	// Streaming SIMD Extensions - Inverse of 4x4 Matrix
	// Adapted to load data using _mm_shuffle_ps instead of loading from memory
	// Replaced _mm_rcp_ps with _mm_div_ps for better accuracy

	__m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
	__m128 row1 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0));
	__m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0));
	row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
	tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
	__m128 row3 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2));
	__m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0));
	row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1));

	tmp1 = _mm_mul_ps(row2, row3);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
	__m128 minor0 = _mm_mul_ps(row1, tmp1);
	__m128 minor1 = _mm_mul_ps(row0, tmp1);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
	minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
	minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
	minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2));

	tmp1 = _mm_mul_ps(row1, row2);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
	minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
	__m128 minor3 = _mm_mul_ps(row0, tmp1);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
	minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
	minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
	minor3 = _mm_shuffle_ps(minor3, minor3, _MM_SHUFFLE(1, 0, 3, 2));

	tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
	row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2));
	minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
	__m128 minor2 = _mm_mul_ps(row0, tmp1);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
	minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
	minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
	minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2));

	tmp1 = _mm_mul_ps(row0, row1);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
	minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
	minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
	minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
	minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));

	tmp1 = _mm_mul_ps(row0, row3);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
	minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
	minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
	minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
	minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));

	tmp1 = _mm_mul_ps(row0, row2);
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
	minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
	minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
	tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
	minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
	minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);

	__m128 det = _mm_mul_ps(row0, minor0);
	det = _mm_add_ps(_mm_shuffle_ps(det, det, _MM_SHUFFLE(2, 3, 0, 1)), det); // Original code did (x + z) + (y + w), changed to (x + y) + (z + w) to match the ARM code below and make the result cross platform deterministic
	det = _mm_add_ss(_mm_shuffle_ps(det, det, _MM_SHUFFLE(1, 0, 3, 2)), det);
	det = _mm_div_ss(_mm_set_ss(1.0f), det);
	det = _mm_shuffle_ps(det, det, _MM_SHUFFLE(0, 0, 0, 0));

	Mat44 result;
	result.mCol[0].mValue = _mm_mul_ps(det, minor0);
	result.mCol[1].mValue = _mm_mul_ps(det, minor1);
	result.mCol[2].mValue = _mm_mul_ps(det, minor2);
	result.mCol[3].mValue = _mm_mul_ps(det, minor3);
	return result;
#elif defined(JPH_USE_NEON)
	// Adapted from the SSE version; there are surprisingly few articles about efficient ways of calculating an inverse for ARM on the internet
	Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5);
	Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 0, 1, 4, 5);
	Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6);
	row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7);
	Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 2, 3, 6, 7);
	Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6);
	row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7);

	tmp1 = vmulq_f32(row2, row3);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
	Type minor0 = vmulq_f32(row1, tmp1);
	Type minor1 = vmulq_f32(row0, tmp1);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
	minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
	minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
	minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1);

	tmp1 = vmulq_f32(row1, row2);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
	minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
	Type minor3 = vmulq_f32(row0, tmp1);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
	minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
	minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3);
	minor3 = JPH_NEON_SHUFFLE_F32x4(minor3, minor3, 2, 3, 0, 1);

	tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1);
	tmp1 = vmulq_f32(tmp1, row3);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
	row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1);
	minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
	Type minor2 = vmulq_f32(row0, tmp1);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
	minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
	minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
	minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1);

	tmp1 = vmulq_f32(row0, row1);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
	minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
	minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
	minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
	minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1));

	tmp1 = vmulq_f32(row0, row3);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
	minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
	minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
	minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
	minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));

	tmp1 = vmulq_f32(row0, row2);
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
	minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
	minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1));
	tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
	minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
	minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3);

	Type det = vmulq_f32(row0, minor0);
	det = vdupq_n_f32(vaddvq_f32(det));
	det = vdivq_f32(vdupq_n_f32(1.0f), det);

	Mat44 result;
	result.mCol[0].mValue = vmulq_f32(det, minor0);
	result.mCol[1].mValue = vmulq_f32(det, minor1);
	result.mCol[2].mValue = vmulq_f32(det, minor2);
	result.mCol[3].mValue = vmulq_f32(det, minor3);
	return result;
#else
	float m00 = JPH_EL(0, 0), m10 = JPH_EL(1, 0), m20 = JPH_EL(2, 0), m30 = JPH_EL(3, 0);
	float m01 = JPH_EL(0, 1), m11 = JPH_EL(1, 1), m21 = JPH_EL(2, 1), m31 = JPH_EL(3, 1);
	float m02 = JPH_EL(0, 2), m12 = JPH_EL(1, 2), m22 = JPH_EL(2, 2), m32 = JPH_EL(3, 2);
	float m03 = JPH_EL(0, 3), m13 = JPH_EL(1, 3), m23 = JPH_EL(2, 3), m33 = JPH_EL(3, 3);

	float m10211120 = m10 * m21 - m11 * m20;
	float m10221220 = m10 * m22 - m12 * m20;
	float m10231320 = m10 * m23 - m13 * m20;
	float m10311130 = m10 * m31 - m11 * m30;
	float m10321230 = m10 * m32 - m12 * m30;
	float m10331330 = m10 * m33 - m13 * m30;
	float m11221221 = m11 * m22 - m12 * m21;
	float m11231321 = m11 * m23 - m13 * m21;
	float m11321231 = m11 * m32 - m12 * m31;
	float m11331331 = m11 * m33 - m13 * m31;
	float m12231322 = m12 * m23 - m13 * m22;
	float m12331332 = m12 * m33 - m13 * m32;
	float m20312130 = m20 * m31 - m21 * m30;
	float m20322230 = m20 * m32 - m22 * m30;
	float m20332330 = m20 * m33 - m23 * m30;
	float m21322231 = m21 * m32 - m22 * m31;
	float m21332331 = m21 * m33 - m23 * m31;
	float m22332332 = m22 * m33 - m23 * m32;

	Vec4 col0(m11 * m22332332 - m12 * m21332331 + m13 * m21322231, -m10 * m22332332 + m12 * m20332330 - m13 * m20322230, m10 * m21332331 - m11 * m20332330 + m13 * m20312130, -m10 * m21322231 + m11 * m20322230 - m12 * m20312130);
	Vec4 col1(-m01 * m22332332 + m02 * m21332331 - m03 * m21322231, m00 * m22332332 - m02 * m20332330 + m03 * m20322230, -m00 * m21332331 + m01 * m20332330 - m03 * m20312130, m00 * m21322231 - m01 * m20322230 + m02 * m20312130);
	Vec4 col2(m01 * m12331332 - m02 * m11331331 + m03 * m11321231, -m00 * m12331332 + m02 * m10331330 - m03 * m10321230, m00 * m11331331 - m01 * m10331330 + m03 * m10311130, -m00 * m11321231 + m01 * m10321230 - m02 * m10311130);
	Vec4 col3(-m01 * m12231322 + m02 * m11231321 - m03 * m11221221, m00 * m12231322 - m02 * m10231320 + m03 * m10221220, -m00 * m11231321 + m01 * m10231320 - m03 * m10211120, m00 * m11221221 - m01 * m10221220 + m02 * m10211120);

	float det = m00 * col0.mF32[0] + m01 * col0.mF32[1] + m02 * col0.mF32[2] + m03 * col0.mF32[3];

	return Mat44(col0 / det, col1 / det, col2 / det, col3 / det);
#endif
}

Mat44 Mat44::InversedRotationTranslation() const
{
	Mat44 m = Transposed3x3();
	m.SetTranslation(-m.Multiply3x3(GetTranslation()));
	return m;
}

float Mat44::GetDeterminant3x3() const
{
	return GetAxisX().Dot(GetAxisY().Cross(GetAxisZ()));
}

Mat44 Mat44::Adjointed3x3() const
{
	return Mat44(
		Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0)
			- Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0),
		Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0)
			- Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0),
		Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0)
			- Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0),
		Vec4(0, 0, 0, 1));
}

Mat44 Mat44::Inversed3x3() const
{
	float det = GetDeterminant3x3();

	return Mat44(
		(Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0)
			- Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0)) / det,
		(Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0)
			- Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0)) / det,
		(Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0)
			- Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0)) / det,
		Vec4(0, 0, 0, 1));
}

bool Mat44::SetInversed3x3(Mat44Arg inM)
{
	float det = inM.GetDeterminant3x3();

	// If the determinant is zero the matrix is singular and we return false
	if (det == 0.0f)
		return false;

	// Finish calculating the inverse
	*this = inM.Adjointed3x3();
	mCol[0] /= det;
	mCol[1] /= det;
	mCol[2] /= det;
	return true;
}

Quat Mat44::GetQuaternion() const
{
	float tr = mCol[0].mF32[0] + mCol[1].mF32[1] + mCol[2].mF32[2];

	if (tr >= 0.0f)
	{
		float s = sqrt(tr + 1.0f);
		float is = 0.5f / s;
		return Quat(
			(mCol[1].mF32[2] - mCol[2].mF32[1]) * is,
			(mCol[2].mF32[0] - mCol[0].mF32[2]) * is,
			(mCol[0].mF32[1] - mCol[1].mF32[0]) * is,
			0.5f * s);
	}
	else
	{
		int i = 0;
		if (mCol[1].mF32[1] > mCol[0].mF32[0]) i = 1;
		if (mCol[2].mF32[2] > mCol[i].mF32[i]) i = 2;

		if (i == 0)
		{
			float s = sqrt(mCol[0].mF32[0] - (mCol[1].mF32[1] + mCol[2].mF32[2]) + 1);
			float is = 0.5f / s;
			return Quat(
				0.5f * s,
				(mCol[1].mF32[0] + mCol[0].mF32[1]) * is,
				(mCol[0].mF32[2] + mCol[2].mF32[0]) * is,
				(mCol[1].mF32[2] - mCol[2].mF32[1]) * is);
		}
		else if (i == 1)
		{
			float s = sqrt(mCol[1].mF32[1] - (mCol[2].mF32[2] + mCol[0].mF32[0]) + 1);
			float is = 0.5f / s;
			return Quat(
				(mCol[1].mF32[0] + mCol[0].mF32[1]) * is,
				0.5f * s,
				(mCol[2].mF32[1] + mCol[1].mF32[2]) * is,
				(mCol[2].mF32[0] - mCol[0].mF32[2]) * is);
		}
		else
		{
			JPH_ASSERT(i == 2);

			float s = sqrt(mCol[2].mF32[2] - (mCol[0].mF32[0] + mCol[1].mF32[1]) + 1);
			float is = 0.5f / s;
			return Quat(
				(mCol[0].mF32[2] + mCol[2].mF32[0]) * is,
				(mCol[2].mF32[1] + mCol[1].mF32[2]) * is,
				0.5f * s,
				(mCol[0].mF32[1] - mCol[1].mF32[0]) * is);
		}
	}
}

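// Usage sketch (illustrative, not part of the original file): GetQuaternion branches on the
// largest diagonal element, a standard numerically stable approach that keeps the square
// root argument away from zero. Roundtripping through a matrix recovers the quaternion up
// to sign (q and -q represent the same rotation).
static void ExampleQuaternionRoundtrip()
{
	Quat q = Quat::sRotation(Vec3(1, 2, 3).Normalized(), 2.5f);
	Quat r = Mat44::sRotation(q).GetQuaternion();
	JPH_ASSERT(q.IsClose(r, 1.0e-8f) || q.IsClose(-r, 1.0e-8f));
}
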
Mat44 Mat44::sQuatLeftMultiply(QuatArg inQ)
{
	return Mat44(
		Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(),
		Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(),
		Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(),
		inQ.mValue);
}

Mat44 Mat44::sQuatRightMultiply(QuatArg inQ)
{
	return Mat44(
		Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(),
		Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(),
		Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(),
		inQ.mValue);
}

Mat44 Mat44::GetRotation() const
{
	JPH_ASSERT(mCol[0][3] == 0.0f);
	JPH_ASSERT(mCol[1][3] == 0.0f);
	JPH_ASSERT(mCol[2][3] == 0.0f);

	return Mat44(mCol[0], mCol[1], mCol[2], Vec4(0, 0, 0, 1));
}

Mat44 Mat44::GetRotationSafe() const
{
#if defined(JPH_USE_AVX512)
	return Mat44(_mm_maskz_mov_ps(0b0111, mCol[0].mValue),
				_mm_maskz_mov_ps(0b0111, mCol[1].mValue),
				_mm_maskz_mov_ps(0b0111, mCol[2].mValue),
				Vec4(0, 0, 0, 1));
#elif defined(JPH_USE_SSE4_1)
	__m128 zero = _mm_setzero_ps();
	return Mat44(_mm_blend_ps(mCol[0].mValue, zero, 8),
				_mm_blend_ps(mCol[1].mValue, zero, 8),
				_mm_blend_ps(mCol[2].mValue, zero, 8),
				Vec4(0, 0, 0, 1));
#elif defined(JPH_USE_NEON)
	return Mat44(vsetq_lane_f32(0, mCol[0].mValue, 3),
				vsetq_lane_f32(0, mCol[1].mValue, 3),
				vsetq_lane_f32(0, mCol[2].mValue, 3),
				Vec4(0, 0, 0, 1));
#else
	return Mat44(Vec4(mCol[0].mF32[0], mCol[0].mF32[1], mCol[0].mF32[2], 0),
				Vec4(mCol[1].mF32[0], mCol[1].mF32[1], mCol[1].mF32[2], 0),
				Vec4(mCol[2].mF32[0], mCol[2].mF32[1], mCol[2].mF32[2], 0),
				Vec4(0, 0, 0, 1));
#endif
}

void Mat44::SetRotation(Mat44Arg inRotation)
{
	mCol[0] = inRotation.mCol[0];
	mCol[1] = inRotation.mCol[1];
	mCol[2] = inRotation.mCol[2];
}

Mat44 Mat44::PreTranslated(Vec3Arg inTranslation) const
{
	return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + Multiply3x3(inTranslation), 1));
}

Mat44 Mat44::PostTranslated(Vec3Arg inTranslation) const
{
	return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + inTranslation, 1));
}

Mat44 Mat44::PreScaled(Vec3Arg inScale) const
{
	return Mat44(inScale.GetX() * mCol[0], inScale.GetY() * mCol[1], inScale.GetZ() * mCol[2], mCol[3]);
}

Mat44 Mat44::PostScaled(Vec3Arg inScale) const
{
	Vec4 scale(inScale, 1);
	return Mat44(scale * mCol[0], scale * mCol[1], scale * mCol[2], scale * mCol[3]);
}

Mat44 Mat44::Decompose(Vec3 &outScale) const
{
	// Start the modified Gram-Schmidt algorithm
	// X axis will just be normalized
	Vec3 x = GetAxisX();

	// Make Y axis perpendicular to X
	Vec3 y = GetAxisY();
	float x_dot_x = x.LengthSq();
	y -= (x.Dot(y) / x_dot_x) * x;

	// Make Z axis perpendicular to X
	Vec3 z = GetAxisZ();
	z -= (x.Dot(z) / x_dot_x) * x;

	// Make Z axis perpendicular to Y
	float y_dot_y = y.LengthSq();
	z -= (y.Dot(z) / y_dot_y) * y;

	// Determine the scale
	float z_dot_z = z.LengthSq();
	outScale = Vec3(x_dot_x, y_dot_y, z_dot_z).Sqrt();

	// If the resulting x, y and z vectors don't form a right handed matrix, flip the z axis.
	if (x.Cross(y).Dot(z) < 0.0f)
		outScale.SetZ(-outScale.GetZ());

	// Determine the rotation and translation
	return Mat44(Vec4(x / outScale.GetX(), 0), Vec4(y / outScale.GetY(), 0), Vec4(z / outScale.GetZ(), 0), GetColumn4(3));
}

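// Usage sketch (illustrative, not part of the original file): Decompose splits a
// rotation-translation-scale matrix back into its rotation/translation part and a scale
// vector, so that rt * sScale(scale) reproduces the original matrix.
static void ExampleDecompose()
{
	Mat44 m = Mat44::sRotationTranslation(Quat::sRotation(Vec3::sAxisX(), 0.5f), Vec3(1, 2, 3)) * Mat44::sScale(Vec3(2, 3, 4));
	Vec3 scale;
	Mat44 rt = m.Decompose(scale);
	JPH_ASSERT(scale.IsClose(Vec3(2, 3, 4), 1.0e-8f));
	JPH_ASSERT((rt * Mat44::sScale(scale)).IsClose(m, 1.0e-8f));
}
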
#undef JPH_EL

JPH_NAMESPACE_END