Jolt Physics
A multi core friendly Game Physics Engine
Loading...
Searching...
No Matches
DMat44.inl
Go to the documentation of this file.
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/DVec3.h>

10
12 mCol { inC1, inC2, inC3 },
13 mCol3(inC4)
14{
15}
16
17DMat44::DMat44(Type inC1, Type inC2, Type inC3, DTypeArg inC4) :
18 mCol { inC1, inC2, inC3 },
19 mCol3(inC4)
20{
21}
22
24 mCol { inM.GetColumn4(0), inM.GetColumn4(1), inM.GetColumn4(2) },
25 mCol3(inM.GetTranslation())
26{
27}
28
30 mCol { inRot.GetColumn4(0), inRot.GetColumn4(1), inRot.GetColumn4(2) },
31 mCol3(inT)
32{
33}
34
39
41{
42 return DMat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), DVec3::sZero());
43}
44
52
54{
55 return mCol[0] == inM2.mCol[0]
56 && mCol[1] == inM2.mCol[1]
57 && mCol[2] == inM2.mCol[2]
58 && mCol3 == inM2.mCol3;
59}
60
61bool DMat44::IsClose(DMat44Arg inM2, float inMaxDistSq) const
62{
63 for (int i = 0; i < 3; ++i)
64 if (!mCol[i].IsClose(inM2.mCol[i], inMaxDistSq))
65 return false;
66 return mCol3.IsClose(inM2.mCol3, double(inMaxDistSq));
67}
68
70{
71#if defined(JPH_USE_AVX)
72 __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
73 t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
74 t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
75 return DVec3::sFixW(_mm256_add_pd(mCol3.mValue, _mm256_cvtps_pd(t)));
76#elif defined(JPH_USE_SSE)
77 __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
78 t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
79 t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
80 __m128d low = _mm_add_pd(mCol3.mValue.mLow, _mm_cvtps_pd(t));
81 __m128d high = _mm_add_pd(mCol3.mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(t, t, _MM_SHUFFLE(2, 2, 2, 2))));
82 return DVec3({ low, high });
83#elif defined(JPH_USE_NEON)
84 float32x4_t t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
85 t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
86 t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
87 float64x2_t low = vaddq_f64(mCol3.mValue.val[0], vcvt_f64_f32(vget_low_f32(t)));
88 float64x2_t high = vaddq_f64(mCol3.mValue.val[1], vcvt_high_f64_f32(t));
89 return DVec3::sFixW({ low, high });
90#elif defined(JPH_USE_RVV)
91 const vfloat32m1_t v0 = __riscv_vfmv_v_f_f32m1(inV.mF32[0], 4);
92 const vfloat32m1_t v1 = __riscv_vfmv_v_f_f32m1(inV.mF32[1], 4);
93 const vfloat32m1_t v2 = __riscv_vfmv_v_f_f32m1(inV.mF32[2], 4);
94
95 const vfloat32m1_t col0 = __riscv_vle32_v_f32m1(mCol[0].mF32, 4);
96 const vfloat32m1_t col1 = __riscv_vle32_v_f32m1(mCol[1].mF32, 4);
97 const vfloat32m1_t col2 = __riscv_vle32_v_f32m1(mCol[2].mF32, 4);
98 const vfloat64m2_t col3 = __riscv_vle64_v_f64m2(mCol3.mF64, 4);
99
100 vfloat32m1_t t = __riscv_vfmul_vv_f32m1(col0, v0, 4);
101 t = __riscv_vfmacc_vv_f32m1(t, col1, v1, 4);
102 t = __riscv_vfmacc_vv_f32m1(t, col2, v2, 4);
103
104 vfloat64m2_t t_f64 = __riscv_vfwcvt_f_f_v_f64m2(t, 4);
105 t_f64 = __riscv_vfadd_vv_f64m2(t_f64, col3, 4);
106
107 DVec3 v;
108 __riscv_vse64_v_f64m2(v.mF64, t_f64, 4);
109 return DVec3::sFixW(v.mValue);
110#else
111 return DVec3(
112 mCol3.mF64[0] + double(mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2]),
113 mCol3.mF64[1] + double(mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2]),
114 mCol3.mF64[2] + double(mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2]));
115#endif
116}
117
119{
120#if defined(JPH_USE_AVX)
121 __m256d t = _mm256_add_pd(mCol3.mValue, _mm256_mul_pd(_mm256_cvtps_pd(mCol[0].mValue), _mm256_set1_pd(inV.mF64[0])));
122 t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[1].mValue), _mm256_set1_pd(inV.mF64[1])));
123 t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[2].mValue), _mm256_set1_pd(inV.mF64[2])));
124 return DVec3::sFixW(t);
125#elif defined(JPH_USE_SSE)
126 __m128d xxxx = _mm_set1_pd(inV.mF64[0]);
127 __m128d yyyy = _mm_set1_pd(inV.mF64[1]);
128 __m128d zzzz = _mm_set1_pd(inV.mF64[2]);
129 __m128 col0 = mCol[0].mValue;
130 __m128 col1 = mCol[1].mValue;
131 __m128 col2 = mCol[2].mValue;
132 __m128d t_low = _mm_add_pd(mCol3.mValue.mLow, _mm_mul_pd(_mm_cvtps_pd(col0), xxxx));
133 t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col1), yyyy));
134 t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col2), zzzz));
135 __m128d t_high = _mm_add_pd(mCol3.mValue.mHigh, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col0, col0, _MM_SHUFFLE(2, 2, 2, 2))), xxxx));
136 t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col1, col1, _MM_SHUFFLE(2, 2, 2, 2))), yyyy));
137 t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col2, col2, _MM_SHUFFLE(2, 2, 2, 2))), zzzz));
138 return DVec3({ t_low, t_high });
139#elif defined(JPH_USE_NEON)
140 float64x2_t xxxx = vdupq_laneq_f64(inV.mValue.val[0], 0);
141 float64x2_t yyyy = vdupq_laneq_f64(inV.mValue.val[0], 1);
142 float64x2_t zzzz = vdupq_laneq_f64(inV.mValue.val[1], 0);
143 float32x4_t col0 = mCol[0].mValue;
144 float32x4_t col1 = mCol[1].mValue;
145 float32x4_t col2 = mCol[2].mValue;
146 float64x2_t t_low = vaddq_f64(mCol3.mValue.val[0], vmulq_f64(vcvt_f64_f32(vget_low_f32(col0)), xxxx));
147 t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col1)), yyyy));
148 t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col2)), zzzz));
149 float64x2_t t_high = vaddq_f64(mCol3.mValue.val[1], vmulq_f64(vcvt_high_f64_f32(col0), xxxx));
150 t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col1), yyyy));
151 t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col2), zzzz));
152 return DVec3::sFixW({ t_low, t_high });
153#elif defined(JPH_USE_RVV)
154 const vfloat64m2_t xxxx = __riscv_vfmv_v_f_f64m2(inV.mF64[0], 4);
155 const vfloat64m2_t yyyy = __riscv_vfmv_v_f_f64m2(inV.mF64[1], 4);
156 const vfloat64m2_t zzzz = __riscv_vfmv_v_f_f64m2(inV.mF64[2], 4);
157
158 const vfloat32m1_t col0_f32 = __riscv_vle32_v_f32m1(mCol[0].mF32, 4);
159 const vfloat32m1_t col1_f32 = __riscv_vle32_v_f32m1(mCol[1].mF32, 4);
160 const vfloat32m1_t col2_f32 = __riscv_vle32_v_f32m1(mCol[2].mF32, 4);
161
162 const vfloat64m2_t col0 = __riscv_vfwcvt_f_f_v_f64m2(col0_f32, 4);
163 const vfloat64m2_t col1 = __riscv_vfwcvt_f_f_v_f64m2(col1_f32, 4);
164 const vfloat64m2_t col2 = __riscv_vfwcvt_f_f_v_f64m2(col2_f32, 4);
165
166 const vfloat64m2_t col3 = __riscv_vle64_v_f64m2(mCol3.mF64, 4);
167
168 vfloat64m2_t t = __riscv_vfmul_vv_f64m2(col0, xxxx, 4);
169 t = __riscv_vfmacc_vv_f64m2(t, col1, yyyy, 4);
170 t = __riscv_vfmacc_vv_f64m2(t, col2, zzzz, 4);
171 t = __riscv_vfadd_vv_f64m2(t, col3, 4);
172
173 DVec3 v;
174 __riscv_vse64_v_f64m2(v.mF64, t, 4);
175 return DVec3::sFixW(v.mValue);
176#else
177 return DVec3(
178 mCol3.mF64[0] + double(mCol[0].mF32[0]) * inV.mF64[0] + double(mCol[1].mF32[0]) * inV.mF64[1] + double(mCol[2].mF32[0]) * inV.mF64[2],
179 mCol3.mF64[1] + double(mCol[0].mF32[1]) * inV.mF64[0] + double(mCol[1].mF32[1]) * inV.mF64[1] + double(mCol[2].mF32[1]) * inV.mF64[2],
180 mCol3.mF64[2] + double(mCol[0].mF32[2]) * inV.mF64[0] + double(mCol[1].mF32[2]) * inV.mF64[1] + double(mCol[2].mF32[2]) * inV.mF64[2]);
181#endif
182}
183
185{
186#if defined(JPH_USE_AVX)
187 __m256d t = _mm256_mul_pd(_mm256_cvtps_pd(mCol[0].mValue), _mm256_set1_pd(inV.mF64[0]));
188 t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[1].mValue), _mm256_set1_pd(inV.mF64[1])));
189 t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[2].mValue), _mm256_set1_pd(inV.mF64[2])));
190 return DVec3::sFixW(t);
191#elif defined(JPH_USE_SSE)
192 __m128d xxxx = _mm_set1_pd(inV.mF64[0]);
193 __m128d yyyy = _mm_set1_pd(inV.mF64[1]);
194 __m128d zzzz = _mm_set1_pd(inV.mF64[2]);
195 __m128 col0 = mCol[0].mValue;
196 __m128 col1 = mCol[1].mValue;
197 __m128 col2 = mCol[2].mValue;
198 __m128d t_low = _mm_mul_pd(_mm_cvtps_pd(col0), xxxx);
199 t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col1), yyyy));
200 t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col2), zzzz));
201 __m128d t_high = _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col0, col0, _MM_SHUFFLE(2, 2, 2, 2))), xxxx);
202 t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col1, col1, _MM_SHUFFLE(2, 2, 2, 2))), yyyy));
203 t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col2, col2, _MM_SHUFFLE(2, 2, 2, 2))), zzzz));
204 return DVec3({ t_low, t_high });
205#elif defined(JPH_USE_NEON)
206 float64x2_t xxxx = vdupq_laneq_f64(inV.mValue.val[0], 0);
207 float64x2_t yyyy = vdupq_laneq_f64(inV.mValue.val[0], 1);
208 float64x2_t zzzz = vdupq_laneq_f64(inV.mValue.val[1], 0);
209 float32x4_t col0 = mCol[0].mValue;
210 float32x4_t col1 = mCol[1].mValue;
211 float32x4_t col2 = mCol[2].mValue;
212 float64x2_t t_low = vmulq_f64(vcvt_f64_f32(vget_low_f32(col0)), xxxx);
213 t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col1)), yyyy));
214 t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col2)), zzzz));
215 float64x2_t t_high = vmulq_f64(vcvt_high_f64_f32(col0), xxxx);
216 t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col1), yyyy));
217 t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col2), zzzz));
218 return DVec3::sFixW({ t_low, t_high });
219#elif defined(JPH_USE_RVV)
220 const vfloat64m2_t xxxx = __riscv_vfmv_v_f_f64m2(inV.mF64[0], 4);
221 const vfloat64m2_t yyyy = __riscv_vfmv_v_f_f64m2(inV.mF64[1], 4);
222 const vfloat64m2_t zzzz = __riscv_vfmv_v_f_f64m2(inV.mF64[2], 4);
223
224 const vfloat32m1_t col0 = __riscv_vle32_v_f32m1(mCol[0].mF32, 4);
225 const vfloat32m1_t col1 = __riscv_vle32_v_f32m1(mCol[1].mF32, 4);
226 const vfloat32m1_t col2 = __riscv_vle32_v_f32m1(mCol[2].mF32, 4);
227
228 const vfloat64m2_t col0_f64 = __riscv_vfwcvt_f_f_v_f64m2(col0, 4);
229 const vfloat64m2_t col1_f64 = __riscv_vfwcvt_f_f_v_f64m2(col1, 4);
230 const vfloat64m2_t col2_f64 = __riscv_vfwcvt_f_f_v_f64m2(col2, 4);
231
232 vfloat64m2_t t = __riscv_vfmul_vv_f64m2(col0_f64, xxxx, 4);
233 t = __riscv_vfmacc_vv_f64m2(t, col1_f64, yyyy, 4);
234 t = __riscv_vfmacc_vv_f64m2(t, col2_f64, zzzz, 4);
235
236 DVec3::Type v;
237 __riscv_vse64_v_f64m2(v.mData, t, 4);
238 return DVec3::sFixW(v);
239#else
240 return DVec3(
241 double(mCol[0].mF32[0]) * inV.mF64[0] + double(mCol[1].mF32[0]) * inV.mF64[1] + double(mCol[2].mF32[0]) * inV.mF64[2],
242 double(mCol[0].mF32[1]) * inV.mF64[0] + double(mCol[1].mF32[1]) * inV.mF64[1] + double(mCol[2].mF32[1]) * inV.mF64[2],
243 double(mCol[0].mF32[2]) * inV.mF64[0] + double(mCol[1].mF32[2]) * inV.mF64[1] + double(mCol[2].mF32[2]) * inV.mF64[2]);
244#endif
245}
246
248{
249 DMat44 result;
250
251 // Rotation part
252#if defined(JPH_USE_SSE)
253 for (int i = 0; i < 3; ++i)
254 {
255 __m128 c = inM.GetColumn4(i).mValue;
256 __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
257 t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
258 t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
259 result.mCol[i].mValue = t;
260 }
261#elif defined(JPH_USE_NEON)
262 for (int i = 0; i < 3; ++i)
263 {
264 Type c = inM.GetColumn4(i).mValue;
265 Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
266 t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
267 t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
268 result.mCol[i].mValue = t;
269 }
270#elif defined(JPH_USE_RVV)
271 for (int i = 0; i < 3; ++i)
272 {
273 const Vec4 v = inM.GetColumn4(i);
274 const vfloat32m1_t v0 = __riscv_vfmv_v_f_f32m1(v.mF32[0], 4);
275 const vfloat32m1_t v1 = __riscv_vfmv_v_f_f32m1(v.mF32[1], 4);
276 const vfloat32m1_t v2 = __riscv_vfmv_v_f_f32m1(v.mF32[2], 4);
277
278 const vfloat32m1_t col0 = __riscv_vle32_v_f32m1(mCol[0].mF32, 4);
279 const vfloat32m1_t col1 = __riscv_vle32_v_f32m1(mCol[1].mF32, 4);
280 const vfloat32m1_t col2 = __riscv_vle32_v_f32m1(mCol[2].mF32, 4);
281
282 vfloat32m1_t t = __riscv_vfmul_vv_f32m1(v0, col0, 4);
283 t = __riscv_vfmacc_vv_f32m1(t, col1, v1, 4);
284 t = __riscv_vfmacc_vv_f32m1(t, col2, v2, 4);
285 __riscv_vse32_v_f32m1(result.mCol[i].mF32, t, 4);
286 }
287#else
288 for (int i = 0; i < 3; ++i)
289 {
290 Vec4 coli = inM.GetColumn4(i);
291 result.mCol[i] = mCol[0] * coli.mF32[0] + mCol[1] * coli.mF32[1] + mCol[2] * coli.mF32[2];
292 }
293#endif
294
295 // Translation part
296 result.mCol3 = *this * inM.GetTranslation();
297
298 return result;
299}
300
302{
303 DMat44 result;
304
305 // Rotation part
306#if defined(JPH_USE_SSE)
307 for (int i = 0; i < 3; ++i)
308 {
309 __m128 c = inM.mCol[i].mValue;
310 __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
311 t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
312 t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
313 result.mCol[i].mValue = t;
314 }
315#elif defined(JPH_USE_NEON)
316 for (int i = 0; i < 3; ++i)
317 {
318 Type c = inM.GetColumn4(i).mValue;
319 Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
320 t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
321 t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
322 result.mCol[i].mValue = t;
323 }
324#elif defined(JPH_USE_RVV)
325 for (int i = 0; i < 3; ++i)
326 {
327 const float *col_i = inM.mCol[i].mF32;
328 const vfloat32m1_t v0 = __riscv_vfmv_v_f_f32m1(col_i[0], 4);
329 const vfloat32m1_t v1 = __riscv_vfmv_v_f_f32m1(col_i[1], 4);
330 const vfloat32m1_t v2 = __riscv_vfmv_v_f_f32m1(col_i[2], 4);
331
332 const vfloat32m1_t col0 = __riscv_vle32_v_f32m1(mCol[0].mF32, 4);
333 const vfloat32m1_t col1 = __riscv_vle32_v_f32m1(mCol[1].mF32, 4);
334 const vfloat32m1_t col2 = __riscv_vle32_v_f32m1(mCol[2].mF32, 4);
335
336 vfloat32m1_t t = __riscv_vfmul_vv_f32m1(v0, col0, 4);
337 t = __riscv_vfmacc_vv_f32m1(t, col1, v1, 4);
338 t = __riscv_vfmacc_vv_f32m1(t, col2, v2, 4);
339 __riscv_vse32_v_f32m1(result.mCol[i].mF32, t, 4);
340 }
341#else
342 for (int i = 0; i < 3; ++i)
343 {
344 Vec4 coli = inM.mCol[i];
345 result.mCol[i] = mCol[0] * coli.mF32[0] + mCol[1] * coli.mF32[1] + mCol[2] * coli.mF32[2];
346 }
347#endif
348
349 // Translation part
350 result.mCol3 = *this * inM.GetTranslation();
351
352 return result;
353}
354
356{
357 mCol[0] = inRotation.GetColumn4(0);
358 mCol[1] = inRotation.GetColumn4(1);
359 mCol[2] = inRotation.GetColumn4(2);
360}
361
363{
364 return DMat44(inScale.GetX() * mCol[0], inScale.GetY() * mCol[1], inScale.GetZ() * mCol[2], mCol3);
365}
366
368{
369 Vec4 scale(inScale, 1);
370 return DMat44(scale * mCol[0], scale * mCol[1], scale * mCol[2], DVec3(scale) * mCol3);
371}
372
374{
375 return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + Multiply3x3(inTranslation));
376}
377
379{
380 return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + Multiply3x3(inTranslation));
381}
382
384{
385 return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + inTranslation);
386}
387
389{
390 return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + inTranslation);
391}
392
394{
395 DMat44 m(GetRotation().Inversed3x3());
396 m.mCol3 = -m.Multiply3x3(mCol3);
397 return m;
398}
399
401{
403 m.mCol3 = -m.Multiply3x3(mCol3);
404 return m;
405}
406
#define JPH_NAMESPACE_END
Definition Core.h:428
#define JPH_NAMESPACE_BEGIN
Definition Core.h:422
Holds a 4x4 matrix of floats with the last column consisting of doubles.
Definition DMat44.h:13
JPH_INLINE bool operator==(DMat44Arg inM2) const
Comparison.
Definition DMat44.inl:53
DVec3::TypeArg DTypeArg
Definition DMat44.h:20
JPH_INLINE void SetTranslation(DVec3Arg inV)
Definition DMat44.h:112
JPH_INLINE Vec4 GetColumn4(uint inCol) const
Definition DMat44.h:115
JPH_INLINE DMat44 PostTranslated(Vec3Arg inTranslation) const
Post multiply by translation matrix: result = Mat44::sTranslation(inTranslation) * this (i.e. the translation gets post multiplied).
Definition DMat44.inl:383
JPH_INLINE Mat44 Transposed3x3() const
Transpose 3x3 subpart of matrix.
Definition DMat44.h:119
JPH_INLINE DMat44 PreTranslated(Vec3Arg inTranslation) const
Pre multiply by translation matrix: result = this * Mat44::sTranslation(inTranslation)
Definition DMat44.inl:373
static JPH_INLINE DMat44 sZero()
Zero matrix.
Definition DMat44.inl:35
DMat44()=default
Constructor.
static JPH_INLINE DMat44 sIdentity()
Identity matrix.
Definition DMat44.inl:40
JPH_INLINE Vec3 Multiply3x3(Vec3Arg inV) const
Multiply vector by only 3x3 part of the matrix.
Definition DMat44.h:78
JPH_INLINE DMat44 PostScaled(Vec3Arg inScale) const
Scale a matrix: result = Mat44::sScale(inScale) * this.
Definition DMat44.inl:367
static JPH_INLINE DMat44 sInverseRotationTranslation(QuatArg inR, DVec3Arg inT)
Get inverse matrix of sRotationTranslation.
Definition DMat44.inl:45
JPH_INLINE void SetRotation(Mat44Arg inRotation)
Updates the rotation part of this matrix (the first 3 columns)
Definition DMat44.inl:355
JPH_INLINE DMat44 PreScaled(Vec3Arg inScale) const
Scale a matrix: result = this * Mat44::sScale(inScale)
Definition DMat44.inl:362
JPH_INLINE DMat44 InversedRotationTranslation() const
Inverse 4x4 matrix when it only contains rotation and translation.
Definition DMat44.inl:400
JPH_INLINE DMat44 Inversed() const
Inverse 4x4 matrix.
Definition DMat44.inl:393
JPH_INLINE DVec3 GetTranslation() const
Definition DMat44.h:111
JPH_INLINE DMat44 operator*(Mat44Arg inM) const
Multiply matrix by matrix.
Definition DMat44.inl:247
JPH_INLINE bool IsClose(DMat44Arg inM2, float inMaxDistSq=1.0e-12f) const
Test if two matrices are close.
Definition DMat44.inl:61
JPH_INLINE Mat44 GetRotation() const
Get rotation part only (note: retains the first 3 values from the bottom row)
Definition DMat44.h:128
Vec4::Type Type
Definition DMat44.h:18
Definition DVec3.h:14
double mF64[4]
Definition DVec3.h:283
static JPH_INLINE Type sFixW(TypeArg inValue)
Internal helper function that ensures that the Z component is replicated to the W component to prevent divisions by zero.
Definition DVec3.inl:104
Type mValue
Definition DVec3.h:282
JPH_INLINE bool IsClose(DVec3Arg inV2, double inMaxDistSq=1.0e-24) const
Test if two vectors are close.
Definition DVec3.inl:559
static JPH_INLINE DVec3 sZero()
Vector with all zeros.
Definition DVec3.inl:138
struct { double mData[4];} Type
Definition DVec3.h:29
Holds a 4x4 matrix of floats, but supports also operations on the 3x3 upper left part of the matrix.
Definition Mat44.h:13
JPH_INLINE Vec4 GetColumn4(uint inCol) const
Definition Mat44.h:160
JPH_INLINE Vec3 GetTranslation() const
Definition Mat44.h:152
static JPH_INLINE Mat44 sRotation(Vec3Arg inAxis, float inAngle)
Rotate around arbitrary axis.
Definition Mat44.inl:139
Definition Quat.h:33
JPH_INLINE Quat Conjugated() const
The conjugate [w, -x, -y, -z] is the same as the inverse for unit quaternions.
Definition Quat.h:185
Definition Vec3.h:17
JPH_INLINE float GetX() const
Get individual components.
Definition Vec3.h:127
Type mValue
Definition Vec3.h:299
JPH_INLINE float GetY() const
Definition Vec3.h:128
float mF32[4]
Definition Vec3.h:300
JPH_INLINE float GetZ() const
Definition Vec3.h:129
Definition Vec4.h:14
float mF32[4]
Definition Vec4.h:312
static JPH_INLINE Vec4 sZero()
Vector with all zeros.
Definition Vec4.inl:81
Type mValue
Definition Vec4.h:311