aboutsummaryrefslogtreecommitdiff
path: root/3rdparty
diff options
context:
space:
mode:
Diffstat (limited to '3rdparty')
-rw-r--r--3rdparty/glm/README.md2
-rw-r--r--3rdparty/glm/source/.appveyor.yml92
-rw-r--r--3rdparty/glm/source/.gitignore61
-rw-r--r--3rdparty/glm/source/.travis.yml388
-rw-r--r--3rdparty/glm/source/CMakeLists.txt45
-rw-r--r--3rdparty/glm/source/cmake/cmake_uninstall.cmake.in21
-rw-r--r--3rdparty/glm/source/copying.txt54
-rw-r--r--3rdparty/glm/source/glm/CMakeLists.txt78
-rw-r--r--3rdparty/glm/source/glm/common.hpp539
-rw-r--r--3rdparty/glm/source/glm/detail/_features.hpp394
-rw-r--r--3rdparty/glm/source/glm/detail/_fixes.hpp27
-rw-r--r--3rdparty/glm/source/glm/detail/_noise.hpp81
-rw-r--r--3rdparty/glm/source/glm/detail/_swizzle.hpp804
-rw-r--r--3rdparty/glm/source/glm/detail/_swizzle_func.hpp682
-rw-r--r--3rdparty/glm/source/glm/detail/_vectorize.hpp162
-rw-r--r--3rdparty/glm/source/glm/detail/compute_common.hpp50
-rw-r--r--3rdparty/glm/source/glm/detail/compute_vector_relational.hpp30
-rw-r--r--3rdparty/glm/source/glm/detail/func_common.inl792
-rw-r--r--3rdparty/glm/source/glm/detail/func_common_simd.inl231
-rw-r--r--3rdparty/glm/source/glm/detail/func_exponential.inl152
-rw-r--r--3rdparty/glm/source/glm/detail/func_exponential_simd.inl37
-rw-r--r--3rdparty/glm/source/glm/detail/func_geometric.inl243
-rw-r--r--3rdparty/glm/source/glm/detail/func_geometric_simd.inl163
-rw-r--r--3rdparty/glm/source/glm/detail/func_integer.inl372
-rw-r--r--3rdparty/glm/source/glm/detail/func_integer_simd.inl65
-rw-r--r--3rdparty/glm/source/glm/detail/func_matrix.inl443
-rw-r--r--3rdparty/glm/source/glm/detail/func_matrix_simd.inl249
-rw-r--r--3rdparty/glm/source/glm/detail/func_packing.inl189
-rw-r--r--3rdparty/glm/source/glm/detail/func_packing_simd.inl6
-rw-r--r--3rdparty/glm/source/glm/detail/func_trigonometric.inl197
-rw-r--r--3rdparty/glm/source/glm/detail/func_trigonometric_simd.inl (renamed from 3rdparty/glm/source)0
-rw-r--r--3rdparty/glm/source/glm/detail/func_vector_relational.inl87
-rw-r--r--3rdparty/glm/source/glm/detail/func_vector_relational_simd.inl6
-rw-r--r--3rdparty/glm/source/glm/detail/glm.cpp263
-rw-r--r--3rdparty/glm/source/glm/detail/qualifier.hpp230
-rw-r--r--3rdparty/glm/source/glm/detail/setup.hpp1156
-rw-r--r--3rdparty/glm/source/glm/detail/type_float.hpp68
-rw-r--r--3rdparty/glm/source/glm/detail/type_half.hpp16
-rw-r--r--3rdparty/glm/source/glm/detail/type_half.inl241
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat2x2.hpp177
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat2x2.inl536
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat2x3.hpp159
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat2x3.inl510
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat2x4.hpp161
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat2x4.inl520
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat3x2.hpp167
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat3x2.inl532
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat3x3.hpp184
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat3x3.inl601
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat3x4.hpp166
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat3x4.inl578
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x2.hpp171
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x2.inl574
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x3.hpp171
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x3.inl598
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x4.hpp189
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x4.inl706
-rw-r--r--3rdparty/glm/source/glm/detail/type_mat4x4_simd.inl6
-rw-r--r--3rdparty/glm/source/glm/detail/type_quat.hpp191
-rw-r--r--3rdparty/glm/source/glm/detail/type_quat.inl412
-rw-r--r--3rdparty/glm/source/glm/detail/type_quat_simd.inl188
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec1.hpp308
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec1.inl553
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec2.hpp402
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec2.inl915
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec3.hpp435
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec3.inl1070
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec4.hpp508
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec4.inl1142
-rw-r--r--3rdparty/glm/source/glm/detail/type_vec4_simd.inl775
-rw-r--r--3rdparty/glm/source/glm/exponential.hpp110
-rw-r--r--3rdparty/glm/source/glm/ext.hpp255
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_clip_space.hpp522
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_clip_space.inl555
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_common.hpp36
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_common.inl16
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double2x2.hpp23
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double2x2_precision.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double2x3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double2x3_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double2x4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double2x4_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double3x2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double3x2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double3x3.hpp23
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double3x3_precision.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double3x4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double3x4_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double4x2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double4x2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double4x3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double4x3_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double4x4.hpp23
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_double4x4_precision.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float2x2.hpp23
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float2x2_precision.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float2x3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float2x3_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float2x4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float2x4_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float3x2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float3x2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float3x3.hpp23
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float3x3_precision.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float3x4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float3x4_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float4x2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float4x2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float4x3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float4x3_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float4x4.hpp23
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_float4x4_precision.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int2x2.hpp38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int2x2_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int2x3.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int2x3_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int2x4.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int2x4_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int3x2.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int3x2_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int3x3.hpp38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int3x3_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int3x4.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int3x4_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int4x2.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int4x2_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int4x3.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int4x3_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int4x4.hpp38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_int4x4_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_integer.hpp91
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_integer.inl38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_projection.hpp149
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_projection.inl106
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_relational.hpp132
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_relational.inl88
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_transform.hpp144
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_transform.inl153
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint2x2.hpp38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint2x2_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint2x3.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint2x3_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint2x4.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint2x4_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint3x2.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint3x2_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint3x3.hpp38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint3x3_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint3x4.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint3x4_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint4x2.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint4x2_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint4x3.hpp33
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint4x3_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint4x4.hpp38
-rw-r--r--3rdparty/glm/source/glm/ext/matrix_uint4x4_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_common.hpp135
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_common.inl144
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_common_simd.inl18
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_double.hpp39
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_double_precision.hpp42
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_exponential.hpp63
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_exponential.inl89
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_float.hpp39
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_float_precision.hpp36
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_geometric.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_geometric.inl36
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_relational.hpp62
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_relational.inl35
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_transform.hpp47
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_transform.inl24
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_trigonometric.hpp63
-rw-r--r--3rdparty/glm/source/glm/ext/quaternion_trigonometric.inl37
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_common.hpp181
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_common.inl170
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_constants.hpp40
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_constants.inl24
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_int_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_integer.hpp92
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_integer.inl243
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_packing.hpp32
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_packing.inl (renamed from 3rdparty/imguicolortextedit/source)0
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_reciprocal.hpp135
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_reciprocal.inl107
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_relational.hpp65
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_relational.inl40
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_uint_sized.hpp70
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_ulp.hpp74
-rw-r--r--3rdparty/glm/source/glm/ext/scalar_ulp.inl284
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool1.hpp30
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool1_precision.hpp34
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool3_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_bool4_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_common.hpp228
-rw-r--r--3rdparty/glm/source/glm/ext/vector_common.inl147
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double1.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double1_precision.hpp36
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double3_precision.hpp34
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_double4_precision.hpp35
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float1.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float1_precision.hpp36
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float2_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float3_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_float4_precision.hpp31
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int1.hpp32
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int1_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int2_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int3_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_int4_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_integer.hpp149
-rw-r--r--3rdparty/glm/source/glm/ext/vector_integer.inl85
-rw-r--r--3rdparty/glm/source/glm/ext/vector_packing.hpp32
-rw-r--r--3rdparty/glm/source/glm/ext/vector_packing.inl (renamed from 3rdparty/tracy/tracy)0
-rw-r--r--3rdparty/glm/source/glm/ext/vector_reciprocal.hpp135
-rw-r--r--3rdparty/glm/source/glm/ext/vector_reciprocal.inl105
-rw-r--r--3rdparty/glm/source/glm/ext/vector_relational.hpp107
-rw-r--r--3rdparty/glm/source/glm/ext/vector_relational.inl75
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint1.hpp32
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint1_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint2.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint2_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint3.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint3_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint4.hpp18
-rw-r--r--3rdparty/glm/source/glm/ext/vector_uint4_sized.hpp49
-rw-r--r--3rdparty/glm/source/glm/ext/vector_ulp.hpp109
-rw-r--r--3rdparty/glm/source/glm/ext/vector_ulp.inl74
-rw-r--r--3rdparty/glm/source/glm/fwd.hpp1233
-rw-r--r--3rdparty/glm/source/glm/geometric.hpp116
-rw-r--r--3rdparty/glm/source/glm/glm.hpp136
-rw-r--r--3rdparty/glm/source/glm/gtc/bitfield.hpp266
-rw-r--r--3rdparty/glm/source/glm/gtc/bitfield.inl626
-rw-r--r--3rdparty/glm/source/glm/gtc/color_space.hpp56
-rw-r--r--3rdparty/glm/source/glm/gtc/color_space.inl84
-rw-r--r--3rdparty/glm/source/glm/gtc/constants.hpp165
-rw-r--r--3rdparty/glm/source/glm/gtc/constants.inl167
-rw-r--r--3rdparty/glm/source/glm/gtc/epsilon.hpp60
-rw-r--r--3rdparty/glm/source/glm/gtc/epsilon.inl80
-rw-r--r--3rdparty/glm/source/glm/gtc/integer.hpp43
-rw-r--r--3rdparty/glm/source/glm/gtc/integer.inl33
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_access.hpp60
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_access.inl62
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_integer.hpp433
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_inverse.hpp50
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_inverse.inl118
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_transform.hpp36
-rw-r--r--3rdparty/glm/source/glm/gtc/matrix_transform.inl3
-rw-r--r--3rdparty/glm/source/glm/gtc/noise.hpp61
-rw-r--r--3rdparty/glm/source/glm/gtc/noise.inl807
-rw-r--r--3rdparty/glm/source/glm/gtc/packing.hpp728
-rw-r--r--3rdparty/glm/source/glm/gtc/packing.inl938
-rw-r--r--3rdparty/glm/source/glm/gtc/quaternion.hpp173
-rw-r--r--3rdparty/glm/source/glm/gtc/quaternion.inl208
-rw-r--r--3rdparty/glm/source/glm/gtc/quaternion_simd.inl0
-rw-r--r--3rdparty/glm/source/glm/gtc/random.hpp82
-rw-r--r--3rdparty/glm/source/glm/gtc/random.inl303
-rw-r--r--3rdparty/glm/source/glm/gtc/reciprocal.hpp24
-rw-r--r--3rdparty/glm/source/glm/gtc/round.hpp160
-rw-r--r--3rdparty/glm/source/glm/gtc/round.inl155
-rw-r--r--3rdparty/glm/source/glm/gtc/type_aligned.hpp1315
-rw-r--r--3rdparty/glm/source/glm/gtc/type_precision.hpp2094
-rw-r--r--3rdparty/glm/source/glm/gtc/type_precision.inl6
-rw-r--r--3rdparty/glm/source/glm/gtc/type_ptr.hpp230
-rw-r--r--3rdparty/glm/source/glm/gtc/type_ptr.inl386
-rw-r--r--3rdparty/glm/source/glm/gtc/ulp.hpp152
-rw-r--r--3rdparty/glm/source/glm/gtc/ulp.inl173
-rw-r--r--3rdparty/glm/source/glm/gtc/vec1.hpp30
-rw-r--r--3rdparty/glm/source/glm/gtx/associated_min_max.hpp207
-rw-r--r--3rdparty/glm/source/glm/gtx/associated_min_max.inl354
-rw-r--r--3rdparty/glm/source/glm/gtx/bit.hpp98
-rw-r--r--3rdparty/glm/source/glm/gtx/bit.inl92
-rw-r--r--3rdparty/glm/source/glm/gtx/closest_point.hpp49
-rw-r--r--3rdparty/glm/source/glm/gtx/closest_point.inl45
-rw-r--r--3rdparty/glm/source/glm/gtx/color_encoding.hpp54
-rw-r--r--3rdparty/glm/source/glm/gtx/color_encoding.inl45
-rw-r--r--3rdparty/glm/source/glm/gtx/color_space.hpp72
-rw-r--r--3rdparty/glm/source/glm/gtx/color_space.inl141
-rw-r--r--3rdparty/glm/source/glm/gtx/color_space_YCoCg.hpp60
-rw-r--r--3rdparty/glm/source/glm/gtx/color_space_YCoCg.inl107
-rw-r--r--3rdparty/glm/source/glm/gtx/common.hpp76
-rw-r--r--3rdparty/glm/source/glm/gtx/common.inl125
-rw-r--r--3rdparty/glm/source/glm/gtx/compatibility.hpp133
-rw-r--r--3rdparty/glm/source/glm/gtx/compatibility.inl62
-rw-r--r--3rdparty/glm/source/glm/gtx/component_wise.hpp69
-rw-r--r--3rdparty/glm/source/glm/gtx/component_wise.inl127
-rw-r--r--3rdparty/glm/source/glm/gtx/dual_quaternion.hpp274
-rw-r--r--3rdparty/glm/source/glm/gtx/dual_quaternion.inl352
-rw-r--r--3rdparty/glm/source/glm/gtx/easing.hpp219
-rw-r--r--3rdparty/glm/source/glm/gtx/easing.inl436
-rw-r--r--3rdparty/glm/source/glm/gtx/euler_angles.hpp335
-rw-r--r--3rdparty/glm/source/glm/gtx/euler_angles.inl899
-rw-r--r--3rdparty/glm/source/glm/gtx/extend.hpp42
-rw-r--r--3rdparty/glm/source/glm/gtx/extend.inl48
-rw-r--r--3rdparty/glm/source/glm/gtx/extended_min_max.hpp137
-rw-r--r--3rdparty/glm/source/glm/gtx/extended_min_max.inl138
-rw-r--r--3rdparty/glm/source/glm/gtx/exterior_product.hpp45
-rw-r--r--3rdparty/glm/source/glm/gtx/exterior_product.inl26
-rw-r--r--3rdparty/glm/source/glm/gtx/fast_exponential.hpp95
-rw-r--r--3rdparty/glm/source/glm/gtx/fast_exponential.inl136
-rw-r--r--3rdparty/glm/source/glm/gtx/fast_square_root.hpp98
-rw-r--r--3rdparty/glm/source/glm/gtx/fast_square_root.inl75
-rw-r--r--3rdparty/glm/source/glm/gtx/fast_trigonometry.hpp79
-rw-r--r--3rdparty/glm/source/glm/gtx/fast_trigonometry.inl142
-rw-r--r--3rdparty/glm/source/glm/gtx/float_notmalize.inl13
-rw-r--r--3rdparty/glm/source/glm/gtx/functions.hpp56
-rw-r--r--3rdparty/glm/source/glm/gtx/functions.inl30
-rw-r--r--3rdparty/glm/source/glm/gtx/gradient_paint.hpp53
-rw-r--r--3rdparty/glm/source/glm/gtx/gradient_paint.inl36
-rw-r--r--3rdparty/glm/source/glm/gtx/handed_coordinate_space.hpp50
-rw-r--r--3rdparty/glm/source/glm/gtx/handed_coordinate_space.inl26
-rw-r--r--3rdparty/glm/source/glm/gtx/hash.hpp142
-rw-r--r--3rdparty/glm/source/glm/gtx/hash.inl184
-rw-r--r--3rdparty/glm/source/glm/gtx/integer.hpp76
-rw-r--r--3rdparty/glm/source/glm/gtx/integer.inl185
-rw-r--r--3rdparty/glm/source/glm/gtx/intersect.hpp92
-rw-r--r--3rdparty/glm/source/glm/gtx/intersect.inl200
-rw-r--r--3rdparty/glm/source/glm/gtx/io.hpp201
-rw-r--r--3rdparty/glm/source/glm/gtx/io.inl440
-rw-r--r--3rdparty/glm/source/glm/gtx/log_base.hpp48
-rw-r--r--3rdparty/glm/source/glm/gtx/log_base.inl16
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_cross_product.hpp47
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_cross_product.inl37
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_decompose.hpp46
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_decompose.inl192
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_factorisation.hpp69
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_factorisation.inl84
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_interpolation.hpp60
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_interpolation.inl146
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_major_storage.hpp119
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_major_storage.inl166
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_operation.hpp103
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_operation.inl176
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_query.hpp77
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_query.inl113
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_transform_2d.hpp81
-rw-r--r--3rdparty/glm/source/glm/gtx/matrix_transform_2d.inl68
-rw-r--r--3rdparty/glm/source/glm/gtx/mixed_product.hpp41
-rw-r--r--3rdparty/glm/source/glm/gtx/mixed_product.inl15
-rw-r--r--3rdparty/glm/source/glm/gtx/norm.hpp88
-rw-r--r--3rdparty/glm/source/glm/gtx/norm.inl95
-rw-r--r--3rdparty/glm/source/glm/gtx/normal.hpp41
-rw-r--r--3rdparty/glm/source/glm/gtx/normal.inl15
-rw-r--r--3rdparty/glm/source/glm/gtx/normalize_dot.hpp49
-rw-r--r--3rdparty/glm/source/glm/gtx/normalize_dot.inl16
-rw-r--r--3rdparty/glm/source/glm/gtx/number_precision.hpp61
-rw-r--r--3rdparty/glm/source/glm/gtx/number_precision.inl6
-rw-r--r--3rdparty/glm/source/glm/gtx/optimum_pow.hpp54
-rw-r--r--3rdparty/glm/source/glm/gtx/optimum_pow.inl22
-rw-r--r--3rdparty/glm/source/glm/gtx/orthonormalize.hpp49
-rw-r--r--3rdparty/glm/source/glm/gtx/orthonormalize.inl29
-rw-r--r--3rdparty/glm/source/glm/gtx/pca.hpp111
-rw-r--r--3rdparty/glm/source/glm/gtx/pca.inl343
-rw-r--r--3rdparty/glm/source/glm/gtx/perpendicular.hpp41
-rw-r--r--3rdparty/glm/source/glm/gtx/perpendicular.inl10
-rw-r--r--3rdparty/glm/source/glm/gtx/polar_coordinates.hpp48
-rw-r--r--3rdparty/glm/source/glm/gtx/polar_coordinates.inl36
-rw-r--r--3rdparty/glm/source/glm/gtx/projection.hpp43
-rw-r--r--3rdparty/glm/source/glm/gtx/projection.inl10
-rw-r--r--3rdparty/glm/source/glm/gtx/quaternion.hpp174
-rw-r--r--3rdparty/glm/source/glm/gtx/quaternion.inl159
-rw-r--r--3rdparty/glm/source/glm/gtx/range.hpp98
-rw-r--r--3rdparty/glm/source/glm/gtx/raw_data.hpp51
-rw-r--r--3rdparty/glm/source/glm/gtx/raw_data.inl2
-rw-r--r--3rdparty/glm/source/glm/gtx/rotate_normalized_axis.hpp68
-rw-r--r--3rdparty/glm/source/glm/gtx/rotate_normalized_axis.inl58
-rw-r--r--3rdparty/glm/source/glm/gtx/rotate_vector.hpp123
-rw-r--r--3rdparty/glm/source/glm/gtx/rotate_vector.inl187
-rw-r--r--3rdparty/glm/source/glm/gtx/scalar_multiplication.hpp75
-rw-r--r--3rdparty/glm/source/glm/gtx/scalar_relational.hpp36
-rw-r--r--3rdparty/glm/source/glm/gtx/scalar_relational.inl88
-rw-r--r--3rdparty/glm/source/glm/gtx/spline.hpp65
-rw-r--r--3rdparty/glm/source/glm/gtx/spline.inl60
-rw-r--r--3rdparty/glm/source/glm/gtx/std_based_type.hpp68
-rw-r--r--3rdparty/glm/source/glm/gtx/std_based_type.inl6
-rw-r--r--3rdparty/glm/source/glm/gtx/string_cast.hpp46
-rw-r--r--3rdparty/glm/source/glm/gtx/string_cast.inl492
-rw-r--r--3rdparty/glm/source/glm/gtx/texture.hpp46
-rw-r--r--3rdparty/glm/source/glm/gtx/texture.inl17
-rw-r--r--3rdparty/glm/source/glm/gtx/transform.hpp60
-rw-r--r--3rdparty/glm/source/glm/gtx/transform.inl23
-rw-r--r--3rdparty/glm/source/glm/gtx/transform2.hpp89
-rw-r--r--3rdparty/glm/source/glm/gtx/transform2.inl125
-rw-r--r--3rdparty/glm/source/glm/gtx/type_aligned.hpp982
-rw-r--r--3rdparty/glm/source/glm/gtx/type_aligned.inl6
-rw-r--r--3rdparty/glm/source/glm/gtx/type_trait.hpp85
-rw-r--r--3rdparty/glm/source/glm/gtx/type_trait.inl61
-rw-r--r--3rdparty/glm/source/glm/gtx/vec_swizzle.hpp2782
-rw-r--r--3rdparty/glm/source/glm/gtx/vector_angle.hpp57
-rw-r--r--3rdparty/glm/source/glm/gtx/vector_angle.inl45
-rw-r--r--3rdparty/glm/source/glm/gtx/vector_query.hpp66
-rw-r--r--3rdparty/glm/source/glm/gtx/vector_query.inl154
-rw-r--r--3rdparty/glm/source/glm/gtx/wrap.hpp37
-rw-r--r--3rdparty/glm/source/glm/gtx/wrap.inl6
-rw-r--r--3rdparty/glm/source/glm/integer.hpp212
-rw-r--r--3rdparty/glm/source/glm/mat2x2.hpp9
-rw-r--r--3rdparty/glm/source/glm/mat2x3.hpp9
-rw-r--r--3rdparty/glm/source/glm/mat2x4.hpp9
-rw-r--r--3rdparty/glm/source/glm/mat3x2.hpp9
-rw-r--r--3rdparty/glm/source/glm/mat3x3.hpp8
-rw-r--r--3rdparty/glm/source/glm/mat3x4.hpp8
-rw-r--r--3rdparty/glm/source/glm/mat4x2.hpp9
-rw-r--r--3rdparty/glm/source/glm/mat4x3.hpp8
-rw-r--r--3rdparty/glm/source/glm/mat4x4.hpp9
-rw-r--r--3rdparty/glm/source/glm/matrix.hpp161
-rw-r--r--3rdparty/glm/source/glm/packing.hpp173
-rw-r--r--3rdparty/glm/source/glm/simd/common.h240
-rw-r--r--3rdparty/glm/source/glm/simd/exponential.h20
-rw-r--r--3rdparty/glm/source/glm/simd/geometric.h124
-rw-r--r--3rdparty/glm/source/glm/simd/integer.h115
-rw-r--r--3rdparty/glm/source/glm/simd/matrix.h1028
-rw-r--r--3rdparty/glm/source/glm/simd/neon.h155
-rw-r--r--3rdparty/glm/source/glm/simd/packing.h8
-rw-r--r--3rdparty/glm/source/glm/simd/platform.h408
-rw-r--r--3rdparty/glm/source/glm/simd/trigonometric.h9
-rw-r--r--3rdparty/glm/source/glm/simd/vector_relational.h8
-rw-r--r--3rdparty/glm/source/glm/trigonometric.hpp210
-rw-r--r--3rdparty/glm/source/glm/vec2.hpp14
-rw-r--r--3rdparty/glm/source/glm/vec3.hpp14
-rw-r--r--3rdparty/glm/source/glm/vec4.hpp15
-rw-r--r--3rdparty/glm/source/glm/vector_relational.hpp121
-rw-r--r--3rdparty/glm/source/manual.md2430
-rw-r--r--3rdparty/glm/source/readme.md1231
-rw-r--r--3rdparty/glm/source/test/CMakeLists.txt246
-rw-r--r--3rdparty/glm/source/test/bug/CMakeLists.txt1
-rw-r--r--3rdparty/glm/source/test/bug/bug_ms_vec_static.cpp31
-rw-r--r--3rdparty/glm/source/test/cmake/CMakeLists.txt8
-rw-r--r--3rdparty/glm/source/test/cmake/test_find_glm.cpp22
-rw-r--r--3rdparty/glm/source/test/core/CMakeLists.txt52
-rw-r--r--3rdparty/glm/source/test/core/core_cpp_constexpr.cpp750
-rw-r--r--3rdparty/glm/source/test/core/core_cpp_defaulted_ctor.cpp145
-rw-r--r--3rdparty/glm/source/test/core/core_force_aligned_gentypes.cpp10
-rw-r--r--3rdparty/glm/source/test/core/core_force_arch_unknown.cpp14
-rw-r--r--3rdparty/glm/source/test/core/core_force_compiler_unknown.cpp14
-rw-r--r--3rdparty/glm/source/test/core/core_force_ctor_init.cpp139
-rw-r--r--3rdparty/glm/source/test/core/core_force_cxx03.cpp14
-rw-r--r--3rdparty/glm/source/test/core/core_force_cxx98.cpp14
-rw-r--r--3rdparty/glm/source/test/core/core_force_cxx_unknown.cpp14
-rw-r--r--3rdparty/glm/source/test/core/core_force_depth_zero_to_one.cpp12
-rw-r--r--3rdparty/glm/source/test/core/core_force_explicit_ctor.cpp17
-rw-r--r--3rdparty/glm/source/test/core/core_force_inline.cpp12
-rw-r--r--3rdparty/glm/source/test/core/core_force_left_handed.cpp12
-rw-r--r--3rdparty/glm/source/test/core/core_force_platform_unknown.cpp14
-rw-r--r--3rdparty/glm/source/test/core/core_force_pure.cpp434
-rw-r--r--3rdparty/glm/source/test/core/core_force_quat_xyzw.cpp13
-rw-r--r--3rdparty/glm/source/test/core/core_force_size_t_length.cpp12
-rw-r--r--3rdparty/glm/source/test/core/core_force_unrestricted_gentype.cpp12
-rw-r--r--3rdparty/glm/source/test/core/core_force_xyzw_only.cpp58
-rw-r--r--3rdparty/glm/source/test/core/core_func_common.cpp1349
-rw-r--r--3rdparty/glm/source/test/core/core_func_exponential.cpp185
-rw-r--r--3rdparty/glm/source/test/core/core_func_geometric.cpp200
-rw-r--r--3rdparty/glm/source/test/core/core_func_integer.cpp1556
-rw-r--r--3rdparty/glm/source/test/core/core_func_integer_bit_count.cpp291
-rw-r--r--3rdparty/glm/source/test/core/core_func_integer_find_lsb.cpp416
-rw-r--r--3rdparty/glm/source/test/core/core_func_integer_find_msb.cpp440
-rw-r--r--3rdparty/glm/source/test/core/core_func_matrix.cpp312
-rw-r--r--3rdparty/glm/source/test/core/core_func_noise.cpp7
-rw-r--r--3rdparty/glm/source/test/core/core_func_packing.cpp156
-rw-r--r--3rdparty/glm/source/test/core/core_func_swizzle.cpp164
-rw-r--r--3rdparty/glm/source/test/core/core_func_trigonometric.cpp10
-rw-r--r--3rdparty/glm/source/test/core/core_func_vector_relational.cpp180
-rw-r--r--3rdparty/glm/source/test/core/core_setup_force_cxx98.cpp12
-rw-r--r--3rdparty/glm/source/test/core/core_setup_force_size_t_length.cpp22
-rw-r--r--3rdparty/glm/source/test/core/core_setup_message.cpp230
-rw-r--r--3rdparty/glm/source/test/core/core_setup_platform_unknown.cpp21
-rw-r--r--3rdparty/glm/source/test/core/core_setup_precision.cpp58
-rw-r--r--3rdparty/glm/source/test/core/core_type_aligned.cpp92
-rw-r--r--3rdparty/glm/source/test/core/core_type_cast.cpp146
-rw-r--r--3rdparty/glm/source/test/core/core_type_ctor.cpp351
-rw-r--r--3rdparty/glm/source/test/core/core_type_int.cpp26
-rw-r--r--3rdparty/glm/source/test/core/core_type_length.cpp78
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat2x2.cpp177
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat2x3.cpp142
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat2x4.cpp147
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat3x2.cpp148
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat3x3.cpp197
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat3x4.cpp149
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat4x2.cpp151
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat4x3.cpp152
-rw-r--r--3rdparty/glm/source/test/core/core_type_mat4x4.cpp218
-rw-r--r--3rdparty/glm/source/test/core/core_type_vec1.cpp169
-rw-r--r--3rdparty/glm/source/test/core/core_type_vec2.cpp392
-rw-r--r--3rdparty/glm/source/test/core/core_type_vec3.cpp628
-rw-r--r--3rdparty/glm/source/test/core/core_type_vec4.cpp850
-rw-r--r--3rdparty/glm/source/test/ext/CMakeLists.txt55
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_clip_space.cpp13
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_common.cpp53
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int2x2_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int2x3_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int2x4_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int3x2_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int3x3_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int3x4_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int4x2_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int4x3_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_int4x4_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_integer.cpp237
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_projection.cpp13
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_relational.cpp163
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_transform.cpp61
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint2x2_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint2x3_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint2x4_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint3x2_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint3x3_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint3x4_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint4x2_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint4x3_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_matrix_uint4x4_sized.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_common.cpp61
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_exponential.cpp87
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_geometric.cpp88
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_relational.cpp51
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_transform.cpp45
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_trigonometric.cpp40
-rw-r--r--3rdparty/glm/source/test/ext/ext_quaternion_type.cpp113
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_common.cpp360
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_constants.cpp36
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_int_sized.cpp43
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_integer.cpp686
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_packing.cpp28
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_reciprocal.cpp171
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_relational.cpp106
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_uint_sized.cpp43
-rw-r--r--3rdparty/glm/source/test/ext/ext_scalar_ulp.cpp96
-rw-r--r--3rdparty/glm/source/test/ext/ext_vec1.cpp157
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_bool1.cpp104
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_common.cpp365
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_iec559.cpp166
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_int1_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_int2_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_int3_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_int4_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_integer.cpp547
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_integer_sized.cpp216
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_packing.cpp58
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_reciprocal.cpp186
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_relational.cpp205
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_uint1_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_uint2_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_uint3_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_uint4_sized.cpp41
-rw-r--r--3rdparty/glm/source/test/ext/ext_vector_ulp.cpp99
-rw-r--r--3rdparty/glm/source/test/glm.cppcheck6
-rw-r--r--3rdparty/glm/source/test/gtc/CMakeLists.txt20
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_bitfield.cpp936
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_color_space.cpp78
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_constants.cpp30
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_epsilon.cpp78
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_integer.cpp233
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_matrix_access.cpp383
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_matrix_integer.cpp8
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_matrix_inverse.cpp51
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_matrix_transform.cpp55
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_noise.cpp86
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_packing.cpp878
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_quaternion.cpp345
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_random.cpp381
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_reciprocal.cpp8
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_round.cpp458
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_type_aligned.cpp181
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_type_precision.cpp1041
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_type_ptr.cpp335
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_ulp.cpp99
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_user_defined_types.cpp30
-rw-r--r--3rdparty/glm/source/test/gtc/gtc_vec1.cpp8
-rw-r--r--3rdparty/glm/source/test/gtx/CMakeLists.txt59
-rw-r--r--3rdparty/glm/source/test/gtx/gtx.cpp8
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_associated_min_max.cpp10
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_closest_point.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_color_encoding.cpp51
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_color_space.cpp20
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_color_space_YCoCg.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_common.cpp161
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_compatibility.cpp19
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_component_wise.cpp116
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_dual_quaternion.cpp205
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_easing.cpp65
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_euler_angle.cpp539
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_extend.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_extended_min_max.cpp101
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_extented_min_max.cpp39
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_exterior_product.cpp14
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_fast_exponential.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_fast_square_root.cpp45
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_fast_trigonometry.cpp564
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_functions.cpp36
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_gradient_paint.cpp34
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_handed_coordinate_space.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_int_10_10_10_2.cpp18
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_integer.cpp108
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_intersect.cpp88
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_io.cpp186
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_load.cpp124
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_log_base.cpp54
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_cross_product.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_decompose.cpp19
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_factorisation.cpp105
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_interpolation.cpp122
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_major_storage.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_operation.cpp86
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_query.cpp66
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_matrix_transform_2d.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_mixed_product.cpp18
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_norm.cpp81
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_normal.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_normalize_dot.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_number_precision.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_optimum_pow.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_orthonormalize.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_pca.cpp724
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_perpendicular.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_polar_coordinates.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_projection.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_quaternion.cpp107
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_random.cpp99
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_range.cpp83
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_rotate_normalized_axis.cpp9
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_rotate_vector.cpp77
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_scalar_multiplication.cpp37
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_scalar_relational.cpp174
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_simd_mat4.cpp324
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_simd_vec4.cpp71
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_spline.cpp100
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_string_cast.cpp155
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_texture.cpp22
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_type_aligned.cpp114
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_type_trait.cpp13
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_vec_swizzle.cpp11
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_vector_angle.cpp59
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_vector_query.cpp82
-rw-r--r--3rdparty/glm/source/test/gtx/gtx_wrap.cpp191
-rw-r--r--3rdparty/glm/source/test/perf/CMakeLists.txt6
-rw-r--r--3rdparty/glm/source/test/perf/perf_matrix_div.cpp153
-rw-r--r--3rdparty/glm/source/test/perf/perf_matrix_inverse.cpp150
-rw-r--r--3rdparty/glm/source/test/perf/perf_matrix_mul.cpp154
-rw-r--r--3rdparty/glm/source/test/perf/perf_matrix_mul_vector.cpp154
-rw-r--r--3rdparty/glm/source/test/perf/perf_matrix_transpose.cpp150
-rw-r--r--3rdparty/glm/source/test/perf/perf_vector_mul_matrix.cpp154
-rw-r--r--3rdparty/glm/source/util/autoexp.txt28
-rw-r--r--3rdparty/glm/source/util/autoexp.vc2010.dat3896
-rw-r--r--3rdparty/glm/source/util/glm.natvis555
-rw-r--r--3rdparty/glm/source/util/usertype.dat407
-rw-r--r--3rdparty/imguicolortextedit/CMakeLists.txt3
-rw-r--r--3rdparty/imguicolortextedit/TextEditor.cpp3160
-rw-r--r--3rdparty/imguicolortextedit/TextEditor.h389
-rw-r--r--3rdparty/tracy/tracy/Tracy.hpp267
-rw-r--r--3rdparty/tracy/tracy/TracyC.h320
-rw-r--r--3rdparty/tracy/tracy/TracyClient.cpp56
-rw-r--r--3rdparty/tracy/tracy/TracyD3D11.hpp442
-rw-r--r--3rdparty/tracy/tracy/TracyD3D12.hpp506
-rw-r--r--3rdparty/tracy/tracy/TracyLua.hpp431
-rw-r--r--3rdparty/tracy/tracy/TracyOpenCL.hpp414
-rw-r--r--3rdparty/tracy/tracy/TracyOpenGL.hpp325
-rw-r--r--3rdparty/tracy/tracy/TracyVulkan.hpp512
-rw-r--r--3rdparty/tracy/tracy/client/TracyAlloc.cpp42
-rw-r--r--3rdparty/tracy/tracy/client/TracyArmCpuTable.hpp370
-rw-r--r--3rdparty/tracy/tracy/client/TracyCallstack.cpp1005
-rw-r--r--3rdparty/tracy/tracy/client/TracyCallstack.h35
-rw-r--r--3rdparty/tracy/tracy/client/TracyCallstack.hpp125
-rw-r--r--3rdparty/tracy/tracy/client/TracyDebug.hpp11
-rw-r--r--3rdparty/tracy/tracy/client/TracyDxt1.cpp641
-rw-r--r--3rdparty/tracy/tracy/client/TracyDxt1.hpp11
-rw-r--r--3rdparty/tracy/tracy/client/TracyFastVector.hpp118
-rw-r--r--3rdparty/tracy/tracy/client/TracyLock.hpp548
-rw-r--r--3rdparty/tracy/tracy/client/TracyProfiler.cpp4238
-rw-r--r--3rdparty/tracy/tracy/client/TracyProfiler.hpp942
-rw-r--r--3rdparty/tracy/tracy/client/TracyRingBuffer.hpp131
-rw-r--r--3rdparty/tracy/tracy/client/TracyScoped.hpp175
-rw-r--r--3rdparty/tracy/tracy/client/TracyStringHelpers.hpp50
-rw-r--r--3rdparty/tracy/tracy/client/TracySysTime.cpp108
-rw-r--r--3rdparty/tracy/tracy/client/TracySysTime.hpp36
-rw-r--r--3rdparty/tracy/tracy/client/TracySysTrace.cpp1489
-rw-r--r--3rdparty/tracy/tracy/client/TracySysTrace.hpp28
-rw-r--r--3rdparty/tracy/tracy/client/TracyThread.hpp85
-rw-r--r--3rdparty/tracy/tracy/client/tracy_SPSCQueue.h148
-rw-r--r--3rdparty/tracy/tracy/client/tracy_concurrentqueue.h1446
-rw-r--r--3rdparty/tracy/tracy/client/tracy_rpmalloc.cpp2500
-rw-r--r--3rdparty/tracy/tracy/client/tracy_rpmalloc.hpp261
-rw-r--r--3rdparty/tracy/tracy/common/TracyAlign.hpp27
-rw-r--r--3rdparty/tracy/tracy/common/TracyAlloc.hpp69
-rw-r--r--3rdparty/tracy/tracy/common/TracyApi.h16
-rw-r--r--3rdparty/tracy/tracy/common/TracyColor.hpp690
-rw-r--r--3rdparty/tracy/tracy/common/TracyForceInline.hpp20
-rw-r--r--3rdparty/tracy/tracy/common/TracyMutex.hpp24
-rw-r--r--3rdparty/tracy/tracy/common/TracyProtocol.hpp139
-rw-r--r--3rdparty/tracy/tracy/common/TracyQueue.hpp850
-rw-r--r--3rdparty/tracy/tracy/common/TracySocket.cpp749
-rw-r--r--3rdparty/tracy/tracy/common/TracySocket.hpp156
-rw-r--r--3rdparty/tracy/tracy/common/TracyStackFrames.cpp122
-rw-r--r--3rdparty/tracy/tracy/common/TracyStackFrames.hpp22
-rw-r--r--3rdparty/tracy/tracy/common/TracySystem.cpp304
-rw-r--r--3rdparty/tracy/tracy/common/TracySystem.hpp32
-rw-r--r--3rdparty/tracy/tracy/common/TracyUwp.hpp11
-rw-r--r--3rdparty/tracy/tracy/common/TracyYield.hpp26
-rw-r--r--3rdparty/tracy/tracy/common/src-from-vcxproj.mk21
-rw-r--r--3rdparty/tracy/tracy/common/tracy_lz4.cpp2492
-rw-r--r--3rdparty/tracy/tracy/common/tracy_lz4.hpp777
-rw-r--r--3rdparty/tracy/tracy/common/tracy_lz4hc.cpp1620
-rw-r--r--3rdparty/tracy/tracy/common/tracy_lz4hc.hpp405
-rw-r--r--3rdparty/tracy/tracy/common/unix-release.mk13
-rw-r--r--3rdparty/tracy/tracy/common/unix.mk82
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/LICENSE29
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/alloc.cpp174
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/backtrace.hpp186
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/config.h22
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/dwarf.cpp4407
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/elf.cpp4928
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/fileline.cpp351
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/filenames.hpp52
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/internal.hpp385
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/macho.cpp1360
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/mmapio.cpp115
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/posix.cpp109
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/sort.cpp113
-rw-r--r--3rdparty/tracy/tracy/libbacktrace/state.cpp76
729 files changed, 144809 insertions, 1 deletions
diff --git a/3rdparty/glm/README.md b/3rdparty/glm/README.md
new file mode 100644
index 0000000..b164cb1
--- /dev/null
+++ b/3rdparty/glm/README.md
@@ -0,0 +1,2 @@
+The following files are removed from glm's source code:
+- docs/
diff --git a/3rdparty/glm/source/.appveyor.yml b/3rdparty/glm/source/.appveyor.yml
new file mode 100644
index 0000000..5ce6028
--- /dev/null
+++ b/3rdparty/glm/source/.appveyor.yml
@@ -0,0 +1,92 @@
+shallow_clone: true
+
+platform:
+ - x86
+ - x64
+
+configuration:
+ - Debug
+ - Release
+
+image:
+ - Visual Studio 2013
+ - Visual Studio 2015
+ - Visual Studio 2017
+ - Visual Studio 2019
+
+environment:
+ matrix:
+ - GLM_ARGUMENTS: -DGLM_TEST_FORCE_PURE=ON
+ - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_SSE2=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON
+ - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON
+ - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_14=ON
+ - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_17=ON
+
+matrix:
+ exclude:
+ - image: Visual Studio 2013
+ GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON
+ - image: Visual Studio 2013
+ GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_14=ON
+ - image: Visual Studio 2013
+ GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_17=ON
+ - image: Visual Studio 2013
+ configuration: Debug
+ - image: Visual Studio 2015
+ GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_SSE2=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON
+ - image: Visual Studio 2015
+ GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_14=ON
+ - image: Visual Studio 2015
+ GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_17=ON
+ - image: Visual Studio 2015
+ platform: x86
+ - image: Visual Studio 2015
+ configuration: Debug
+ - image: Visual Studio 2017
+ platform: x86
+ - image: Visual Studio 2017
+ configuration: Debug
+ - image: Visual Studio 2019
+ platform: x64
+
+branches:
+ only:
+ - master
+
+before_build:
+ - ps: |
+ mkdir build
+ cd build
+
+ if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2013") {
+ $env:generator="Visual Studio 12 2013"
+ }
+ if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2015") {
+ $env:generator="Visual Studio 14 2015"
+ }
+ if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2017") {
+ $env:generator="Visual Studio 15 2017"
+ }
+ if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2019") {
+ $env:generator="Visual Studio 16 2019"
+ }
+ if ($env:PLATFORM -eq "x64") {
+ $env:generator="$env:generator Win64"
+ }
+ echo generator="$env:generator"
+ cmake .. -G "$env:generator" -DCMAKE_INSTALL_PREFIX="$env:APPVEYOR_BUILD_FOLDER/install" -DGLM_QUIET=ON -DGLM_TEST_ENABLE=ON "$env:GLM_ARGUMENTS"
+
+build_script:
+ - cmake --build . --parallel --config %CONFIGURATION% -- /m /v:minimal
+ - cmake --build . --target install --parallel --config %CONFIGURATION% -- /m /v:minimal
+
+test_script:
+ - ctest --parallel 4 --verbose -C %CONFIGURATION%
+ - cd ..
+ - ps: |
+ mkdir build_test_cmake
+ cd build_test_cmake
+ cmake ..\test\cmake\ -G "$env:generator" -DCMAKE_PREFIX_PATH="$env:APPVEYOR_BUILD_FOLDER/install"
+ - cmake --build . --parallel --config %CONFIGURATION% -- /m /v:minimal
+
+deploy: off
diff --git a/3rdparty/glm/source/.gitignore b/3rdparty/glm/source/.gitignore
new file mode 100644
index 0000000..9dbd6d8
--- /dev/null
+++ b/3rdparty/glm/source/.gitignore
@@ -0,0 +1,61 @@
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+
+# Fortran module files
+*.mod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+
+# CMake
+CMakeCache.txt
+CMakeFiles
+cmake_install.cmake
+install_manifest.txt
+*.cmake
+!glmConfig.cmake
+!glmConfig-version.cmake
+# ^ May need to add future .cmake files as exceptions
+
+# Test logs
+Testing/*
+
+# Test input
+test/gtc/*.dds
+
+# Project Files
+Makefile
+*.cbp
+*.user
+
+# Misc.
+*.log
+
+# local build(s)
+build*
+
+/.vs
+/.vscode
+/CMakeSettings.json
+.DS_Store
+*.swp
diff --git a/3rdparty/glm/source/.travis.yml b/3rdparty/glm/source/.travis.yml
new file mode 100644
index 0000000..1660ec0
--- /dev/null
+++ b/3rdparty/glm/source/.travis.yml
@@ -0,0 +1,388 @@
+language: cpp
+
+branches:
+ only:
+ - master
+ - stable
+
+jobs:
+ include:
+ - name: "Xcode 7.3 C++98 pure release"
+ os: osx
+ osx_image: xcode7.3
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_FORCE_PURE=ON"
+
+ - name: "Xcode 7.3 C++98 sse2 release"
+ os: osx
+ osx_image: xcode7.3
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON"
+
+ - name: "Xcode 7.3 C++98 ms release"
+ os: osx
+ osx_image: xcode7.3
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON"
+
+ - name: "XCode 7.3 C++11 pure release"
+ os: osx
+ osx_image: xcode7.3
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_FORCE_PURE=ON"
+
+ - name: "XCode 7.3 C++11 sse2 release"
+ os: osx
+ osx_image: xcode7.3
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON"
+
+ - name: "XCode 10.3 C++11 sse2 release"
+ os: osx
+ osx_image: xcode10.3
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON"
+
+ - name: "XCode 12.2 C++11 sse2 release"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++11 sse2 debug"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++11 avx debug"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++14 avx debug"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++14 pure debug"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++17 pure debug"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++17 sse2 debug"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++17 sse2 release"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "XCode 12.2 C++17 avx release"
+ os: osx
+ osx_image: xcode12.2
+ env:
+ - MATRIX_EVAL=""
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 4.9 C++98 pure release"
+ os: linux
+ dist: Xenial
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-4.9
+ env:
+ - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 4.9 C++98 pure debug"
+ os: linux
+ dist: Xenial
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-4.9
+ env:
+ - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 4.9 C++98 ms debug"
+ os: linux
+ dist: Xenial
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-4.9
+ env:
+ - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 4.9 C++11 ms debug"
+ os: linux
+ dist: Xenial
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-4.9
+ env:
+ - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 4.9 C++11 pure debug"
+ os: linux
+ dist: Xenial
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-4.9
+ env:
+ - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 6 C++14 pure debug"
+ os: linux
+ dist: bionic
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-6
+ env:
+ - MATRIX_EVAL="CC=gcc-6 && CXX=g++-6"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 6 C++14 ms debug"
+ os: linux
+ dist: bionic
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-6
+ env:
+ - MATRIX_EVAL="CC=gcc-6 && CXX=g++-6"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 7 C++17 ms debug"
+ os: linux
+ dist: bionic
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-7
+ env:
+ - MATRIX_EVAL="CC=gcc-7 && CXX=g++-7"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 7 C++17 pure debug"
+ os: linux
+ dist: bionic
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-7
+ env:
+ - MATRIX_EVAL="CC=gcc-7 && CXX=g++-7"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 10 C++17 pure debug"
+ os: linux
+ dist: bionic
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-10
+ env:
+ - MATRIX_EVAL="CC=gcc-10 && CXX=g++-10"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "GCC 10 C++17 pure release"
+ os: linux
+ dist: bionic
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-10
+ env:
+ - MATRIX_EVAL="CC=gcc-10 && CXX=g++-10"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++14 pure release"
+ os: linux
+ dist: Xenial
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++14 pure debug"
+ os: linux
+ dist: Xenial
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++14 sse2 debug"
+ os: linux
+ dist: Xenial
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++14 sse2 debug"
+ os: linux
+ dist: focal
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++17 sse2 debug"
+ os: linux
+ dist: focal
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++17 avx2 debug"
+ os: linux
+ dist: focal
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX2=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++17 pure debug"
+ os: linux
+ dist: focal
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+ - name: "Clang C++17 pure release"
+ os: linux
+ dist: focal
+ env:
+ - MATRIX_EVAL="CC=clang && CXX=clang++"
+ - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON"
+ - CTEST_ENV="--parallel 4 --output-on-failure"
+ - CMAKE_ENV="--parallel"
+
+before_script:
+ - cmake --version
+ - eval "${MATRIX_EVAL}"
+
+script:
+ - ${CC} --version
+ - mkdir ./build
+ - cd ./build
+ - cmake -DCMAKE_INSTALL_PREFIX=$TRAVIS_BUILD_DIR/install -DCMAKE_CXX_COMPILER=$COMPILER ${CMAKE_BUILD_ENV} ..
+ - cmake --build . ${CMAKE_ENV}
+ - ctest ${CTEST_ENV}
+ - cmake --build . --target install ${CMAKE_ENV}
+ - cd $TRAVIS_BUILD_DIR
+ - mkdir ./build_test_cmake
+ - cd ./build_test_cmake
+ - cmake -DCMAKE_CXX_COMPILER=$COMPILER $TRAVIS_BUILD_DIR/test/cmake/ -DCMAKE_PREFIX_PATH=$TRAVIS_BUILD_DIR/install
+ - cmake --build .
+
+
diff --git a/3rdparty/glm/source/CMakeLists.txt b/3rdparty/glm/source/CMakeLists.txt
new file mode 100644
index 0000000..b7641a2
--- /dev/null
+++ b/3rdparty/glm/source/CMakeLists.txt
@@ -0,0 +1,45 @@
+cmake_minimum_required(VERSION 3.2 FATAL_ERROR)
+cmake_policy(VERSION 3.2)
+
+
+file(READ "glm/detail/setup.hpp" GLM_SETUP_FILE)
+string(REGEX MATCH "#define[ ]+GLM_VERSION_MAJOR[ ]+([0-9]+)" _ ${GLM_SETUP_FILE})
+set(GLM_VERSION_MAJOR "${CMAKE_MATCH_1}")
+string(REGEX MATCH "#define[ ]+GLM_VERSION_MINOR[ ]+([0-9]+)" _ ${GLM_SETUP_FILE})
+set(GLM_VERSION_MINOR "${CMAKE_MATCH_1}")
+string(REGEX MATCH "#define[ ]+GLM_VERSION_PATCH[ ]+([0-9]+)" _ ${GLM_SETUP_FILE})
+set(GLM_VERSION_PATCH "${CMAKE_MATCH_1}")
+string(REGEX MATCH "#define[ ]+GLM_VERSION_REVISION[ ]+([0-9]+)" _ ${GLM_SETUP_FILE})
+set(GLM_VERSION_REVISION "${CMAKE_MATCH_1}")
+
+set(GLM_VERSION ${GLM_VERSION_MAJOR}.${GLM_VERSION_MINOR}.${GLM_VERSION_PATCH}.${GLM_VERSION_REVISION})
+project(glm VERSION ${GLM_VERSION} LANGUAGES CXX)
+message(STATUS "GLM: Version " ${GLM_VERSION})
+
+add_subdirectory(glm)
+add_library(glm::glm ALIAS glm)
+
+if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
+
+ include(CPack)
+ install(DIRECTORY glm DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} PATTERN "CMakeLists.txt" EXCLUDE)
+ install(EXPORT glm FILE glmConfig.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/glm NAMESPACE glm::)
+ include(CMakePackageConfigHelpers)
+ write_basic_package_version_file("glmConfigVersion.cmake" COMPATIBILITY AnyNewerVersion)
+ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/glmConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/glm)
+
+ include(CTest)
+ if(BUILD_TESTING)
+ add_subdirectory(test)
+ endif()
+
+endif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
+
+if (NOT TARGET uninstall)
+configure_file(cmake/cmake_uninstall.cmake.in
+ cmake_uninstall.cmake IMMEDIATE @ONLY)
+
+add_custom_target(uninstall
+ "${CMAKE_COMMAND}" -P
+ "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake")
+endif()
diff --git a/3rdparty/glm/source/cmake/cmake_uninstall.cmake.in b/3rdparty/glm/source/cmake/cmake_uninstall.cmake.in
new file mode 100644
index 0000000..c2d34d4
--- /dev/null
+++ b/3rdparty/glm/source/cmake/cmake_uninstall.cmake.in
@@ -0,0 +1,21 @@
+if(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt")
+ message(FATAL_ERROR "Cannot find install manifest: @CMAKE_BINARY_DIR@/install_manifest.txt")
+endif()
+
+file(READ "@CMAKE_BINARY_DIR@/install_manifest.txt" files)
+string(REGEX REPLACE "\n" ";" files "${files}")
+foreach(file ${files})
+ message(STATUS "Uninstalling $ENV{DESTDIR}${file}")
+ if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ exec_program(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ if(NOT "${rm_retval}" STREQUAL 0)
+ message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}")
+ endif()
+ else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ message(STATUS "File $ENV{DESTDIR}${file} does not exist.")
+ endif()
+endforeach()
diff --git a/3rdparty/glm/source/copying.txt b/3rdparty/glm/source/copying.txt
new file mode 100644
index 0000000..779c32f
--- /dev/null
+++ b/3rdparty/glm/source/copying.txt
@@ -0,0 +1,54 @@
+================================================================================
+OpenGL Mathematics (GLM)
+--------------------------------------------------------------------------------
+GLM is licensed under The Happy Bunny License or MIT License
+
+================================================================================
+The Happy Bunny License (Modified MIT License)
+--------------------------------------------------------------------------------
+Copyright (c) 2005 - G-Truc Creation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Restrictions:
+ By making use of the Software for military purposes, you choose to make a
+ Bunny unhappy.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+================================================================================
+The MIT License
+--------------------------------------------------------------------------------
+Copyright (c) 2005 - G-Truc Creation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/3rdparty/glm/source/glm/CMakeLists.txt b/3rdparty/glm/source/glm/CMakeLists.txt
new file mode 100644
index 0000000..2d5cff2
--- /dev/null
+++ b/3rdparty/glm/source/glm/CMakeLists.txt
@@ -0,0 +1,78 @@
+file(GLOB ROOT_SOURCE *.cpp)
+file(GLOB ROOT_INLINE *.inl)
+file(GLOB ROOT_HEADER *.hpp)
+file(GLOB ROOT_TEXT ../*.txt)
+file(GLOB ROOT_MD ../*.md)
+file(GLOB ROOT_NAT ../util/glm.natvis)
+
+file(GLOB_RECURSE CORE_SOURCE ./detail/*.cpp)
+file(GLOB_RECURSE CORE_INLINE ./detail/*.inl)
+file(GLOB_RECURSE CORE_HEADER ./detail/*.hpp)
+
+file(GLOB_RECURSE EXT_SOURCE ./ext/*.cpp)
+file(GLOB_RECURSE EXT_INLINE ./ext/*.inl)
+file(GLOB_RECURSE EXT_HEADER ./ext/*.hpp)
+
+file(GLOB_RECURSE GTC_SOURCE ./gtc/*.cpp)
+file(GLOB_RECURSE GTC_INLINE ./gtc/*.inl)
+file(GLOB_RECURSE GTC_HEADER ./gtc/*.hpp)
+
+file(GLOB_RECURSE GTX_SOURCE ./gtx/*.cpp)
+file(GLOB_RECURSE GTX_INLINE ./gtx/*.inl)
+file(GLOB_RECURSE GTX_HEADER ./gtx/*.hpp)
+
+file(GLOB_RECURSE SIMD_SOURCE ./simd/*.cpp)
+file(GLOB_RECURSE SIMD_INLINE ./simd/*.inl)
+file(GLOB_RECURSE SIMD_HEADER ./simd/*.h)
+
+source_group("Text Files" FILES ${ROOT_TEXT} ${ROOT_MD})
+source_group("Core Files" FILES ${CORE_SOURCE})
+source_group("Core Files" FILES ${CORE_INLINE})
+source_group("Core Files" FILES ${CORE_HEADER})
+source_group("EXT Files" FILES ${EXT_SOURCE})
+source_group("EXT Files" FILES ${EXT_INLINE})
+source_group("EXT Files" FILES ${EXT_HEADER})
+source_group("GTC Files" FILES ${GTC_SOURCE})
+source_group("GTC Files" FILES ${GTC_INLINE})
+source_group("GTC Files" FILES ${GTC_HEADER})
+source_group("GTX Files" FILES ${GTX_SOURCE})
+source_group("GTX Files" FILES ${GTX_INLINE})
+source_group("GTX Files" FILES ${GTX_HEADER})
+source_group("SIMD Files" FILES ${SIMD_SOURCE})
+source_group("SIMD Files" FILES ${SIMD_INLINE})
+source_group("SIMD Files" FILES ${SIMD_HEADER})
+
+add_library(glm INTERFACE)
+
+include(GNUInstallDirs)
+
+target_include_directories(glm INTERFACE
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+
+install(TARGETS glm EXPORT glm)
+
+if(BUILD_STATIC_LIBS)
+add_library(glm_static STATIC ${ROOT_TEXT} ${ROOT_MD} ${ROOT_NAT}
+ ${ROOT_SOURCE} ${ROOT_INLINE} ${ROOT_HEADER}
+ ${CORE_SOURCE} ${CORE_INLINE} ${CORE_HEADER}
+ ${EXT_SOURCE} ${EXT_INLINE} ${EXT_HEADER}
+ ${GTC_SOURCE} ${GTC_INLINE} ${GTC_HEADER}
+ ${GTX_SOURCE} ${GTX_INLINE} ${GTX_HEADER}
+ ${SIMD_SOURCE} ${SIMD_INLINE} ${SIMD_HEADER})
+ target_link_libraries(glm_static PUBLIC glm)
+ add_library(glm::glm_static ALIAS glm_static)
+endif()
+
+if(BUILD_SHARED_LIBS)
+add_library(glm_shared SHARED ${ROOT_TEXT} ${ROOT_MD} ${ROOT_NAT}
+ ${ROOT_SOURCE} ${ROOT_INLINE} ${ROOT_HEADER}
+ ${CORE_SOURCE} ${CORE_INLINE} ${CORE_HEADER}
+ ${EXT_SOURCE} ${EXT_INLINE} ${EXT_HEADER}
+ ${GTC_SOURCE} ${GTC_INLINE} ${GTC_HEADER}
+ ${GTX_SOURCE} ${GTX_INLINE} ${GTX_HEADER}
+ ${SIMD_SOURCE} ${SIMD_INLINE} ${SIMD_HEADER})
+ target_link_libraries(glm_shared PUBLIC glm)
+ add_library(glm::glm_shared ALIAS glm_shared)
+endif()
diff --git a/3rdparty/glm/source/glm/common.hpp b/3rdparty/glm/source/glm/common.hpp
new file mode 100644
index 0000000..0328dc9
--- /dev/null
+++ b/3rdparty/glm/source/glm/common.hpp
@@ -0,0 +1,539 @@
+/// @ref core
+/// @file glm/common.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+///
+/// @defgroup core_func_common Common functions
+/// @ingroup core
+///
+/// Provides GLSL common functions
+///
+/// These all operate component-wise. The description is per component.
+///
+/// Include <glm/common.hpp> to use these core features.
+
+#pragma once
+
+#include "detail/qualifier.hpp"
+#include "detail/_fixes.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_func_common
+ /// @{
+
+ /// Returns x if x >= 0; otherwise, it returns -x.
+ ///
+ /// @tparam genType floating-point or signed integer; scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/abs.xml">GLSL abs man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType abs(genType x);
+
+ /// Returns x if x >= 0; otherwise, it returns -x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or signed integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/abs.xml">GLSL abs man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> abs(vec<L, T, Q> const& x);
+
+ /// Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/sign.xml">GLSL sign man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> sign(vec<L, T, Q> const& x);
+
+ /// Returns a value equal to the nearest integer that is less than or equal to x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/floor.xml">GLSL floor man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> floor(vec<L, T, Q> const& x);
+
+ /// Returns a value equal to the nearest integer to x
+ /// whose absolute value is not larger than the absolute value of x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/trunc.xml">GLSL trunc man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> trunc(vec<L, T, Q> const& x);
+
+ /// Returns a value equal to the nearest integer to x.
+ /// The fraction 0.5 will round in a direction chosen by the
+ /// implementation, presumably the direction that is fastest.
+ /// This includes the possibility that round(x) returns the
+ /// same value as roundEven(x) for all values of x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/round.xml">GLSL round man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> round(vec<L, T, Q> const& x);
+
+ /// Returns a value equal to the nearest integer to x.
+ /// A fractional part of 0.5 will round toward the nearest even
+ /// integer. (Both 3.5 and 4.5 for x will return 4.0.)
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/roundEven.xml">GLSL roundEven man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ /// @see <a href="http://developer.amd.com/documentation/articles/pages/New-Round-to-Even-Technique.aspx">New round to even technique</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> roundEven(vec<L, T, Q> const& x);
+
+ /// Returns a value equal to the nearest integer
+ /// that is greater than or equal to x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/ceil.xml">GLSL ceil man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> ceil(vec<L, T, Q> const& x);
+
+ /// Return x - floor(x).
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/fract.xml">GLSL fract man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType fract(genType x);
+
+ /// Return x - floor(x).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/fract.xml">GLSL fract man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fract(vec<L, T, Q> const& x);
+
+ template<typename genType>
+ GLM_FUNC_DECL genType mod(genType x, genType y);
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mod(vec<L, T, Q> const& x, T y);
+
+ /// Modulus. Returns x - y * floor(x / y)
+ /// for each component in x using the floating point value y.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types, include glm/gtc/integer for integer scalar types support
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/mod.xml">GLSL mod man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mod(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the fractional part of x and sets i to the integer
+ /// part (as a whole number floating point value). Both the
+ /// return value and the output parameter will have the same
+ /// sign as x.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/modf.xml">GLSL modf man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType modf(genType x, genType& i);
+
+ /// Returns y if y < x; otherwise, it returns x.
+ ///
+ /// @tparam genType Floating-point or integer; scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/min.xml">GLSL min man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType min(genType x, genType y);
+
+ /// Returns y if y < x; otherwise, it returns x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/min.xml">GLSL min man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, T y);
+
+ /// Returns y if y < x; otherwise, it returns x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/min.xml">GLSL min man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns y if x < y; otherwise, it returns x.
+ ///
+ /// @tparam genType Floating-point or integer; scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/max.xml">GLSL max man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType max(genType x, genType y);
+
+ /// Returns y if x < y; otherwise, it returns x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/max.xml">GLSL max man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, T y);
+
+ /// Returns y if x < y; otherwise, it returns x.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/max.xml">GLSL max man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns min(max(x, minVal), maxVal) for each component in x
+ /// using the floating-point values minVal and maxVal.
+ ///
+ /// @tparam genType Floating-point or integer; scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/clamp.xml">GLSL clamp man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal);
+
+ /// Returns min(max(x, minVal), maxVal) for each component in x
+ /// using the floating-point values minVal and maxVal.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/clamp.xml">GLSL clamp man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, T minVal, T maxVal);
+
+ /// Returns min(max(x, minVal), maxVal) for each component in x
+ /// using the floating-point values minVal and maxVal.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/clamp.xml">GLSL clamp man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal);
+
+ /// If genTypeU is a floating scalar or vector:
+ /// Returns x * (1.0 - a) + y * a, i.e., the linear blend of
+ /// x and y using the floating-point value a.
+ /// The value for a is not restricted to the range [0, 1].
+ ///
+ /// If genTypeU is a boolean scalar or vector:
+ /// Selects which vector each returned component comes
+ /// from. For a component of 'a' that is false, the
+ /// corresponding component of 'x' is returned. For a
+ /// component of 'a' that is true, the corresponding
+ /// component of 'y' is returned. Components of 'x' and 'y' that
+ /// are not selected are allowed to be invalid floating point
+ /// values and will have no effect on the results. Thus, this
+ /// provides different functionality than
+ /// genType mix(genType x, genType y, genType(a))
+ /// where a is a Boolean vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/mix.xml">GLSL mix man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ ///
+ /// @param[in] x Value to interpolate.
+ /// @param[in] y Value to interpolate.
+ /// @param[in] a Interpolant.
+ ///
+ /// @tparam genTypeT Floating point scalar or vector.
+ /// @tparam genTypeU Floating point or boolean scalar or vector. It can't be a vector if it is the length of genTypeT.
+ ///
+ /// @code
+ /// #include <glm/glm.hpp>
+ /// ...
+ /// float a;
+ /// bool b;
+ /// glm::dvec3 e;
+ /// glm::dvec3 f;
+ /// glm::vec4 g;
+ /// glm::vec4 h;
+ /// ...
+ /// glm::vec4 r = glm::mix(g, h, a); // Interpolate with a floating-point scalar two vectors.
+ /// glm::vec4 s = glm::mix(g, h, b); // Returns g or h;
+ /// glm::dvec3 t = glm::mix(e, f, a); // Types of the third parameter is not required to match with the first and the second.
+ /// glm::vec4 u = glm::mix(g, h, r); // Interpolations can be performed per component with a vector for the last parameter.
+ /// @endcode
+ template<typename genTypeT, typename genTypeU>
+ GLM_FUNC_DECL genTypeT mix(genTypeT x, genTypeT y, genTypeU a);
+
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, U, Q> const& a);
+
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, U a);
+
+ /// Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/step.xml">GLSL step man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType step(genType edge, genType x);
+
+ /// Returns 0.0 if x < edge, otherwise it returns 1.0.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/step.xml">GLSL step man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> step(T edge, vec<L, T, Q> const& x);
+
+ /// Returns 0.0 if x < edge, otherwise it returns 1.0.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/step.xml">GLSL step man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> step(vec<L, T, Q> const& edge, vec<L, T, Q> const& x);
+
+ /// Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+ /// performs smooth Hermite interpolation between 0 and 1
+ /// when edge0 < x < edge1. This is useful in cases where
+ /// you would want a threshold function with a smooth
+ /// transition. This is equivalent to:
+ /// genType t;
+ /// t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ /// return t * t * (3 - 2 * t);
+ /// Results are undefined if edge0 >= edge1.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/smoothstep.xml">GLSL smoothstep man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x);
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> smoothstep(T edge0, T edge1, vec<L, T, Q> const& x);
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> smoothstep(vec<L, T, Q> const& edge0, vec<L, T, Q> const& edge1, vec<L, T, Q> const& x);
+
+ /// Returns true if x holds a NaN (not a number)
+ /// representation in the underlying implementation's set of
+ /// floating point representations. Returns false otherwise,
+ /// including for implementations with no NaN
+ /// representations.
+ ///
+ /// /!\ When using compiler fast math, this function may fail.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/isnan.xml">GLSL isnan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> isnan(vec<L, T, Q> const& x);
+
+ /// Returns true if x holds a positive infinity or negative
+ /// infinity representation in the underlying implementation's
+ /// set of floating point representations. Returns false
+ /// otherwise, including for implementations with no infinity
+ /// representations.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/isinf.xml">GLSL isinf man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> isinf(vec<L, T, Q> const& x);
+
+ /// Returns a signed integer value representing
+ /// the encoding of a floating-point value. The floating-point
+ /// value's bit-level representation is preserved.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/floatBitsToInt.xml">GLSL floatBitsToInt man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ GLM_FUNC_DECL int floatBitsToInt(float const& v);
+
+ /// Returns a signed integer value representing
+ /// the encoding of a floating-point value. The floating-point
+ /// value's bit-level representation is preserved.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/floatBitsToInt.xml">GLSL floatBitsToInt man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> floatBitsToInt(vec<L, float, Q> const& v);
+
+ /// Returns an unsigned integer value representing
+ /// the encoding of a floating-point value. The floating-point
+ /// value's bit-level representation is preserved.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/floatBitsToUint.xml">GLSL floatBitsToUint man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ GLM_FUNC_DECL uint floatBitsToUint(float const& v);
+
+ /// Returns an unsigned integer value representing
+ /// the encoding of a floating-point value. The floating-point
+ /// value's bit-level representation is preserved.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/floatBitsToUint.xml">GLSL floatBitsToUint man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, uint, Q> floatBitsToUint(vec<L, float, Q> const& v);
+
+ /// Returns a floating-point value corresponding to a signed
+ /// integer encoding of a floating-point value.
+ /// If an inf or NaN is passed in, it will not signal, and the
+ /// resulting floating point value is unspecified. Otherwise,
+ /// the bit-level representation is preserved.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/intBitsToFloat.xml">GLSL intBitsToFloat man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ GLM_FUNC_DECL float intBitsToFloat(int const& v);
+
+ /// Returns a floating-point value corresponding to a signed
+ /// integer encoding of a floating-point value.
+ /// If an inf or NaN is passed in, it will not signal, and the
+ /// resulting floating point value is unspecified. Otherwise,
+ /// the bit-level representation is preserved.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/intBitsToFloat.xml">GLSL intBitsToFloat man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, float, Q> intBitsToFloat(vec<L, int, Q> const& v);
+
+ /// Returns a floating-point value corresponding to a
+ /// unsigned integer encoding of a floating-point value.
+ /// If an inf or NaN is passed in, it will not signal, and the
+ /// resulting floating point value is unspecified. Otherwise,
+ /// the bit-level representation is preserved.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/uintBitsToFloat.xml">GLSL uintBitsToFloat man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ GLM_FUNC_DECL float uintBitsToFloat(uint const& v);
+
+ /// Returns a floating-point value corresponding to a
+ /// unsigned integer encoding of a floating-point value.
+ /// If an inf or NaN is passed in, it will not signal, and the
+ /// resulting floating point value is unspecified. Otherwise,
+ /// the bit-level representation is preserved.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/uintBitsToFloat.xml">GLSL uintBitsToFloat man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, float, Q> uintBitsToFloat(vec<L, uint, Q> const& v);
+
+ /// Computes and returns a * b + c.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/fma.xml">GLSL fma man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType fma(genType const& a, genType const& b, genType const& c);
+
+ /// Splits x into a floating-point significand in the range
+ /// [0.5, 1.0) and an integral exponent of two, such that:
+ /// x = significand * exp(2, exponent)
+ ///
+ /// The significand is returned by the function and the
+ /// exponent is returned in the parameter exp. For a
+ /// floating-point value of zero, the significand and exponent
+ /// are both zero. For a floating-point value that is an
+ /// infinity or is not a number, the results are undefined.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/frexp.xml">GLSL frexp man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType frexp(genType x, int& exp);
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> frexp(vec<L, T, Q> const& v, vec<L, int, Q>& exp);
+
+ /// Builds a floating-point number from x and the
+ /// corresponding integral exponent of two in exp, returning:
+ /// significand * exp(2, exponent)
+ ///
+ /// If this product is too large to be represented in the
+ /// floating-point type, the result is undefined.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/ldexp.xml">GLSL ldexp man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL genType ldexp(genType const& x, int const& exp);
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> ldexp(vec<L, T, Q> const& v, vec<L, int, Q> const& exp);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_common.inl"
+
diff --git a/3rdparty/glm/source/glm/detail/_features.hpp b/3rdparty/glm/source/glm/detail/_features.hpp
new file mode 100644
index 0000000..b0cbe9f
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/_features.hpp
@@ -0,0 +1,394 @@
+#pragma once
+
+// #define GLM_CXX98_EXCEPTIONS
+// #define GLM_CXX98_RTTI
+
+// #define GLM_CXX11_RVALUE_REFERENCES
+// Rvalue references - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html
+
+// GLM_CXX11_TRAILING_RETURN
+// Rvalue references for *this - GCC not supported
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm
+
+// GLM_CXX11_NONSTATIC_MEMBER_INIT
+// Initialization of class objects by rvalues - GCC any
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html
+
+// GLM_CXX11_NONSTATIC_MEMBER_INIT
+// Non-static data member initializers - GCC 4.7
+// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm
+
+// #define GLM_CXX11_VARIADIC_TEMPLATE
+// Variadic templates - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf
+
+//
+// Extending variadic template template parameters - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf
+
+// #define GLM_CXX11_GENERALIZED_INITIALIZERS
+// Initializer lists - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm
+
+// #define GLM_CXX11_STATIC_ASSERT
+// Static assertions - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html
+
+// #define GLM_CXX11_AUTO_TYPE
+// auto-typed variables - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf
+
+// #define GLM_CXX11_AUTO_TYPE
+// Multi-declarator auto - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf
+
+// #define GLM_CXX11_AUTO_TYPE
+// Removal of auto as a storage-class specifier - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm
+
+// #define GLM_CXX11_AUTO_TYPE
+// New function declarator syntax - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm
+
+// #define GLM_CXX11_LAMBDAS
+// New wording for C++0x lambdas - GCC 4.5
+// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf
+
+// #define GLM_CXX11_DECLTYPE
+// Declared type of an expression - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf
+
+//
+// Right angle brackets - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html
+
+//
+// Default template arguments for function templates DR226 GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226
+
+//
+// Solving the SFINAE problem for expressions DR339 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html
+
+// #define GLM_CXX11_ALIAS_TEMPLATE
+// Template aliases N2258 GCC 4.7
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf
+
+//
+// Extern templates N1987 Yes
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm
+
+// #define GLM_CXX11_NULLPTR
+// Null pointer constant N2431 GCC 4.6
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf
+
+// #define GLM_CXX11_STRONG_ENUMS
+// Strongly-typed enums N2347 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf
+
+//
+// Forward declarations for enums N2764 GCC 4.6
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf
+
+//
+// Generalized attributes N2761 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf
+
+//
+// Generalized constant expressions N2235 GCC 4.6
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf
+
+//
+// Alignment support N2341 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf
+
+// #define GLM_CXX11_DELEGATING_CONSTRUCTORS
+// Delegating constructors N1986 GCC 4.7
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf
+
+//
+// Inheriting constructors N2540 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm
+
+// #define GLM_CXX11_EXPLICIT_CONVERSIONS
+// Explicit conversion operators N2437 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf
+
+//
+// New character types N2249 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html
+
+//
+// Unicode string literals N2442 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm
+
+//
+// Raw string literals N2442 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm
+
+//
+// Universal character name literals N2170 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html
+
+// #define GLM_CXX11_USER_LITERALS
+// User-defined literals N2765 GCC 4.7
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf
+
+//
+// Standard Layout Types N2342 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm
+
+// #define GLM_CXX11_DEFAULTED_FUNCTIONS
+// #define GLM_CXX11_DELETED_FUNCTIONS
+// Defaulted and deleted functions N2346 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm
+
+//
+// Extended friend declarations N1791 GCC 4.7
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf
+
+//
+// Extending sizeof N2253 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html
+
+// #define GLM_CXX11_INLINE_NAMESPACES
+// Inline namespaces N2535 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm
+
+// #define GLM_CXX11_UNRESTRICTED_UNIONS
+// Unrestricted unions N2544 GCC 4.6
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf
+
+// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS
+// Local and unnamed types as template arguments N2657 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm
+
+// #define GLM_CXX11_RANGE_FOR
+// Range-based for N2930 GCC 4.6
+// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html
+
+// #define GLM_CXX11_OVERRIDE_CONTROL
+// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7
+// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm
+
+//
+// Minimal support for garbage collection and reachability-based leak detection N2670 No
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm
+
+// #define GLM_CXX11_NOEXCEPT
+// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only)
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html
+
+//
+// Defining move special member functions N3053 GCC 4.6
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html
+
+//
+// Sequence points N2239 Yes
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html
+
+//
+// Atomic operations N2427 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html
+
+//
+// Strong Compare and Exchange N2748 GCC 4.5
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html
+
+//
+// Bidirectional Fences N2752 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm
+
+//
+// Memory model N2429 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm
+
+//
+// Data-dependency ordering: atomics and memory model N2664 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm
+
+//
+// Propagating exceptions N2179 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html
+
+//
+// Abandoning a process and at_quick_exit N2440 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm
+
+//
+// Allow atomics use in signal handlers N2547 Yes
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm
+
+//
+// Thread-local storage N2659 GCC 4.8
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm
+
+//
+// Dynamic initialization and destruction with concurrency N2660 GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm
+
+//
+// __func__ predefined identifier N2340 GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm
+
+//
+// C99 preprocessor N1653 GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm
+
+//
+// long long N1811 GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf
+
+//
+// Extended integral types N1988 Yes
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf
+
+#if(GLM_COMPILER & GLM_COMPILER_GCC)
+
+# define GLM_CXX11_STATIC_ASSERT
+
+#elif(GLM_COMPILER & GLM_COMPILER_CLANG)
+# if(__has_feature(cxx_exceptions))
+# define GLM_CXX98_EXCEPTIONS
+# endif
+
+# if(__has_feature(cxx_rtti))
+# define GLM_CXX98_RTTI
+# endif
+
+# if(__has_feature(cxx_access_control_sfinae))
+# define GLM_CXX11_ACCESS_CONTROL_SFINAE
+# endif
+
+# if(__has_feature(cxx_alias_templates))
+# define GLM_CXX11_ALIAS_TEMPLATE
+# endif
+
+# if(__has_feature(cxx_alignas))
+# define GLM_CXX11_ALIGNAS
+# endif
+
+# if(__has_feature(cxx_attributes))
+# define GLM_CXX11_ATTRIBUTES
+# endif
+
+# if(__has_feature(cxx_constexpr))
+# define GLM_CXX11_CONSTEXPR
+# endif
+
+# if(__has_feature(cxx_decltype))
+# define GLM_CXX11_DECLTYPE
+# endif
+
+# if(__has_feature(cxx_default_function_template_args))
+# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS
+# endif
+
+# if(__has_feature(cxx_defaulted_functions))
+# define GLM_CXX11_DEFAULTED_FUNCTIONS
+# endif
+
+# if(__has_feature(cxx_delegating_constructors))
+# define GLM_CXX11_DELEGATING_CONSTRUCTORS
+# endif
+
+# if(__has_feature(cxx_deleted_functions))
+# define GLM_CXX11_DELETED_FUNCTIONS
+# endif
+
+# if(__has_feature(cxx_explicit_conversions))
+# define GLM_CXX11_EXPLICIT_CONVERSIONS
+# endif
+
+# if(__has_feature(cxx_generalized_initializers))
+# define GLM_CXX11_GENERALIZED_INITIALIZERS
+# endif
+
+# if(__has_feature(cxx_implicit_moves))
+# define GLM_CXX11_IMPLICIT_MOVES
+# endif
+
+# if(__has_feature(cxx_inheriting_constructors))
+# define GLM_CXX11_INHERITING_CONSTRUCTORS
+# endif
+
+# if(__has_feature(cxx_inline_namespaces))
+# define GLM_CXX11_INLINE_NAMESPACES
+# endif
+
+# if(__has_feature(cxx_lambdas))
+# define GLM_CXX11_LAMBDAS
+# endif
+
+# if(__has_feature(cxx_local_type_template_args))
+# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS
+# endif
+
+# if(__has_feature(cxx_noexcept))
+# define GLM_CXX11_NOEXCEPT
+# endif
+
+# if(__has_feature(cxx_nonstatic_member_init))
+# define GLM_CXX11_NONSTATIC_MEMBER_INIT
+# endif
+
+# if(__has_feature(cxx_nullptr))
+# define GLM_CXX11_NULLPTR
+# endif
+
+# if(__has_feature(cxx_override_control))
+# define GLM_CXX11_OVERRIDE_CONTROL
+# endif
+
+# if(__has_feature(cxx_reference_qualified_functions))
+# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS
+# endif
+
+# if(__has_feature(cxx_range_for))
+# define GLM_CXX11_RANGE_FOR
+# endif
+
+# if(__has_feature(cxx_raw_string_literals))
+# define GLM_CXX11_RAW_STRING_LITERALS
+# endif
+
+# if(__has_feature(cxx_rvalue_references))
+# define GLM_CXX11_RVALUE_REFERENCES
+# endif
+
+# if(__has_feature(cxx_static_assert))
+# define GLM_CXX11_STATIC_ASSERT
+# endif
+
+# if(__has_feature(cxx_auto_type))
+# define GLM_CXX11_AUTO_TYPE
+# endif
+
+# if(__has_feature(cxx_strong_enums))
+# define GLM_CXX11_STRONG_ENUMS
+# endif
+
+# if(__has_feature(cxx_trailing_return))
+# define GLM_CXX11_TRAILING_RETURN
+# endif
+
+# if(__has_feature(cxx_unicode_literals))
+# define GLM_CXX11_UNICODE_LITERALS
+# endif
+
+# if(__has_feature(cxx_unrestricted_unions))
+# define GLM_CXX11_UNRESTRICTED_UNIONS
+# endif
+
+# if(__has_feature(cxx_user_literals))
+# define GLM_CXX11_USER_LITERALS
+# endif
+
+# if(__has_feature(cxx_variadic_templates))
+# define GLM_CXX11_VARIADIC_TEMPLATES
+# endif
+
+#endif//(GLM_COMPILER & GLM_COMPILER_CLANG)
diff --git a/3rdparty/glm/source/glm/detail/_fixes.hpp b/3rdparty/glm/source/glm/detail/_fixes.hpp
new file mode 100644
index 0000000..a503c7c
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/_fixes.hpp
@@ -0,0 +1,27 @@
+#include <cmath>
+
+//! Workaround for compatibility with other libraries
+#ifdef max
+#undef max
+#endif
+
+//! Workaround for compatibility with other libraries
+#ifdef min
+#undef min
+#endif
+
+//! Workaround for Android
+#ifdef isnan
+#undef isnan
+#endif
+
+//! Workaround for Android
+#ifdef isinf
+#undef isinf
+#endif
+
+//! Workaround for Chrome Native Client
+#ifdef log2
+#undef log2
+#endif
+
diff --git a/3rdparty/glm/source/glm/detail/_noise.hpp b/3rdparty/glm/source/glm/detail/_noise.hpp
new file mode 100644
index 0000000..5a874a0
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/_noise.hpp
@@ -0,0 +1,81 @@
+#pragma once
+
+#include "../common.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER T mod289(T const& x)
+ {
+ return x - floor(x * (static_cast<T>(1.0) / static_cast<T>(289.0))) * static_cast<T>(289.0);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T permute(T const& x)
+ {
+ return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> permute(vec<2, T, Q> const& x)
+ {
+ return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> permute(vec<3, T, Q> const& x)
+ {
+ return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> permute(vec<4, T, Q> const& x)
+ {
+ return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T taylorInvSqrt(T const& r)
+ {
+ return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> taylorInvSqrt(vec<2, T, Q> const& r)
+ {
+ return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> taylorInvSqrt(vec<3, T, Q> const& r)
+ {
+ return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> taylorInvSqrt(vec<4, T, Q> const& r)
+ {
+ return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> fade(vec<2, T, Q> const& t)
+ {
+ return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> fade(vec<3, T, Q> const& t)
+ {
+ return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> fade(vec<4, T, Q> const& t)
+ {
+ return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
+ }
+}//namespace detail
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/detail/_swizzle.hpp b/3rdparty/glm/source/glm/detail/_swizzle.hpp
new file mode 100644
index 0000000..87896ef
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/_swizzle.hpp
@@ -0,0 +1,804 @@
+#pragma once
+
+namespace glm{
+namespace detail
+{
+ // Internal class for implementing swizzle operators
+ template<typename T, int N>
+ struct _swizzle_base0
+ {
+ protected:
+ GLM_FUNC_QUALIFIER T& elem(size_t i){ return (reinterpret_cast<T*>(_buffer))[i]; }
+ GLM_FUNC_QUALIFIER T const& elem(size_t i) const{ return (reinterpret_cast<const T*>(_buffer))[i]; }
+
+ // Use an opaque buffer to *ensure* the compiler doesn't call a constructor.
+ // The size 1 buffer is assumed to be aligned to the actual members so that
+ // the elem() accessors can index into the vector's component storage.
+ char _buffer[1];
+ };
+
+ template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, bool Aligned>
+ struct _swizzle_base1 : public _swizzle_base0<T, N>
+ {
+ };
+
+ template<typename T, qualifier Q, int E0, int E1, bool Aligned>
+ struct _swizzle_base1<2, T, Q, E0,E1,-1,-2, Aligned> : public _swizzle_base0<T, 2>
+ {
+ GLM_FUNC_QUALIFIER vec<2, T, Q> operator ()() const { return vec<2, T, Q>(this->elem(E0), this->elem(E1)); }
+ };
+
+ template<typename T, qualifier Q, int E0, int E1, int E2, bool Aligned>
+ struct _swizzle_base1<3, T, Q, E0,E1,E2,-1, Aligned> : public _swizzle_base0<T, 3>
+ {
+ GLM_FUNC_QUALIFIER vec<3, T, Q> operator ()() const { return vec<3, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2)); }
+ };
+
+ template<typename T, qualifier Q, int E0, int E1, int E2, int E3, bool Aligned>
+ struct _swizzle_base1<4, T, Q, E0,E1,E2,E3, Aligned> : public _swizzle_base0<T, 4>
+ {
+ GLM_FUNC_QUALIFIER vec<4, T, Q> operator ()() const { return vec<4, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); }
+ };
+
+ // Internal class for implementing swizzle operators
+ /*
+ Template parameters:
+
+ T = type of scalar values (e.g. float, double)
+ N = number of components in the vector (e.g. 3)
+ E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec
+
+ DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles
+ containing duplicate elements so that they cannot be used as r-values).
+ */
+ template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, int DUPLICATE_ELEMENTS>
+ struct _swizzle_base2 : public _swizzle_base1<N, T, Q, E0,E1,E2,E3, detail::is_aligned<Q>::value>
+ {
+ struct op_equal
+ {
+ GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e = t; }
+ };
+
+ struct op_minus
+ {
+ GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e -= t; }
+ };
+
+ struct op_plus
+ {
+ GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e += t; }
+ };
+
+ struct op_mul
+ {
+ GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e *= t; }
+ };
+
+ struct op_div
+ {
+ GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e /= t; }
+ };
+
+ public:
+ GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const T& t)
+ {
+ for (int i = 0; i < N; ++i)
+ (*this)[i] = t;
+ return *this;
+ }
+
+ GLM_FUNC_QUALIFIER _swizzle_base2& operator= (vec<N, T, Q> const& that)
+ {
+ _apply_op(that, op_equal());
+ return *this;
+ }
+
+ GLM_FUNC_QUALIFIER void operator -= (vec<N, T, Q> const& that)
+ {
+ _apply_op(that, op_minus());
+ }
+
+ GLM_FUNC_QUALIFIER void operator += (vec<N, T, Q> const& that)
+ {
+ _apply_op(that, op_plus());
+ }
+
+ GLM_FUNC_QUALIFIER void operator *= (vec<N, T, Q> const& that)
+ {
+ _apply_op(that, op_mul());
+ }
+
+ GLM_FUNC_QUALIFIER void operator /= (vec<N, T, Q> const& that)
+ {
+ _apply_op(that, op_div());
+ }
+
+ GLM_FUNC_QUALIFIER T& operator[](size_t i)
+ {
+ const int offset_dst[4] = { E0, E1, E2, E3 };
+ return this->elem(offset_dst[i]);
+ }
+ GLM_FUNC_QUALIFIER T operator[](size_t i) const
+ {
+ const int offset_dst[4] = { E0, E1, E2, E3 };
+ return this->elem(offset_dst[i]);
+ }
+
+ protected:
+ template<typename U>
+ GLM_FUNC_QUALIFIER void _apply_op(vec<N, T, Q> const& that, const U& op)
+ {
+ // Make a copy of the data in case this == &that.
+ // The compiler should optimize out the copy in cases where the function is
+ // properly inlined and the copy is not necessary.
+ T t[N];
+ for (int i = 0; i < N; ++i)
+ t[i] = that[i];
+ for (int i = 0; i < N; ++i)
+ op( (*this)[i], t[i] );
+ }
+ };
+
+ // Specialization for swizzles containing duplicate elements. These cannot be modified.
+ template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base2<N, T, Q, E0,E1,E2,E3, 1> : public _swizzle_base1<N, T, Q, E0,E1,E2,E3, detail::is_aligned<Q>::value>
+ {
+ struct Stub {};
+
+ GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const&) { return *this; }
+
+ GLM_FUNC_QUALIFIER T operator[] (size_t i) const
+ {
+ const int offset_dst[4] = { E0, E1, E2, E3 };
+ return this->elem(offset_dst[i]);
+ }
+ };
+
+ template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle : public _swizzle_base2<N, T, Q, E0, E1, E2, E3, (E0 == E1 || E0 == E2 || E0 == E3 || E1 == E2 || E1 == E3 || E2 == E3)>
+ {
+ typedef _swizzle_base2<N, T, Q, E0, E1, E2, E3, (E0 == E1 || E0 == E2 || E0 == E3 || E1 == E2 || E1 == E3 || E2 == E3)> base_type;
+
+ using base_type::operator=;
+
+ GLM_FUNC_QUALIFIER operator vec<N, T, Q> () const { return (*this)(); }
+ };
+
+//
+// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros
+//
+#define GLM_SWIZZLE_TEMPLATE1 template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
+#define GLM_SWIZZLE_TEMPLATE2 template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, int F0, int F1, int F2, int F3>
+#define GLM_SWIZZLE_TYPE1 _swizzle<N, T, Q, E0, E1, E2, E3>
+#define GLM_SWIZZLE_TYPE2 _swizzle<N, T, Q, F0, F1, F2, F3>
+
+//
+// Wrapper for a binary operator (e.g. u.yy + v.zy)
+//
+#define GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \
+ GLM_SWIZZLE_TEMPLATE2 \
+ GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \
+ { \
+ return a() OPERAND b(); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const vec<N, T, Q>& b) \
+ { \
+ return a() OPERAND b; \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const vec<N, T, Q>& a, const GLM_SWIZZLE_TYPE1& b) \
+ { \
+ return a OPERAND b(); \
+ }
+
+//
+// Wrapper for an operand between a swizzle and a scalar (e.g. 1.0f - u.xyz)
+//
+#define GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const T& b) \
+ { \
+ return a() OPERAND b; \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const T& a, const GLM_SWIZZLE_TYPE1& b) \
+ { \
+ return a OPERAND b(); \
+ }
+
+//
+// Macro for wrapping a function taking one argument (e.g. abs())
+//
+#define GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION) \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a) \
+ { \
+ return FUNCTION(a()); \
+ }
+
+//
+// Macro for wrapping a function taking two vector arguments (e.g. dot()).
+//
+#define GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION) \
+ GLM_SWIZZLE_TEMPLATE2 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \
+ { \
+ return FUNCTION(a(), b()); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b) \
+ { \
+ return FUNCTION(a(), b()); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename V& b) \
+ { \
+ return FUNCTION(a(), b); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const GLM_SWIZZLE_TYPE1& b) \
+ { \
+ return FUNCTION(a, b()); \
+ }
+
+//
+// Macro for wrapping a function take 2 vec arguments followed by a scalar (e.g. mix()).
+//
+#define GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \
+ GLM_SWIZZLE_TEMPLATE2 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b, const T& c) \
+ { \
+ return FUNCTION(a(), b(), c); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \
+ { \
+ return FUNCTION(a(), b(), c); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\
+ { \
+ return FUNCTION(a(), b, c); \
+ } \
+ GLM_SWIZZLE_TEMPLATE1 \
+ GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \
+ { \
+ return FUNCTION(a, b(), c); \
+ }
+
+}//namespace detail
+}//namespace glm
+
+namespace glm
+{
+ namespace detail
+ {
+ GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-)
+ GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*)
+ GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+)
+ GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-)
+ GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*)
+ GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/)
+ }
+
+ //
+ // Swizzles are distinct types from the unswizzled type. The below macros will
+ // provide template specializations for the swizzle types for the given functions
+ // so that the compiler does not have any ambiguity to choosing how to handle
+ // the function.
+ //
+ // The alternative is to use the operator()() when calling the function in order
+ // to explicitly convert the swizzled type to the unswizzled type.
+ //
+
+ //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs);
+ //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos);
+ //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh);
+ //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all);
+ //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any);
+
+ //GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot);
+ //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross);
+ //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step);
+ //GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix);
+}
+
+#define GLM_SWIZZLE2_2_MEMBERS(T, Q, E0,E1) \
+ struct { detail::_swizzle<2, T, Q, 0,0,-1,-2> E0 ## E0; }; \
+ struct { detail::_swizzle<2, T, Q, 0,1,-1,-2> E0 ## E1; }; \
+ struct { detail::_swizzle<2, T, Q, 1,0,-1,-2> E1 ## E0; }; \
+ struct { detail::_swizzle<2, T, Q, 1,1,-1,-2> E1 ## E1; };
+
+#define GLM_SWIZZLE2_3_MEMBERS(T, Q, E0,E1) \
+ struct { detail::_swizzle<3,T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3,T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3,T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3,T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3,T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3,T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3,T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3,T, Q, 1,1,1,-1> E1 ## E1 ## E1; };
+
+#define GLM_SWIZZLE2_4_MEMBERS(T, Q, E0,E1) \
+ struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; };
+
+#define GLM_SWIZZLE3_2_MEMBERS(T, Q, E0,E1,E2) \
+ struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \
+ struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \
+ struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; };
+
+#define GLM_SWIZZLE3_3_MEMBERS(T, Q ,E0,E1,E2) \
+ struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,2,-1> E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; };
+
+#define GLM_SWIZZLE3_4_MEMBERS(T, Q, E0,E1,E2) \
+ struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4,T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; };
+
+// Declares all 16 (4*4) two-component swizzle members (E0E0 .. E3E3) of a
+// four-component vector as anonymous structs wrapping detail::_swizzle.
+// NOTE(review): the trailing -1,-2 template arguments appear to mark unused
+// lanes — semantics are defined by detail::_swizzle (not shown here); confirm.
+#define GLM_SWIZZLE4_2_MEMBERS(T, Q, E0,E1,E2,E3) \
+ struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \
+ struct { detail::_swizzle<2,T, Q, 0,3,-1,-2> E0 ## E3; }; \
+ struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \
+ struct { detail::_swizzle<2,T, Q, 1,3,-1,-2> E1 ## E3; }; \
+ struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; \
+ struct { detail::_swizzle<2,T, Q, 2,3,-1,-2> E2 ## E3; }; \
+ struct { detail::_swizzle<2,T, Q, 3,0,-1,-2> E3 ## E0; }; \
+ struct { detail::_swizzle<2,T, Q, 3,1,-1,-2> E3 ## E1; }; \
+ struct { detail::_swizzle<2,T, Q, 3,2,-1,-2> E3 ## E2; }; \
+ struct { detail::_swizzle<2,T, Q, 3,3,-1,-2> E3 ## E3; };
+
+// Declares all 64 (4^3) three-component swizzle members (E0E0E0 .. E3E3E3)
+// of a four-component vector as anonymous structs wrapping detail::_swizzle.
+// NOTE(review): the trailing -1 template argument appears to mark the unused
+// fourth lane — semantics are defined by detail::_swizzle (not shown); confirm.
+#define GLM_SWIZZLE4_3_MEMBERS(T, Q, E0,E1,E2,E3) \
+ struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 0,0,3,-1> E0 ## E0 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,2,-1> E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 0,1,3,-1> E0 ## E1 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 0,2,3,-1> E0 ## E2 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 0,3,0,-1> E0 ## E3 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 0,3,1,-1> E0 ## E3 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 0,3,2,-1> E0 ## E3 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 0,3,3,-1> E0 ## E3 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,0,3,-1> E1 ## E0 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,1,3,-1> E1 ## E1 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,2,3,-1> E1 ## E2 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 1,3,0,-1> E1 ## E3 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 1,3,1,-1> E1 ## E3 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 1,3,2,-1> E1 ## E3 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 1,3,3,-1> E1 ## E3 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,0,3,-1> E2 ## E0 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,1,3,-1> E2 ## E1 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,2,3,-1> E2 ## E2 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 2,3,0,-1> E2 ## E3 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 2,3,1,-1> E2 ## E3 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 2,3,2,-1> E2 ## E3 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 2,3,3,-1> E2 ## E3 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 3,0,0,-1> E3 ## E0 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 3,0,1,-1> E3 ## E0 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 3,0,2,-1> E3 ## E0 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 3,0,3,-1> E3 ## E0 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 3,1,0,-1> E3 ## E1 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 3,1,1,-1> E3 ## E1 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 3,1,2,-1> E3 ## E1 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 3,1,3,-1> E3 ## E1 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 3,2,0,-1> E3 ## E2 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 3,2,1,-1> E3 ## E2 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 3,2,2,-1> E3 ## E2 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 3,2,3,-1> E3 ## E2 ## E3; }; \
+ struct { detail::_swizzle<3, T, Q, 3,3,0,-1> E3 ## E3 ## E0; }; \
+ struct { detail::_swizzle<3, T, Q, 3,3,1,-1> E3 ## E3 ## E1; }; \
+ struct { detail::_swizzle<3, T, Q, 3,3,2,-1> E3 ## E3 ## E2; }; \
+ struct { detail::_swizzle<3, T, Q, 3,3,3,-1> E3 ## E3 ## E3; };
+
+// Declares all 256 (4^4) four-component swizzle members (E0E0E0E0 .. E3E3E3E3)
+// of a four-component vector as anonymous structs wrapping detail::_swizzle.
+// Member names are produced by token-pasting the element letters (E0..E3) in
+// lexicographic index order, matching the index quadruple in the template args.
+#define GLM_SWIZZLE4_4_MEMBERS(T, Q, E0,E1,E2,E3) \
+ struct { detail::_swizzle<4, T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,0,3> E0 ## E0 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,1,3> E0 ## E0 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,2,3> E0 ## E0 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,3,0> E0 ## E0 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,3,1> E0 ## E0 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,3,2> E0 ## E0 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,0,3,3> E0 ## E0 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,0,3> E0 ## E1 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,1,3> E0 ## E1 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,2,3> E0 ## E1 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,3,0> E0 ## E1 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,3,1> E0 ## E1 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,3,2> E0 ## E1 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,1,3,3> E0 ## E1 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,0,3> E0 ## E2 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,1,3> E0 ## E2 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,2,3> E0 ## E2 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,3,0> E0 ## E2 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,3,1> E0 ## E2 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,3,2> E0 ## E2 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,2,3,3> E0 ## E2 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,0,0> E0 ## E3 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,0,1> E0 ## E3 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,0,2> E0 ## E3 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,0,3> E0 ## E3 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,1,0> E0 ## E3 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,1,1> E0 ## E3 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,1,2> E0 ## E3 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,1,3> E0 ## E3 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,2,0> E0 ## E3 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,2,1> E0 ## E3 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,2,2> E0 ## E3 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,2,3> E0 ## E3 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,3,0> E0 ## E3 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,3,1> E0 ## E3 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,3,2> E0 ## E3 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 0,3,3,3> E0 ## E3 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,0,3> E1 ## E0 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,1,3> E1 ## E0 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,2,3> E1 ## E0 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,3,0> E1 ## E0 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,3,1> E1 ## E0 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,3,2> E1 ## E0 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,0,3,3> E1 ## E0 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,0,3> E1 ## E1 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,1,3> E1 ## E1 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,2,3> E1 ## E1 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,3,0> E1 ## E1 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,3,1> E1 ## E1 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,3,2> E1 ## E1 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,1,3,3> E1 ## E1 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,0,3> E1 ## E2 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,1,3> E1 ## E2 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,2,3> E1 ## E2 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,3,0> E1 ## E2 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,3,1> E1 ## E2 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,3,2> E1 ## E2 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,2,3,3> E1 ## E2 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,0,0> E1 ## E3 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,0,1> E1 ## E3 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,0,2> E1 ## E3 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,0,3> E1 ## E3 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,1,0> E1 ## E3 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,1,1> E1 ## E3 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,1,2> E1 ## E3 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,1,3> E1 ## E3 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,2,0> E1 ## E3 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,2,1> E1 ## E3 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,2,2> E1 ## E3 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,2,3> E1 ## E3 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,3,0> E1 ## E3 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,3,1> E1 ## E3 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,3,2> E1 ## E3 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 1,3,3,3> E1 ## E3 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,0,3> E2 ## E0 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,1,3> E2 ## E0 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,2,3> E2 ## E0 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,3,0> E2 ## E0 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,3,1> E2 ## E0 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,3,2> E2 ## E0 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,0,3,3> E2 ## E0 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,0,3> E2 ## E1 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,1,3> E2 ## E1 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,2,3> E2 ## E1 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,3,0> E2 ## E1 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,3,1> E2 ## E1 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,3,2> E2 ## E1 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,1,3,3> E2 ## E1 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,0,3> E2 ## E2 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,1,3> E2 ## E2 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,2,3> E2 ## E2 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,3,0> E2 ## E2 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,3,1> E2 ## E2 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,3,2> E2 ## E2 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,2,3,3> E2 ## E2 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,0,0> E2 ## E3 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,0,1> E2 ## E3 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,0,2> E2 ## E3 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,0,3> E2 ## E3 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,1,0> E2 ## E3 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,1,1> E2 ## E3 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,1,2> E2 ## E3 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,1,3> E2 ## E3 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,2,0> E2 ## E3 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,2,1> E2 ## E3 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,2,2> E2 ## E3 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,2,3> E2 ## E3 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,3,0> E2 ## E3 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,3,1> E2 ## E3 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,3,2> E2 ## E3 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 2,3,3,3> E2 ## E3 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,0,0> E3 ## E0 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,0,1> E3 ## E0 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,0,2> E3 ## E0 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,0,3> E3 ## E0 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,1,0> E3 ## E0 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,1,1> E3 ## E0 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,1,2> E3 ## E0 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,1,3> E3 ## E0 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,2,0> E3 ## E0 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,2,1> E3 ## E0 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,2,2> E3 ## E0 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,2,3> E3 ## E0 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,3,0> E3 ## E0 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,3,1> E3 ## E0 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,3,2> E3 ## E0 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,0,3,3> E3 ## E0 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,0,0> E3 ## E1 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,0,1> E3 ## E1 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,0,2> E3 ## E1 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,0,3> E3 ## E1 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,1,0> E3 ## E1 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,1,1> E3 ## E1 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,1,2> E3 ## E1 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,1,3> E3 ## E1 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,2,0> E3 ## E1 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,2,1> E3 ## E1 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,2,2> E3 ## E1 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,2,3> E3 ## E1 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,3,0> E3 ## E1 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,3,1> E3 ## E1 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,3,2> E3 ## E1 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,1,3,3> E3 ## E1 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,0,0> E3 ## E2 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,0,1> E3 ## E2 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,0,2> E3 ## E2 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,0,3> E3 ## E2 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,1,0> E3 ## E2 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,1,1> E3 ## E2 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,1,2> E3 ## E2 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,1,3> E3 ## E2 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,2,0> E3 ## E2 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,2,1> E3 ## E2 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,2,2> E3 ## E2 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,2,3> E3 ## E2 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,3,0> E3 ## E2 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,3,1> E3 ## E2 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,3,2> E3 ## E2 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,2,3,3> E3 ## E2 ## E3 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,0,0> E3 ## E3 ## E0 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,0,1> E3 ## E3 ## E0 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,0,2> E3 ## E3 ## E0 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,0,3> E3 ## E3 ## E0 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,1,0> E3 ## E3 ## E1 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,1,1> E3 ## E3 ## E1 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,1,2> E3 ## E3 ## E1 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,1,3> E3 ## E3 ## E1 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,2,0> E3 ## E3 ## E2 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,2,1> E3 ## E3 ## E2 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,2,2> E3 ## E3 ## E2 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,2,3> E3 ## E3 ## E2 ## E3; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,3,0> E3 ## E3 ## E3 ## E0; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,3,1> E3 ## E3 ## E3 ## E1; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,3,2> E3 ## E3 ## E3 ## E2; }; \
+ struct { detail::_swizzle<4, T, Q, 3,3,3,3> E3 ## E3 ## E3 ## E3; };
diff --git a/3rdparty/glm/source/glm/detail/_swizzle_func.hpp b/3rdparty/glm/source/glm/detail/_swizzle_func.hpp
new file mode 100644
index 0000000..d93c6af
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/_swizzle_func.hpp
@@ -0,0 +1,682 @@
+#pragma once
+
+#define GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, CONST, A, B) \
+ vec<2, T, Q> A ## B() CONST \
+ { \
+ return vec<2, T, Q>(this->A, this->B); \
+ }
+
+#define GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, CONST, A, B, C) \
+ vec<3, T, Q> A ## B ## C() CONST \
+ { \
+ return vec<3, T, Q>(this->A, this->B, this->C); \
+ }
+
+#define GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, CONST, A, B, C, D) \
+ vec<4, T, Q> A ## B ## C ## D() CONST \
+ { \
+ return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
+ }
+
+#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, P, L, CONST, A, B) \
+ template<typename T> \
+ vec<L, T, Q> vec<L, T, Q>::A ## B() CONST \
+ { \
+ return vec<2, T, Q>(this->A, this->B); \
+ }
+
+#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, P, L, CONST, A, B, C) \
+ template<typename T> \
+ vec<3, T, Q> vec<L, T, Q>::A ## B ## C() CONST \
+ { \
+ return vec<3, T, Q>(this->A, this->B, this->C); \
+ }
+
+#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, P, L, CONST, A, B, C, D) \
+ template<typename T> \
+ vec<4, T, Q> vec<L, T, Q>::A ## B ## C ## D() CONST \
+ { \
+ return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
+ }
+
+#define GLM_MUTABLE
+
+#define GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, B, A)
+
+#define GLM_SWIZZLE_GEN_REF_FROM_VEC2(T, P) \
+ GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, x, y) \
+ GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, r, g) \
+ GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, s, t)
+
+#define GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B)
+
+#define GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, B, A)
+
+#define GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C)
+
+#define GLM_SWIZZLE_GEN_REF_FROM_VEC3(T, P) \
+ GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, x, y, z) \
+ GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, r, g, b) \
+ GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, s, t, p)
+
+#define GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, C)
+
+#define GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, B)
+
+#define GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, C, A)
+
+#define GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D)
+
+#define GLM_SWIZZLE_GEN_REF_FROM_VEC4(T, P) \
+ GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, x, y, z, w) \
+ GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, r, g, b, a) \
+ GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, s, t, p, q)
+
+#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B)
+
+#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B)
+
+#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B)
+
+#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B)
+
+#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, P) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, x, y) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, r, g) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, s, t)
+
+#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C)
+
+#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C)
+
+#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C)
+
+#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C)
+
+#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, P) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, x, y, z) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, r, g, b) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, s, t, p)
+
+#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, D) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, A) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, B) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, C) \
+ GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, D)
+
+#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, A) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, B) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, C) \
+ GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, D)
+
+#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, A) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, B) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, C) \
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, D)
+
+#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+ GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D)
+
+#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, P) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, x, y, z, w) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, r, g, b, a) \
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, s, t, p, q)
+
diff --git a/3rdparty/glm/source/glm/detail/_vectorize.hpp b/3rdparty/glm/source/glm/detail/_vectorize.hpp
new file mode 100644
index 0000000..1fcaec3
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/_vectorize.hpp
@@ -0,0 +1,162 @@
+#pragma once
+
+namespace glm{
+namespace detail
+{
+ template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename R, typename T, qualifier Q>
+ struct functor1{};
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+ struct functor1<vec, 1, R, T, Q>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, R, Q> call(R (*Func) (T x), vec<1, T, Q> const& v)
+ {
+ return vec<1, R, Q>(Func(v.x));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+ struct functor1<vec, 2, R, T, Q>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, R, Q> call(R (*Func) (T x), vec<2, T, Q> const& v)
+ {
+ return vec<2, R, Q>(Func(v.x), Func(v.y));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+ struct functor1<vec, 3, R, T, Q>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, R, Q> call(R (*Func) (T x), vec<3, T, Q> const& v)
+ {
+ return vec<3, R, Q>(Func(v.x), Func(v.y), Func(v.z));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+ struct functor1<vec, 4, R, T, Q>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, R, Q> call(R (*Func) (T x), vec<4, T, Q> const& v)
+ {
+ return vec<4, R, Q>(Func(v.x), Func(v.y), Func(v.z), Func(v.w));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename T, qualifier Q>
+ struct functor2{};
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2<vec, 1, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, vec<1, T, Q> const& b)
+ {
+ return vec<1, T, Q>(Func(a.x, b.x));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2<vec, 2, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, vec<2, T, Q> const& b)
+ {
+ return vec<2, T, Q>(Func(a.x, b.x), Func(a.y, b.y));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2<vec, 3, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, vec<3, T, Q> const& b)
+ {
+ return vec<3, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2<vec, 4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename T, qualifier Q>
+ struct functor2_vec_sca{};
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2_vec_sca<vec, 1, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, T b)
+ {
+ return vec<1, T, Q>(Func(a.x, b));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2_vec_sca<vec, 2, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, T b)
+ {
+ return vec<2, T, Q>(Func(a.x, b), Func(a.y, b));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2_vec_sca<vec, 3, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, T b)
+ {
+ return vec<3, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b));
+ }
+ };
+
+ template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+ struct functor2_vec_sca<vec, 4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, T b)
+ {
+ return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b));
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct functor2_vec_int {};
+
+ template<typename T, qualifier Q>
+ struct functor2_vec_int<1, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<1, int, Q> call(int (*Func) (T x, int y), vec<1, T, Q> const& a, vec<1, int, Q> const& b)
+ {
+ return vec<1, int, Q>(Func(a.x, b.x));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct functor2_vec_int<2, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<2, int, Q> call(int (*Func) (T x, int y), vec<2, T, Q> const& a, vec<2, int, Q> const& b)
+ {
+ return vec<2, int, Q>(Func(a.x, b.x), Func(a.y, b.y));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct functor2_vec_int<3, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<3, int, Q> call(int (*Func) (T x, int y), vec<3, T, Q> const& a, vec<3, int, Q> const& b)
+ {
+ return vec<3, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct functor2_vec_int<4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, int, Q> call(int (*Func) (T x, int y), vec<4, T, Q> const& a, vec<4, int, Q> const& b)
+ {
+ return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w));
+ }
+ };
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/compute_common.hpp b/3rdparty/glm/source/glm/detail/compute_common.hpp
new file mode 100644
index 0000000..7267a49
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/compute_common.hpp
@@ -0,0 +1,50 @@
+#pragma once
+
+#include "setup.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+ template<typename genFIType, bool /*signed*/>
+ struct compute_abs
+ {};
+
+ template<typename genFIType>
+ struct compute_abs<genFIType, true>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<genFIType>::is_iec559 || std::numeric_limits<genFIType>::is_signed,
+ "'abs' only accept floating-point and integer scalar or vector inputs");
+
+ return x >= genFIType(0) ? x : -x;
+ // TODO, perf comp with: *(((int *) &x) + 1) &= 0x7fffffff;
+ }
+ };
+
+#if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+ template<>
+ struct compute_abs<float, true>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static float call(float x)
+ {
+ return fabsf(x);
+ }
+ };
+#endif
+
+ template<typename genFIType>
+ struct compute_abs<genFIType, false>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ (!std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer),
+ "'abs' only accept floating-point and integer scalar or vector inputs");
+ return x;
+ }
+ };
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/compute_vector_relational.hpp b/3rdparty/glm/source/glm/detail/compute_vector_relational.hpp
new file mode 100644
index 0000000..167b634
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/compute_vector_relational.hpp
@@ -0,0 +1,30 @@
+#pragma once
+
+//#include "compute_common.hpp"
+#include "setup.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+ template <typename T, bool isFloat>
+ struct compute_equal
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b)
+ {
+ return a == b;
+ }
+ };
+/*
+ template <typename T>
+ struct compute_equal<T, true>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b)
+ {
+ return detail::compute_abs<T, std::numeric_limits<T>::is_signed>::call(b - a) <= static_cast<T>(0);
+ //return std::memcmp(&a, &b, sizeof(T)) == 0;
+ }
+ };
+*/
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/func_common.inl b/3rdparty/glm/source/glm/detail/func_common.inl
new file mode 100644
index 0000000..86560be
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_common.inl
@@ -0,0 +1,792 @@
+/// @ref core
+/// @file glm/detail/func_common.inl
+
+#include "../vector_relational.hpp"
+#include "compute_common.hpp"
+#include "type_vec1.hpp"
+#include "type_vec2.hpp"
+#include "type_vec3.hpp"
+#include "type_vec4.hpp"
+#include "_vectorize.hpp"
+#include <limits>
+
+namespace glm
+{
+ // min
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType min(genType x, genType y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || std::numeric_limits<genType>::is_integer, "'min' only accept floating-point or integer inputs");
+ return (y < x) ? y : x;
+ }
+
+ // max
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType max(genType x, genType y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || std::numeric_limits<genType>::is_integer, "'max' only accept floating-point or integer inputs");
+
+ return (x < y) ? y : x;
+ }
+
+ // abs
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR int abs(int x)
+ {
+ int const y = x >> (sizeof(int) * 8 - 1);
+ return (x ^ y) - y;
+ }
+
+ // round
+# if GLM_HAS_CXX11_STL
+ using ::std::round;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType round(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'round' only accept floating-point inputs");
+
+ return x < static_cast<genType>(0) ? static_cast<genType>(int(x - static_cast<genType>(0.5))) : static_cast<genType>(int(x + static_cast<genType>(0.5)));
+ }
+# endif
+
+ // trunc
+# if GLM_HAS_CXX11_STL
+ using ::std::trunc;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType trunc(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'trunc' only accept floating-point inputs");
+
+ return x < static_cast<genType>(0) ? -std::floor(-x) : std::floor(x);
+ }
+# endif
+
+}//namespace glm
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_abs_vector
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(abs, x);
+ }
+ };
+
+ template<length_t L, typename T, typename U, qualifier Q, bool Aligned>
+ struct compute_mix_vector
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, U, Q> const& a)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<U>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a");
+
+ return vec<L, T, Q>(vec<L, U, Q>(x) * (static_cast<U>(1) - a) + vec<L, U, Q>(y) * a);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_mix_vector<L, T, bool, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, bool, Q> const& a)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = a[i] ? y[i] : x[i];
+ return Result;
+ }
+ };
+
+ template<length_t L, typename T, typename U, qualifier Q, bool Aligned>
+ struct compute_mix_scalar
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& y, U const& a)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<U>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a");
+
+ return vec<L, T, Q>(vec<L, U, Q>(x) * (static_cast<U>(1) - a) + vec<L, U, Q>(y) * a);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_mix_scalar<L, T, bool, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& y, bool const& a)
+ {
+ return a ? y : x;
+ }
+ };
+
+ template<typename T, typename U>
+ struct compute_mix
+ {
+ GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, U const& a)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<U>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a");
+
+ return static_cast<T>(static_cast<U>(x) * (static_cast<U>(1) - a) + static_cast<U>(y) * a);
+ }
+ };
+
+ template<typename T>
+ struct compute_mix<T, bool>
+ {
+ GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, bool const& a)
+ {
+ return a ? y : x;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool isFloat, bool Aligned>
+ struct compute_sign
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return vec<L, T, Q>(glm::lessThan(vec<L, T, Q>(0), x)) - vec<L, T, Q>(glm::lessThan(x, vec<L, T, Q>(0)));
+ }
+ };
+
+# if GLM_ARCH == GLM_ARCH_X86
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_sign<L, T, Q, false, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ T const Shift(static_cast<T>(sizeof(T) * 8 - 1));
+ vec<L, T, Q> const y(vec<L, typename detail::make_unsigned<T>::type, Q>(-x) >> typename detail::make_unsigned<T>::type(Shift));
+
+ return (x >> Shift) | y;
+ }
+ };
+# endif
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_floor
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(std::floor, x);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_ceil
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(std::ceil, x);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_fract
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return x - floor(x);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_trunc
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(trunc, x);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_round
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(round, x);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_mod
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'mod' only accept floating-point inputs. Include <glm/gtc/integer.hpp> for integer inputs.");
+ return a - b * floor(a / b);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_min_vector
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return detail::functor2<vec, L, T, Q>::call(min, x, y);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_max_vector
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return detail::functor2<vec, L, T, Q>::call(max, x, y);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_clamp_vector
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal)
+ {
+ return min(max(x, minVal), maxVal);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_step_vector
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& edge, vec<L, T, Q> const& x)
+ {
+ return mix(vec<L, T, Q>(1), vec<L, T, Q>(0), glm::lessThan(x, edge));
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_smoothstep_vector
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& edge0, vec<L, T, Q> const& edge1, vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs");
+ vec<L, T, Q> const tmp(clamp((x - edge0) / (edge1 - edge0), static_cast<T>(0), static_cast<T>(1)));
+ return tmp * tmp * (static_cast<T>(3) - static_cast<T>(2) * tmp);
+ }
+ };
+}//namespace detail
+
+ template<typename genFIType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType abs(genFIType x)
+ {
+ return detail::compute_abs<genFIType, std::numeric_limits<genFIType>::is_signed>::call(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> abs(vec<L, T, Q> const& x)
+ {
+ return detail::compute_abs_vector<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // sign
+ // fast and works for any type
+ template<typename genFIType>
+ GLM_FUNC_QUALIFIER genFIType sign(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<genFIType>::is_iec559 || (std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer),
+ "'sign' only accept signed inputs");
+
+ return detail::compute_sign<1, genFIType, defaultp,
+ std::numeric_limits<genFIType>::is_iec559, detail::is_aligned<highp>::value>::call(vec<1, genFIType>(x)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> sign(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<T>::is_iec559 || (std::numeric_limits<T>::is_signed && std::numeric_limits<T>::is_integer),
+ "'sign' only accept signed inputs");
+
+ return detail::compute_sign<L, T, Q, std::numeric_limits<T>::is_iec559, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // floor
+ using ::std::floor;
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> floor(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'floor' only accept floating-point inputs.");
+ return detail::compute_floor<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> trunc(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'trunc' only accept floating-point inputs");
+ return detail::compute_trunc<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> round(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'round' only accept floating-point inputs");
+ return detail::compute_round<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+/*
+ // roundEven
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType roundEven(genType const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'roundEven' only accept floating-point inputs");
+
+ return genType(int(x + genType(int(x) % 2)));
+ }
+*/
+
+ // roundEven
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType roundEven(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'roundEven' only accept floating-point inputs");
+
+ int Integer = static_cast<int>(x);
+ genType IntegerPart = static_cast<genType>(Integer);
+ genType FractionalPart = fract(x);
+
+ if(FractionalPart > static_cast<genType>(0.5) || FractionalPart < static_cast<genType>(0.5))
+ {
+ return round(x);
+ }
+ else if((Integer % 2) == 0)
+ {
+ return IntegerPart;
+ }
+ else if(x <= static_cast<genType>(0)) // Work around...
+ {
+ return IntegerPart - static_cast<genType>(1);
+ }
+ else
+ {
+ return IntegerPart + static_cast<genType>(1);
+ }
+ //else // Bug on MinGW 4.5.2
+ //{
+ // return mix(IntegerPart + genType(-1), IntegerPart + genType(1), x <= genType(0));
+ //}
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> roundEven(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'roundEven' only accept floating-point inputs");
+ return detail::functor1<vec, L, T, T, Q>::call(roundEven, x);
+ }
+
+ // ceil
+ using ::std::ceil;
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> ceil(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'ceil' only accept floating-point inputs");
+ return detail::compute_ceil<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // fract
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fract(genType x)
+ {
+ return fract(vec<1, genType>(x)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fract(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fract' only accept floating-point inputs");
+ return detail::compute_fract<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // mod
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType mod(genType x, genType y)
+ {
+# if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+ // Another Cuda compiler bug https://github.com/g-truc/glm/issues/530
+ vec<1, genType, defaultp> Result(mod(vec<1, genType, defaultp>(x), y));
+ return Result.x;
+# else
+ return mod(vec<1, genType, defaultp>(x), y).x;
+# endif
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mod(vec<L, T, Q> const& x, T y)
+ {
+ return detail::compute_mod<L, T, Q, detail::is_aligned<Q>::value>::call(x, vec<L, T, Q>(y));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mod(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return detail::compute_mod<L, T, Q, detail::is_aligned<Q>::value>::call(x, y);
+ }
+
+ // modf
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType modf(genType x, genType & i)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'modf' only accept floating-point inputs");
+ return std::modf(x, &i);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<1, T, Q> modf(vec<1, T, Q> const& x, vec<1, T, Q> & i)
+ {
+ return vec<1, T, Q>(
+ modf(x.x, i.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> modf(vec<2, T, Q> const& x, vec<2, T, Q> & i)
+ {
+ return vec<2, T, Q>(
+ modf(x.x, i.x),
+ modf(x.y, i.y));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> modf(vec<3, T, Q> const& x, vec<3, T, Q> & i)
+ {
+ return vec<3, T, Q>(
+ modf(x.x, i.x),
+ modf(x.y, i.y),
+ modf(x.z, i.z));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> modf(vec<4, T, Q> const& x, vec<4, T, Q> & i)
+ {
+ return vec<4, T, Q>(
+ modf(x.x, i.x),
+ modf(x.y, i.y),
+ modf(x.z, i.z),
+ modf(x.w, i.w));
+ }
+
+ //// Only valid if (INT_MIN <= x-y <= INT_MAX)
+ //// min(x,y)
+ //r = y + ((x - y) & ((x - y) >> (sizeof(int) *
+ //CHAR_BIT - 1)));
+ //// max(x,y)
+ //r = x - ((x - y) & ((x - y) >> (sizeof(int) *
+ //CHAR_BIT - 1)));
+
+ // min
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& a, T b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'min' only accept floating-point or integer inputs");
+ return detail::compute_min_vector<L, T, Q, detail::is_aligned<Q>::value>::call(a, vec<L, T, Q>(b));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ return detail::compute_min_vector<L, T, Q, detail::is_aligned<Q>::value>::call(a, b);
+ }
+
+ // max
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& a, T b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'max' only accept floating-point or integer inputs");
+ return detail::compute_max_vector<L, T, Q, detail::is_aligned<Q>::value>::call(a, vec<L, T, Q>(b));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ return detail::compute_max_vector<L, T, Q, detail::is_aligned<Q>::value>::call(a, b);
+ }
+
+ // clamp
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || std::numeric_limits<genType>::is_integer, "'clamp' only accept floating-point or integer inputs");
+ return min(max(x, minVal), maxVal);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, T minVal, T maxVal)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'clamp' only accept floating-point or integer inputs");
+ return detail::compute_clamp_vector<L, T, Q, detail::is_aligned<Q>::value>::call(x, vec<L, T, Q>(minVal), vec<L, T, Q>(maxVal));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'clamp' only accept floating-point or integer inputs");
+ return detail::compute_clamp_vector<L, T, Q, detail::is_aligned<Q>::value>::call(x, minVal, maxVal);
+ }
+
+ template<typename genTypeT, typename genTypeU>
+ GLM_FUNC_QUALIFIER genTypeT mix(genTypeT x, genTypeT y, genTypeU a)
+ {
+ return detail::compute_mix<genTypeT, genTypeU>::call(x, y, a);
+ }
+
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, U a)
+ {
+ return detail::compute_mix_scalar<L, T, U, Q, detail::is_aligned<Q>::value>::call(x, y, a);
+ }
+
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, U, Q> const& a)
+ {
+ return detail::compute_mix_vector<L, T, U, Q, detail::is_aligned<Q>::value>::call(x, y, a);
+ }
+
+ // step
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType step(genType edge, genType x)
+ {
+ return mix(static_cast<genType>(1), static_cast<genType>(0), x < edge);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> step(T edge, vec<L, T, Q> const& x)
+ {
+ return detail::compute_step_vector<L, T, Q, detail::is_aligned<Q>::value>::call(vec<L, T, Q>(edge), x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> step(vec<L, T, Q> const& edge, vec<L, T, Q> const& x)
+ {
+ return detail::compute_step_vector<L, T, Q, detail::is_aligned<Q>::value>::call(edge, x);
+ }
+
+ // smoothstep
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType smoothstep(genType edge0, genType edge1, genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs");
+
+ genType const tmp(clamp((x - edge0) / (edge1 - edge0), genType(0), genType(1)));
+ return tmp * tmp * (genType(3) - genType(2) * tmp);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> smoothstep(T edge0, T edge1, vec<L, T, Q> const& x)
+ {
+ return detail::compute_smoothstep_vector<L, T, Q, detail::is_aligned<Q>::value>::call(vec<L, T, Q>(edge0), vec<L, T, Q>(edge1), x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> smoothstep(vec<L, T, Q> const& edge0, vec<L, T, Q> const& edge1, vec<L, T, Q> const& x)
+ {
+ return detail::compute_smoothstep_vector<L, T, Q, detail::is_aligned<Q>::value>::call(edge0, edge1, x);
+ }
+
+# if GLM_HAS_CXX11_STL
+ using std::isnan;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool isnan(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'isnan' only accept floating-point inputs");
+
+# if GLM_HAS_CXX11_STL
+ return std::isnan(x);
+# elif GLM_COMPILER & GLM_COMPILER_VC
+ return _isnan(x) != 0;
+# elif GLM_COMPILER & GLM_COMPILER_INTEL
+# if GLM_PLATFORM & GLM_PLATFORM_WINDOWS
+ return _isnan(x) != 0;
+# else
+ return ::isnan(x) != 0;
+# endif
+# elif (GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)) && (GLM_PLATFORM & GLM_PLATFORM_ANDROID) && __cplusplus < 201103L
+ return _isnan(x) != 0;
+# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+ return ::isnan(x) != 0;
+# else
+ return std::isnan(x);
+# endif
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> isnan(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isnan' only accept floating-point inputs");
+
+ vec<L, bool, Q> Result;
+ for (length_t l = 0; l < v.length(); ++l)
+ Result[l] = glm::isnan(v[l]);
+ return Result;
+ }
+
+# if GLM_HAS_CXX11_STL
+ using std::isinf;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool isinf(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'isinf' only accept floating-point inputs");
+
+# if GLM_HAS_CXX11_STL
+ return std::isinf(x);
+# elif GLM_COMPILER & (GLM_COMPILER_INTEL | GLM_COMPILER_VC)
+# if(GLM_PLATFORM & GLM_PLATFORM_WINDOWS)
+ return _fpclass(x) == _FPCLASS_NINF || _fpclass(x) == _FPCLASS_PINF;
+# else
+ return ::isinf(x);
+# endif
+# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)
+# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID && __cplusplus < 201103L)
+ return _isinf(x) != 0;
+# else
+ return std::isinf(x);
+# endif
+# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+ // http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/group__CUDA__MATH__DOUBLE_g13431dd2b40b51f9139cbb7f50c18fab.html#g13431dd2b40b51f9139cbb7f50c18fab
+ return ::isinf(double(x)) != 0;
+# else
+ return std::isinf(x);
+# endif
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> isinf(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isinf' only accept floating-point inputs");
+
+ vec<L, bool, Q> Result;
+ for (length_t l = 0; l < v.length(); ++l)
+ Result[l] = glm::isinf(v[l]);
+ return Result;
+ }
+
+ GLM_FUNC_QUALIFIER int floatBitsToInt(float const& v)
+ {
+ union
+ {
+ float in;
+ int out;
+ } u;
+
+ u.in = v;
+
+ return u.out;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> floatBitsToInt(vec<L, float, Q> const& v)
+ {
+ return reinterpret_cast<vec<L, int, Q>&>(const_cast<vec<L, float, Q>&>(v));
+ }
+
+ GLM_FUNC_QUALIFIER uint floatBitsToUint(float const& v)
+ {
+ union
+ {
+ float in;
+ uint out;
+ } u;
+
+ u.in = v;
+
+ return u.out;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, uint, Q> floatBitsToUint(vec<L, float, Q> const& v)
+ {
+ return reinterpret_cast<vec<L, uint, Q>&>(const_cast<vec<L, float, Q>&>(v));
+ }
+
+ GLM_FUNC_QUALIFIER float intBitsToFloat(int const& v)
+ {
+ union
+ {
+ int in;
+ float out;
+ } u;
+
+ u.in = v;
+
+ return u.out;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, float, Q> intBitsToFloat(vec<L, int, Q> const& v)
+ {
+ return reinterpret_cast<vec<L, float, Q>&>(const_cast<vec<L, int, Q>&>(v));
+ }
+
+ GLM_FUNC_QUALIFIER float uintBitsToFloat(uint const& v)
+ {
+ union
+ {
+ uint in;
+ float out;
+ } u;
+
+ u.in = v;
+
+ return u.out;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, float, Q> uintBitsToFloat(vec<L, uint, Q> const& v)
+ {
+ return reinterpret_cast<vec<L, float, Q>&>(const_cast<vec<L, uint, Q>&>(v));
+ }
+
+# if GLM_HAS_CXX11_STL
+ using std::fma;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fma(genType const& a, genType const& b, genType const& c)
+ {
+ return a * b + c;
+ }
+# endif
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType frexp(genType x, int& exp)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'frexp' only accept floating-point inputs");
+
+ return std::frexp(x, &exp);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> frexp(vec<L, T, Q> const& v, vec<L, int, Q>& exp)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'frexp' only accept floating-point inputs");
+
+ vec<L, T, Q> Result;
+ for (length_t l = 0; l < v.length(); ++l)
+ Result[l] = std::frexp(v[l], &exp[l]);
+ return Result;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType ldexp(genType const& x, int const& exp)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'ldexp' only accept floating-point inputs");
+
+ return std::ldexp(x, exp);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> ldexp(vec<L, T, Q> const& v, vec<L, int, Q> const& exp)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'ldexp' only accept floating-point inputs");
+
+ vec<L, T, Q> Result;
+ for (length_t l = 0; l < v.length(); ++l)
+ Result[l] = std::ldexp(v[l], exp[l]);
+ return Result;
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_common_simd.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/func_common_simd.inl b/3rdparty/glm/source/glm/detail/func_common_simd.inl
new file mode 100644
index 0000000..ce0032d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_common_simd.inl
@@ -0,0 +1,231 @@
+/// @ref core
+/// @file glm/detail/func_common_simd.inl
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#include "../simd/common.h"
+
+#include <immintrin.h>
+
+namespace glm{
+namespace detail
+{
+ template<qualifier Q>
+ struct compute_abs_vector<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> result;
+ result.data = glm_vec4_abs(v.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_abs_vector<4, int, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v)
+ {
+ vec<4, int, Q> result;
+ result.data = glm_ivec4_abs(v.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_floor<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> result;
+ result.data = glm_vec4_floor(v.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_ceil<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> result;
+ result.data = glm_vec4_ceil(v.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_fract<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> result;
+ result.data = glm_vec4_fract(v.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_round<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> result;
+ result.data = glm_vec4_round(v.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_mod<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y)
+ {
+ vec<4, float, Q> result;
+ result.data = glm_vec4_mod(x.data, y.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_min_vector<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ vec<4, float, Q> result;
+ result.data = _mm_min_ps(v1.data, v2.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_min_vector<4, int, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ vec<4, int, Q> result;
+ result.data = _mm_min_epi32(v1.data, v2.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_min_vector<4, uint, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+ {
+ vec<4, uint, Q> result;
+ result.data = _mm_min_epu32(v1.data, v2.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_max_vector<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ vec<4, float, Q> result;
+ result.data = _mm_max_ps(v1.data, v2.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_max_vector<4, int, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ vec<4, int, Q> result;
+ result.data = _mm_max_epi32(v1.data, v2.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_max_vector<4, uint, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+ {
+ vec<4, uint, Q> result;
+ result.data = _mm_max_epu32(v1.data, v2.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_clamp_vector<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& minVal, vec<4, float, Q> const& maxVal)
+ {
+ vec<4, float, Q> result;
+ result.data = _mm_min_ps(_mm_max_ps(x.data, minVal.data), maxVal.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_clamp_vector<4, int, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& x, vec<4, int, Q> const& minVal, vec<4, int, Q> const& maxVal)
+ {
+ vec<4, int, Q> result;
+ result.data = _mm_min_epi32(_mm_max_epi32(x.data, minVal.data), maxVal.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_clamp_vector<4, uint, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& x, vec<4, uint, Q> const& minVal, vec<4, uint, Q> const& maxVal)
+ {
+ vec<4, uint, Q> result;
+ result.data = _mm_min_epu32(_mm_max_epu32(x.data, minVal.data), maxVal.data);
+ return result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_mix_vector<4, float, bool, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y, vec<4, bool, Q> const& a)
+ {
+ __m128i const Load = _mm_set_epi32(-static_cast<int>(a.w), -static_cast<int>(a.z), -static_cast<int>(a.y), -static_cast<int>(a.x));
+ __m128 const Mask = _mm_castsi128_ps(Load);
+
+ vec<4, float, Q> Result;
+# if 0 && GLM_ARCH & GLM_ARCH_AVX
+ Result.data = _mm_blendv_ps(x.data, y.data, Mask);
+# else
+ Result.data = _mm_or_ps(_mm_and_ps(Mask, y.data), _mm_andnot_ps(Mask, x.data));
+# endif
+ return Result;
+ }
+ };
+/* FIXME
+ template<qualifier Q>
+ struct compute_step_vector<float, Q, tvec4>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge, vec<4, float, Q> const& x)
+ {
+ vec<4, float, Q> Result;
+ result.data = glm_vec4_step(edge.data, x.data);
+ return result;
+ }
+ };
+*/
+ template<qualifier Q>
+ struct compute_smoothstep_vector<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge0, vec<4, float, Q> const& edge1, vec<4, float, Q> const& x)
+ {
+ vec<4, float, Q> Result;
+ Result.data = glm_vec4_smoothstep(edge0.data, edge1.data, x.data);
+ return Result;
+ }
+ };
+}//namespace detail
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/detail/func_exponential.inl b/3rdparty/glm/source/glm/detail/func_exponential.inl
new file mode 100644
index 0000000..2040d41
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_exponential.inl
@@ -0,0 +1,152 @@
+/// @ref core
+/// @file glm/detail/func_exponential.inl
+
+#include "../vector_relational.hpp"
+#include "_vectorize.hpp"
+#include <limits>
+#include <cmath>
+#include <cassert>
+
+namespace glm{
+namespace detail
+{
+# if GLM_HAS_CXX11_STL
+ using std::log2;
+# else
+ template<typename genType>
+ genType log2(genType Value)
+ {
+ return std::log(Value) * static_cast<genType>(1.4426950408889634073599246810019);
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q, bool isFloat, bool Aligned>
+ struct compute_log2
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'log2' only accept floating-point inputs. Include <glm/gtc/integer.hpp> for integer inputs.");
+
+ return detail::functor1<vec, L, T, T, Q>::call(log2, v);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_sqrt
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(std::sqrt, x);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_inversesqrt
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ return static_cast<T>(1) / sqrt(x);
+ }
+ };
+
+ template<length_t L, bool Aligned>
+ struct compute_inversesqrt<L, float, lowp, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, float, lowp> call(vec<L, float, lowp> const& x)
+ {
+ vec<L, float, lowp> tmp(x);
+ vec<L, float, lowp> xhalf(tmp * 0.5f);
+ vec<L, uint, lowp>* p = reinterpret_cast<vec<L, uint, lowp>*>(const_cast<vec<L, float, lowp>*>(&x));
+ vec<L, uint, lowp> i = vec<L, uint, lowp>(0x5f375a86) - (*p >> vec<L, uint, lowp>(1));
+ vec<L, float, lowp>* ptmp = reinterpret_cast<vec<L, float, lowp>*>(&i);
+ tmp = *ptmp;
+ tmp = tmp * (1.5f - xhalf * tmp * tmp);
+ return tmp;
+ }
+ };
+}//namespace detail
+
+ // pow
+ using std::pow;
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> pow(vec<L, T, Q> const& base, vec<L, T, Q> const& exponent)
+ {
+ return detail::functor2<vec, L, T, Q>::call(pow, base, exponent);
+ }
+
+ // exp
+ using std::exp;
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> exp(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(exp, x);
+ }
+
+ // log
+ using std::log;
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> log(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(log, x);
+ }
+
+# if GLM_HAS_CXX11_STL
+ using std::exp2;
+# else
+ //exp2, ln2 = 0.69314718055994530941723212145818f
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType exp2(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'exp2' only accept floating-point inputs");
+
+ return std::exp(static_cast<genType>(0.69314718055994530941723212145818) * x);
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> exp2(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(exp2, x);
+ }
+
+ // log2, ln2 = 0.69314718055994530941723212145818f
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType log2(genType x)
+ {
+ return log2(vec<1, genType>(x)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> log2(vec<L, T, Q> const& x)
+ {
+ return detail::compute_log2<L, T, Q, std::numeric_limits<T>::is_iec559, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // sqrt
+ using std::sqrt;
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> sqrt(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'sqrt' only accept floating-point inputs");
+ return detail::compute_sqrt<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // inversesqrt
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType inversesqrt(genType x)
+ {
+ return static_cast<genType>(1) / sqrt(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> inversesqrt(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'inversesqrt' only accept floating-point inputs");
+ return detail::compute_inversesqrt<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_exponential_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/detail/func_exponential_simd.inl b/3rdparty/glm/source/glm/detail/func_exponential_simd.inl
new file mode 100644
index 0000000..fb78951
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_exponential_simd.inl
@@ -0,0 +1,37 @@
+/// @ref core
+/// @file glm/detail/func_exponential_simd.inl
+
+#include "../simd/exponential.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+namespace glm{
+namespace detail
+{
+ template<qualifier Q>
+ struct compute_sqrt<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_sqrt_ps(v.data);
+ return Result;
+ }
+ };
+
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ template<>
+ struct compute_sqrt<4, float, aligned_lowp, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& v)
+ {
+ vec<4, float, aligned_lowp> Result;
+ Result.data = glm_vec4_sqrt_lowp(v.data);
+ return Result;
+ }
+ };
+# endif
+}//namespace detail
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/detail/func_geometric.inl b/3rdparty/glm/source/glm/detail/func_geometric.inl
new file mode 100644
index 0000000..404c990
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_geometric.inl
@@ -0,0 +1,243 @@
+#include "../exponential.hpp"
+#include "../common.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_length
+ {
+ GLM_FUNC_QUALIFIER static T call(vec<L, T, Q> const& v)
+ {
+ return sqrt(dot(v, v));
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_distance
+ {
+ GLM_FUNC_QUALIFIER static T call(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1)
+ {
+ return length(p1 - p0);
+ }
+ };
+
+ template<typename V, typename T, bool Aligned>
+ struct compute_dot{};
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_dot<vec<1, T, Q>, T, Aligned>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<1, T, Q> const& a, vec<1, T, Q> const& b)
+ {
+ return a.x * b.x;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_dot<vec<2, T, Q>, T, Aligned>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<2, T, Q> const& a, vec<2, T, Q> const& b)
+ {
+ vec<2, T, Q> tmp(a * b);
+ return tmp.x + tmp.y;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_dot<vec<3, T, Q>, T, Aligned>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<3, T, Q> const& a, vec<3, T, Q> const& b)
+ {
+ vec<3, T, Q> tmp(a * b);
+ return tmp.x + tmp.y + tmp.z;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_dot<vec<4, T, Q>, T, Aligned>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> tmp(a * b);
+ return (tmp.x + tmp.y) + (tmp.z + tmp.w);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_cross
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, T, Q> call(vec<3, T, Q> const& x, vec<3, T, Q> const& y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'cross' accepts only floating-point inputs");
+
+ return vec<3, T, Q>(
+ x.y * y.z - y.y * x.z,
+ x.z * y.x - y.z * x.x,
+ x.x * y.y - y.x * x.y);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_normalize
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'normalize' accepts only floating-point inputs");
+
+ return v * inversesqrt(dot(v, v));
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_faceforward
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& N, vec<L, T, Q> const& I, vec<L, T, Q> const& Nref)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'normalize' accepts only floating-point inputs");
+
+ return dot(Nref, I) < static_cast<T>(0) ? N : -N;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_reflect
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& I, vec<L, T, Q> const& N)
+ {
+ return I - N * dot(N, I) * static_cast<T>(2);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_refract
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& I, vec<L, T, Q> const& N, T eta)
+ {
+ T const dotValue(dot(N, I));
+ T const k(static_cast<T>(1) - eta * eta * (static_cast<T>(1) - dotValue * dotValue));
+ vec<L, T, Q> const Result =
+ (k >= static_cast<T>(0)) ? (eta * I - (eta * dotValue + std::sqrt(k)) * N) : vec<L, T, Q>(0);
+ return Result;
+ }
+ };
+}//namespace detail
+
+ // length
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType length(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'length' accepts only floating-point inputs");
+
+ return abs(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T length(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'length' accepts only floating-point inputs");
+
+ return detail::compute_length<L, T, Q, detail::is_aligned<Q>::value>::call(v);
+ }
+
+ // distance
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType distance(genType const& p0, genType const& p1)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'distance' accepts only floating-point inputs");
+
+ return length(p1 - p0);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1)
+ {
+ return detail::compute_distance<L, T, Q, detail::is_aligned<Q>::value>::call(p0, p1);
+ }
+
+ // dot
+ template<typename T>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(T x, T y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'dot' accepts only floating-point inputs");
+ return x * y;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'dot' accepts only floating-point inputs");
+ return detail::compute_dot<vec<L, T, Q>, T, detail::is_aligned<Q>::value>::call(x, y);
+ }
+
+ // cross
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y)
+ {
+ return detail::compute_cross<T, Q, detail::is_aligned<Q>::value>::call(x, y);
+ }
+/*
+ // normalize
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType normalize(genType const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'normalize' accepts only floating-point inputs");
+
+ return x < genType(0) ? genType(-1) : genType(1);
+ }
+*/
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> normalize(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'normalize' accepts only floating-point inputs");
+
+ return detail::compute_normalize<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // faceforward
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType faceforward(genType const& N, genType const& I, genType const& Nref)
+ {
+ return dot(Nref, I) < static_cast<genType>(0) ? N : -N;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> faceforward(vec<L, T, Q> const& N, vec<L, T, Q> const& I, vec<L, T, Q> const& Nref)
+ {
+ return detail::compute_faceforward<L, T, Q, detail::is_aligned<Q>::value>::call(N, I, Nref);
+ }
+
+ // reflect
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType reflect(genType const& I, genType const& N)
+ {
+ return I - N * dot(N, I) * genType(2);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> reflect(vec<L, T, Q> const& I, vec<L, T, Q> const& N)
+ {
+ return detail::compute_reflect<L, T, Q, detail::is_aligned<Q>::value>::call(I, N);
+ }
+
+ // refract
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType refract(genType const& I, genType const& N, genType eta)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'refract' accepts only floating-point inputs");
+ genType const dotValue(dot(N, I));
+ genType const k(static_cast<genType>(1) - eta * eta * (static_cast<genType>(1) - dotValue * dotValue));
+ return (eta * I - (eta * dotValue + sqrt(k)) * N) * static_cast<genType>(k >= static_cast<genType>(0));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> refract(vec<L, T, Q> const& I, vec<L, T, Q> const& N, T eta)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'refract' accepts only floating-point inputs");
+ return detail::compute_refract<L, T, Q, detail::is_aligned<Q>::value>::call(I, N, eta);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_geometric_simd.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/func_geometric_simd.inl b/3rdparty/glm/source/glm/detail/func_geometric_simd.inl
new file mode 100644
index 0000000..2076dae
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_geometric_simd.inl
@@ -0,0 +1,163 @@
+/// @ref core
+/// @file glm/detail/func_geometric_simd.inl
+
+#include "../simd/geometric.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+namespace glm{
+namespace detail
+{
+ template<qualifier Q>
+ struct compute_length<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v)
+ {
+ return _mm_cvtss_f32(glm_vec4_length(v.data));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_distance<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1)
+ {
+ return _mm_cvtss_f32(glm_vec4_distance(p0.data, p1.data));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_dot<vec<4, float, Q>, float, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y)
+ {
+ return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_cross<float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<3, float, Q> const& a, vec<3, float, Q> const& b)
+ {
+ __m128 const set0 = _mm_set_ps(0.0f, a.z, a.y, a.x);
+ __m128 const set1 = _mm_set_ps(0.0f, b.z, b.y, b.x);
+ __m128 const xpd0 = glm_vec4_cross(set0, set1);
+
+ vec<4, float, Q> Result;
+ Result.data = xpd0;
+ return vec<3, float, Q>(Result);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_normalize<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ vec<4, float, Q> Result;
+ Result.data = glm_vec4_normalize(v.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_faceforward<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& N, vec<4, float, Q> const& I, vec<4, float, Q> const& Nref)
+ {
+ vec<4, float, Q> Result;
+ Result.data = glm_vec4_faceforward(N.data, I.data, Nref.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_reflect<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N)
+ {
+ vec<4, float, Q> Result;
+ Result.data = glm_vec4_reflect(I.data, N.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_refract<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N, float eta)
+ {
+ vec<4, float, Q> Result;
+ Result.data = glm_vec4_refract(I.data, N.data, _mm_set1_ps(eta));
+ return Result;
+ }
+ };
+}//namespace detail
+}//namespace glm
+
+#elif GLM_ARCH & GLM_ARCH_NEON_BIT
+namespace glm{
+namespace detail
+{
+ template<qualifier Q>
+ struct compute_length<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v)
+ {
+ return sqrt(compute_dot<vec<4, float, Q>, float, true>::call(v, v));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_distance<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1)
+ {
+ return compute_length<4, float, Q, true>::call(p1 - p0);
+ }
+ };
+
+
+ template<qualifier Q>
+ struct compute_dot<vec<4, float, Q>, float, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y)
+ {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+ float32x4_t v = vmulq_f32(x.data, y.data);
+ return vaddvq_f32(v);
+#else // Armv7a with Neon
+ float32x4_t p = vmulq_f32(x.data, y.data);
+ float32x2_t v = vpadd_f32(vget_low_f32(p), vget_high_f32(p));
+ v = vpadd_f32(v, v);
+ return vget_lane_f32(v, 0);
+#endif
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_normalize<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v)
+ {
+ float32x4_t p = vmulq_f32(v.data, v.data);
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+ p = vpaddq_f32(p, p);
+ p = vpaddq_f32(p, p);
+#else
+ float32x2_t t = vpadd_f32(vget_low_f32(p), vget_high_f32(p));
+ t = vpadd_f32(t, t);
+ p = vcombine_f32(t, t);
+#endif
+
+ float32x4_t vd = vrsqrteq_f32(p);
+ vec<4, float, Q> Result;
+ Result.data = vmulq_f32(v.data, vd);
+ return Result;
+ }
+ };
+}//namespace detail
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/detail/func_integer.inl b/3rdparty/glm/source/glm/detail/func_integer.inl
new file mode 100644
index 0000000..091e1e0
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_integer.inl
@@ -0,0 +1,372 @@
+/// @ref core
+
+#include "_vectorize.hpp"
+#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
+# include <intrin.h>
+# pragma intrinsic(_BitScanReverse)
+#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
+#include <limits>
+
+#if !GLM_HAS_EXTENDED_INTEGER_TYPE
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic ignored "-Wlong-long"
+# endif
+# if (GLM_COMPILER & GLM_COMPILER_CLANG)
+# pragma clang diagnostic ignored "-Wc++11-long-long"
+# endif
+#endif
+
+namespace glm{
+namespace detail
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER T mask(T Bits)
+ {
+ return Bits >= static_cast<T>(sizeof(T) * 8) ? ~static_cast<T>(0) : (static_cast<T>(1) << Bits) - static_cast<T>(1);
+ }
+
+ template<length_t L, typename T, qualifier Q, bool Aligned, bool EXEC>
+ struct compute_bitfieldReverseStep
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T, T)
+ {
+ return v;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_bitfieldReverseStep<L, T, Q, Aligned, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T Mask, T Shift)
+ {
+ return (v & Mask) << Shift | (v & (~Mask)) >> Shift;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned, bool EXEC>
+ struct compute_bitfieldBitCountStep
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T, T)
+ {
+ return v;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_bitfieldBitCountStep<L, T, Q, Aligned, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T Mask, T Shift)
+ {
+ return (v & Mask) + ((v >> Shift) & Mask);
+ }
+ };
+
+ template<typename genIUType, size_t Bits>
+ struct compute_findLSB
+ {
+ GLM_FUNC_QUALIFIER static int call(genIUType Value)
+ {
+ if(Value == 0)
+ return -1;
+
+ return glm::bitCount(~Value & (Value - static_cast<genIUType>(1)));
+ }
+ };
+
+# if GLM_HAS_BITSCAN_WINDOWS
+ template<typename genIUType>
+ struct compute_findLSB<genIUType, 32>
+ {
+ GLM_FUNC_QUALIFIER static int call(genIUType Value)
+ {
+ unsigned long Result(0);
+ unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast<unsigned long*>(&Value));
+ return IsNotNull ? int(Result) : -1;
+ }
+ };
+
+# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
+ template<typename genIUType>
+ struct compute_findLSB<genIUType, 64>
+ {
+ GLM_FUNC_QUALIFIER static int call(genIUType Value)
+ {
+ unsigned long Result(0);
+ unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
+ return IsNotNull ? int(Result) : -1;
+ }
+ };
+# endif
+# endif//GLM_HAS_BITSCAN_WINDOWS
+
+ template<length_t L, typename T, qualifier Q, bool EXEC = true>
+ struct compute_findMSB_step_vec
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, T Shift)
+ {
+ return x | (x >> Shift);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_findMSB_step_vec<L, T, Q, false>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, T)
+ {
+ return x;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, int>
+ struct compute_findMSB_vec
+ {
+ GLM_FUNC_QUALIFIER static vec<L, int, Q> call(vec<L, T, Q> const& v)
+ {
+ vec<L, T, Q> x(v);
+ x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 8>::call(x, static_cast<T>( 1));
+ x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 8>::call(x, static_cast<T>( 2));
+ x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 8>::call(x, static_cast<T>( 4));
+ x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 16>::call(x, static_cast<T>( 8));
+ x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 32>::call(x, static_cast<T>(16));
+ x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 64>::call(x, static_cast<T>(32));
+ return vec<L, int, Q>(sizeof(T) * 8 - 1) - glm::bitCount(~x);
+ }
+ };
+
+# if GLM_HAS_BITSCAN_WINDOWS
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value)
+ {
+ unsigned long Result(0);
+ unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast<unsigned long*>(&Value));
+ return IsNotNull ? int(Result) : -1;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_findMSB_vec<L, T, Q, 32>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, int, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, int, T, Q>::call(compute_findMSB_32, x);
+ }
+ };
+
+# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value)
+ {
+ unsigned long Result(0);
+ unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
+ return IsNotNull ? int(Result) : -1;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_findMSB_vec<L, T, Q, 64>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, int, Q> call(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, int, T, Q>::call(compute_findMSB_64, x);
+ }
+ };
+# endif
+# endif//GLM_HAS_BITSCAN_WINDOWS
+}//namespace detail
+
+ // uaddCarry
+ GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry)
+ {
+ detail::uint64 const Value64(static_cast<detail::uint64>(x) + static_cast<detail::uint64>(y));
+ detail::uint64 const Max32((static_cast<detail::uint64>(1) << static_cast<detail::uint64>(32)) - static_cast<detail::uint64>(1));
+ Carry = Value64 > Max32 ? 1u : 0u;
+ return static_cast<uint>(Value64 % (Max32 + static_cast<detail::uint64>(1)));
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, uint, Q> uaddCarry(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& Carry)
+ {
+ vec<L, detail::uint64, Q> Value64(vec<L, detail::uint64, Q>(x) + vec<L, detail::uint64, Q>(y));
+ vec<L, detail::uint64, Q> Max32((static_cast<detail::uint64>(1) << static_cast<detail::uint64>(32)) - static_cast<detail::uint64>(1));
+ Carry = mix(vec<L, uint, Q>(0), vec<L, uint, Q>(1), greaterThan(Value64, Max32));
+ return vec<L, uint, Q>(Value64 % (Max32 + static_cast<detail::uint64>(1)));
+ }
+
+ // usubBorrow
+ GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow)
+ {
+ Borrow = x >= y ? static_cast<uint>(0) : static_cast<uint>(1);
+ if(y >= x)
+ return y - x;
+ else
+ return static_cast<uint>((static_cast<detail::int64>(1) << static_cast<detail::int64>(32)) + (static_cast<detail::int64>(y) - static_cast<detail::int64>(x)));
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, uint, Q> usubBorrow(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& Borrow)
+ {
+ Borrow = mix(vec<L, uint, Q>(1), vec<L, uint, Q>(0), greaterThanEqual(x, y));
+ vec<L, uint, Q> const YgeX(y - x);
+ vec<L, uint, Q> const XgeY(vec<L, uint, Q>((static_cast<detail::int64>(1) << static_cast<detail::int64>(32)) + (vec<L, detail::int64, Q>(y) - vec<L, detail::int64, Q>(x))));
+ return mix(XgeY, YgeX, greaterThanEqual(y, x));
+ }
+
+ // umulExtended
+ GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb)
+ {
+ detail::uint64 Value64 = static_cast<detail::uint64>(x) * static_cast<detail::uint64>(y);
+ msb = static_cast<uint>(Value64 >> static_cast<detail::uint64>(32));
+ lsb = static_cast<uint>(Value64);
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER void umulExtended(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& msb, vec<L, uint, Q>& lsb)
+ {
+ vec<L, detail::uint64, Q> Value64(vec<L, detail::uint64, Q>(x) * vec<L, detail::uint64, Q>(y));
+ msb = vec<L, uint, Q>(Value64 >> static_cast<detail::uint64>(32));
+ lsb = vec<L, uint, Q>(Value64);
+ }
+
+ // imulExtended
+ GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb)
+ {
+ detail::int64 Value64 = static_cast<detail::int64>(x) * static_cast<detail::int64>(y);
+ msb = static_cast<int>(Value64 >> static_cast<detail::int64>(32));
+ lsb = static_cast<int>(Value64);
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER void imulExtended(vec<L, int, Q> const& x, vec<L, int, Q> const& y, vec<L, int, Q>& msb, vec<L, int, Q>& lsb)
+ {
+ vec<L, detail::int64, Q> Value64(vec<L, detail::int64, Q>(x) * vec<L, detail::int64, Q>(y));
+ lsb = vec<L, int, Q>(Value64 & static_cast<detail::int64>(0xFFFFFFFF));
+ msb = vec<L, int, Q>((Value64 >> static_cast<detail::int64>(32)) & static_cast<detail::int64>(0xFFFFFFFF));
+ }
+
+ // bitfieldExtract
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits)
+ {
+ return bitfieldExtract(vec<1, genIUType>(Value), Offset, Bits).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldExtract(vec<L, T, Q> const& Value, int Offset, int Bits)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldExtract' only accept integer inputs");
+
+ return (Value >> static_cast<T>(Offset)) & static_cast<T>(detail::mask(Bits));
+ }
+
+ // bitfieldInsert
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const& Base, genIUType const& Insert, int Offset, int Bits)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'bitfieldInsert' only accept integer values");
+
+ return bitfieldInsert(vec<1, genIUType>(Base), vec<1, genIUType>(Insert), Offset, Bits).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldInsert(vec<L, T, Q> const& Base, vec<L, T, Q> const& Insert, int Offset, int Bits)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldInsert' only accept integer values");
+
+ T const Mask = static_cast<T>(detail::mask(Bits) << Offset);
+ return (Base & ~Mask) | ((Insert << static_cast<T>(Offset)) & Mask);
+ }
+
+ // bitfieldReverse
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType bitfieldReverse(genIUType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'bitfieldReverse' only accept integer values");
+
+ return bitfieldReverse(glm::vec<1, genIUType, glm::defaultp>(x)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldReverse' only accept integer values");
+
+ vec<L, T, Q> x(v);
+ x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 2>::call(x, static_cast<T>(0x5555555555555555ull), static_cast<T>( 1));
+ x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 4>::call(x, static_cast<T>(0x3333333333333333ull), static_cast<T>( 2));
+ x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 8>::call(x, static_cast<T>(0x0F0F0F0F0F0F0F0Full), static_cast<T>( 4));
+ x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 16>::call(x, static_cast<T>(0x00FF00FF00FF00FFull), static_cast<T>( 8));
+ x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 32>::call(x, static_cast<T>(0x0000FFFF0000FFFFull), static_cast<T>(16));
+ x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 64>::call(x, static_cast<T>(0x00000000FFFFFFFFull), static_cast<T>(32));
+ return x;
+ }
+
+ // bitCount
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER int bitCount(genIUType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'bitCount' only accept integer values");
+
+ return bitCount(glm::vec<1, genIUType, glm::defaultp>(x)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> bitCount(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitCount' only accept integer values");
+
+# if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable : 4310) //cast truncates constant value
+# endif
+
+ vec<L, typename detail::make_unsigned<T>::type, Q> x(*reinterpret_cast<vec<L, typename detail::make_unsigned<T>::type, Q> const *>(&v));
+ x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 2>::call(x, typename detail::make_unsigned<T>::type(0x5555555555555555ull), typename detail::make_unsigned<T>::type( 1));
+ x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 4>::call(x, typename detail::make_unsigned<T>::type(0x3333333333333333ull), typename detail::make_unsigned<T>::type( 2));
+ x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 8>::call(x, typename detail::make_unsigned<T>::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned<T>::type( 4));
+ x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 16>::call(x, typename detail::make_unsigned<T>::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned<T>::type( 8));
+ x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 32>::call(x, typename detail::make_unsigned<T>::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned<T>::type(16));
+ x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8>= 64>::call(x, typename detail::make_unsigned<T>::type(0x00000000FFFFFFFFull), typename detail::make_unsigned<T>::type(32));
+ return vec<L, int, Q>(x);
+
+# if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+ }
+
+ // findLSB
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER int findLSB(genIUType Value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");
+
+ return detail::compute_findLSB<genIUType, sizeof(genIUType) * 8>::call(Value);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> findLSB(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findLSB' only accept integer values");
+
+ return detail::functor1<vec, L, int, T, Q>::call(findLSB, x);
+ }
+
+ // findMSB
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER int findMSB(genIUType v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");
+
+ return findMSB(vec<1, genIUType>(v)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> findMSB(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findMSB' only accept integer values");
+
+ return detail::compute_findMSB_vec<L, T, Q, sizeof(T) * 8>::call(v);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_integer_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/detail/func_integer_simd.inl b/3rdparty/glm/source/glm/detail/func_integer_simd.inl
new file mode 100644
index 0000000..8be6c9c
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_integer_simd.inl
@@ -0,0 +1,65 @@
+#include "../simd/integer.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+namespace glm{
+namespace detail
+{
+ template<qualifier Q>
+ struct compute_bitfieldReverseStep<4, uint, Q, true, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift)
+ {
+ __m128i const set0 = v.data;
+
+ __m128i const set1 = _mm_set1_epi32(static_cast<int>(Mask));
+ __m128i const and1 = _mm_and_si128(set0, set1);
+ __m128i const sft1 = _mm_slli_epi32(and1, Shift);
+
+ __m128i const set2 = _mm_andnot_si128(set0, _mm_set1_epi32(-1));
+ __m128i const and2 = _mm_and_si128(set0, set2);
+ __m128i const sft2 = _mm_srai_epi32(and2, Shift);
+
+ __m128i const or0 = _mm_or_si128(sft1, sft2);
+
+ return or0;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_bitfieldBitCountStep<4, uint, Q, true, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift)
+ {
+ __m128i const set0 = v.data;
+
+ __m128i const set1 = _mm_set1_epi32(static_cast<int>(Mask));
+ __m128i const and0 = _mm_and_si128(set0, set1);
+ __m128i const sft0 = _mm_slli_epi32(set0, Shift);
+ __m128i const and1 = _mm_and_si128(sft0, set1);
+ __m128i const add0 = _mm_add_epi32(and0, and1);
+
+ return add0;
+ }
+ };
+}//namespace detail
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<>
+ GLM_FUNC_QUALIFIER int bitCount(uint x)
+ {
+ return _mm_popcnt_u32(x);
+ }
+
+# if(GLM_MODEL == GLM_MODEL_64)
+ template<>
+ GLM_FUNC_QUALIFIER int bitCount(detail::uint64 x)
+ {
+ return static_cast<int>(_mm_popcnt_u64(x));
+ }
+# endif//GLM_MODEL
+# endif//GLM_ARCH
+
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/detail/func_matrix.inl b/3rdparty/glm/source/glm/detail/func_matrix.inl
new file mode 100644
index 0000000..c2d568f
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_matrix.inl
@@ -0,0 +1,443 @@
+#include "../geometric.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_matrixCompMult
+ {
+ GLM_FUNC_QUALIFIER static mat<C, R, T, Q> call(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y)
+ {
+ mat<C, R, T, Q> Result;
+ for(length_t i = 0; i < Result.length(); ++i)
+ Result[i] = x[i] * y[i];
+ return Result;
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool IsFloat, bool Aligned>
+ struct compute_matrixCompMult_type {
+ GLM_FUNC_QUALIFIER static mat<C, R, T, Q> call(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE,
+ "'matrixCompMult' only accept floating-point inputs, include <glm/ext/matrix_integer.hpp> to discard this restriction.");
+ return detail::compute_matrixCompMult<C, R, T, Q, detail::is_aligned<Q>::value>::call(x, y);
+ }
+ };
+
+ template<length_t DA, length_t DB, typename T, qualifier Q>
+ struct compute_outerProduct {
+ GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait<DA, DB, T, Q>::type call(vec<DA, T, Q> const& c, vec<DB, T, Q> const& r)
+ {
+ typename detail::outerProduct_trait<DA, DB, T, Q>::type m;
+ for(length_t i = 0; i < m.length(); ++i)
+ m[i] = c * r[i];
+ return m;
+ }
+ };
+
+ template<length_t DA, length_t DB, typename T, qualifier Q, bool IsFloat>
+ struct compute_outerProduct_type {
+ GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait<DA, DB, T, Q>::type call(vec<DA, T, Q> const& c, vec<DB, T, Q> const& r)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE,
+ "'outerProduct' only accept floating-point inputs, include <glm/ext/matrix_integer.hpp> to discard this restriction.");
+
+ return detail::compute_outerProduct<DA, DB, T, Q>::call(c, r);
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_transpose{};
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<2, 2, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m)
+ {
+ mat<2, 2, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<2, 3, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<3, 2, T, Q> call(mat<2, 3, T, Q> const& m)
+ {
+ mat<3,2, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<2, 4, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 2, T, Q> call(mat<2, 4, T, Q> const& m)
+ {
+ mat<4, 2, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[3][0] = m[0][3];
+ Result[3][1] = m[1][3];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<3, 2, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<2, 3, T, Q> call(mat<3, 2, T, Q> const& m)
+ {
+ mat<2, 3, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<3, 3, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m)
+ {
+ mat<3, 3, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[2][2] = m[2][2];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<3, 4, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 3, T, Q> call(mat<3, 4, T, Q> const& m)
+ {
+ mat<4, 3, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[2][2] = m[2][2];
+ Result[3][0] = m[0][3];
+ Result[3][1] = m[1][3];
+ Result[3][2] = m[2][3];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<4, 2, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<2, 4, T, Q> call(mat<4, 2, T, Q> const& m)
+ {
+ mat<2, 4, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[0][3] = m[3][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ Result[1][3] = m[3][1];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<4, 3, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<3, 4, T, Q> call(mat<4, 3, T, Q> const& m)
+ {
+ mat<3, 4, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[0][3] = m[3][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ Result[1][3] = m[3][1];
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[2][2] = m[2][2];
+ Result[2][3] = m[3][2];
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_transpose<4, 4, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m)
+ {
+ mat<4, 4, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[0][3] = m[3][0];
+
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ Result[1][3] = m[3][1];
+
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[2][2] = m[2][2];
+ Result[2][3] = m[3][2];
+
+ Result[3][0] = m[0][3];
+ Result[3][1] = m[1][3];
+ Result[3][2] = m[2][3];
+ Result[3][3] = m[3][3];
+ return Result;
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool IsFloat, bool Aligned>
+ struct compute_transpose_type {
+ GLM_FUNC_QUALIFIER static mat<R, C, T, Q> call(mat<C, R, T, Q> const& m)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE,
+ "'transpose' only accept floating-point inputs, include <glm/ext/matrix_integer.hpp> to discard this restriction.");
+ return detail::compute_transpose<C, R, T, Q, detail::is_aligned<Q>::value>::call(m);
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_determinant{};
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_determinant<2, 2, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static T call(mat<2, 2, T, Q> const& m)
+ {
+ return m[0][0] * m[1][1] - m[1][0] * m[0][1];
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_determinant<3, 3, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static T call(mat<3, 3, T, Q> const& m)
+ {
+ return
+ + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2])
+ - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2])
+ + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_determinant<4, 4, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static T call(mat<4, 4, T, Q> const& m)
+ {
+ T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+
+ vec<4, T, Q> DetCof(
+ + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
+ - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
+ + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
+ - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
+
+ return
+ m[0][0] * DetCof[0] + m[0][1] * DetCof[1] +
+ m[0][2] * DetCof[2] + m[0][3] * DetCof[3];
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool IsFloat, bool Aligned>
+ struct compute_determinant_type{
+
+ GLM_FUNC_QUALIFIER static T call(mat<C, R, T, Q> const& m)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE,
+ "'determinant' only accept floating-point inputs, include <glm/ext/matrix_integer.hpp> to discard this restriction.");
+ return detail::compute_determinant<C, R, T, Q, detail::is_aligned<Q>::value>::call(m);
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_inverse{};
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_inverse<2, 2, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m)
+ {
+ T OneOverDeterminant = static_cast<T>(1) / (
+ + m[0][0] * m[1][1]
+ - m[1][0] * m[0][1]);
+
+ mat<2, 2, T, Q> Inverse(
+ + m[1][1] * OneOverDeterminant,
+ - m[0][1] * OneOverDeterminant,
+ - m[1][0] * OneOverDeterminant,
+ + m[0][0] * OneOverDeterminant);
+
+ return Inverse;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_inverse<3, 3, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m)
+ {
+ T OneOverDeterminant = static_cast<T>(1) / (
+ + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2])
+ - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2])
+ + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]));
+
+ mat<3, 3, T, Q> Inverse;
+ Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]) * OneOverDeterminant;
+ Inverse[1][0] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]) * OneOverDeterminant;
+ Inverse[2][0] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]) * OneOverDeterminant;
+ Inverse[0][1] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]) * OneOverDeterminant;
+ Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]) * OneOverDeterminant;
+ Inverse[2][1] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]) * OneOverDeterminant;
+ Inverse[0][2] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]) * OneOverDeterminant;
+ Inverse[1][2] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]) * OneOverDeterminant;
+ Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]) * OneOverDeterminant;
+
+ return Inverse;
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_inverse<4, 4, T, Q, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m)
+ {
+ T Coef00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ T Coef02 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ T Coef03 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+ T Coef04 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ T Coef06 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ T Coef07 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+ T Coef08 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ T Coef10 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ T Coef11 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+ T Coef12 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ T Coef14 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ T Coef15 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+ T Coef16 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ T Coef18 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ T Coef19 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+ T Coef20 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ T Coef22 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ T Coef23 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ vec<4, T, Q> Fac0(Coef00, Coef00, Coef02, Coef03);
+ vec<4, T, Q> Fac1(Coef04, Coef04, Coef06, Coef07);
+ vec<4, T, Q> Fac2(Coef08, Coef08, Coef10, Coef11);
+ vec<4, T, Q> Fac3(Coef12, Coef12, Coef14, Coef15);
+ vec<4, T, Q> Fac4(Coef16, Coef16, Coef18, Coef19);
+ vec<4, T, Q> Fac5(Coef20, Coef20, Coef22, Coef23);
+
+ vec<4, T, Q> Vec0(m[1][0], m[0][0], m[0][0], m[0][0]);
+ vec<4, T, Q> Vec1(m[1][1], m[0][1], m[0][1], m[0][1]);
+ vec<4, T, Q> Vec2(m[1][2], m[0][2], m[0][2], m[0][2]);
+ vec<4, T, Q> Vec3(m[1][3], m[0][3], m[0][3], m[0][3]);
+
+ vec<4, T, Q> Inv0(Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2);
+ vec<4, T, Q> Inv1(Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4);
+ vec<4, T, Q> Inv2(Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5);
+ vec<4, T, Q> Inv3(Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5);
+
+ vec<4, T, Q> SignA(+1, -1, +1, -1);
+ vec<4, T, Q> SignB(-1, +1, -1, +1);
+ mat<4, 4, T, Q> Inverse(Inv0 * SignA, Inv1 * SignB, Inv2 * SignA, Inv3 * SignB);
+
+ vec<4, T, Q> Row0(Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0]);
+
+ vec<4, T, Q> Dot0(m[0] * Row0);
+ T Dot1 = (Dot0.x + Dot0.y) + (Dot0.z + Dot0.w);
+
+ T OneOverDeterminant = static_cast<T>(1) / Dot1;
+
+ return Inverse * OneOverDeterminant;
+ }
+ };
+}//namespace detail
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<C, R, T, Q> matrixCompMult(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y)
+ {
+ return detail::compute_matrixCompMult_type<C, R, T, Q, std::numeric_limits<T>::is_iec559, detail::is_aligned<Q>::value>::call(x, y);
+ }
+
+ template<length_t DA, length_t DB, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename detail::outerProduct_trait<DA, DB, T, Q>::type outerProduct(vec<DA, T, Q> const& c, vec<DB, T, Q> const& r)
+ {
+ return detail::compute_outerProduct_type<DA, DB, T, Q, std::numeric_limits<T>::is_iec559>::call(c, r);
+ }
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<C, R, T, Q>::transpose_type transpose(mat<C, R, T, Q> const& m)
+ {
+ return detail::compute_transpose_type<C, R, T, Q, std::numeric_limits<T>::is_iec559, detail::is_aligned<Q>::value>::call(m);
+ }
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T determinant(mat<C, R, T, Q> const& m)
+ {
+ return detail::compute_determinant_type<C, R, T, Q, std::numeric_limits<T>::is_iec559, detail::is_aligned<Q>::value>::call(m);
+ }
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<C, R, T, Q> inverse(mat<C, R, T, Q> const& m)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'inverse' only accept floating-point inputs");
+ return detail::compute_inverse<C, R, T, Q, detail::is_aligned<Q>::value>::call(m);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_matrix_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/detail/func_matrix_simd.inl b/3rdparty/glm/source/glm/detail/func_matrix_simd.inl
new file mode 100644
index 0000000..f67ac66
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_matrix_simd.inl
@@ -0,0 +1,249 @@
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#include "type_mat4x4.hpp"
+#include "../geometric.hpp"
+#include "../simd/matrix.h"
+#include <cstring>
+
+namespace glm{
+namespace detail
+{
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ template<qualifier Q>
+ struct compute_matrixCompMult<4, 4, float, Q, true>
+ {
+ GLM_STATIC_ASSERT(detail::is_aligned<Q>::value, "Specialization requires aligned");
+
+ GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& x, mat<4, 4, float, Q> const& y)
+ {
+ mat<4, 4, float, Q> Result;
+ glm_mat4_matrixCompMult(
+ *static_cast<glm_vec4 const (*)[4]>(&x[0].data),
+ *static_cast<glm_vec4 const (*)[4]>(&y[0].data),
+ *static_cast<glm_vec4(*)[4]>(&Result[0].data));
+ return Result;
+ }
+ };
+# endif
+
+ template<qualifier Q>
+ struct compute_transpose<4, 4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m)
+ {
+ mat<4, 4, float, Q> Result;
+ glm_mat4_transpose(&m[0].data, &Result[0].data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_determinant<4, 4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static float call(mat<4, 4, float, Q> const& m)
+ {
+ return _mm_cvtss_f32(glm_mat4_determinant(&m[0].data));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_inverse<4, 4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m)
+ {
+ mat<4, 4, float, Q> Result;
+ glm_mat4_inverse(&m[0].data, &Result[0].data);
+ return Result;
+ }
+ };
+}//namespace detail
+
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ template<>
+ GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_lowp> outerProduct<4, 4, float, aligned_lowp>(vec<4, float, aligned_lowp> const& c, vec<4, float, aligned_lowp> const& r)
+ {
+ __m128 NativeResult[4];
+ glm_mat4_outerProduct(c.data, r.data, NativeResult);
+ mat<4, 4, float, aligned_lowp> Result;
+ std::memcpy(&Result[0], &NativeResult[0], sizeof(Result));
+ return Result;
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_mediump> outerProduct<4, 4, float, aligned_mediump>(vec<4, float, aligned_mediump> const& c, vec<4, float, aligned_mediump> const& r)
+ {
+ __m128 NativeResult[4];
+ glm_mat4_outerProduct(c.data, r.data, NativeResult);
+ mat<4, 4, float, aligned_mediump> Result;
+ std::memcpy(&Result[0], &NativeResult[0], sizeof(Result));
+ return Result;
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_highp> outerProduct<4, 4, float, aligned_highp>(vec<4, float, aligned_highp> const& c, vec<4, float, aligned_highp> const& r)
+ {
+ __m128 NativeResult[4];
+ glm_mat4_outerProduct(c.data, r.data, NativeResult);
+ mat<4, 4, float, aligned_highp> Result;
+ std::memcpy(&Result[0], &NativeResult[0], sizeof(Result));
+ return Result;
+ }
+# endif
+}//namespace glm
+
+#elif GLM_ARCH & GLM_ARCH_NEON_BIT
+
+namespace glm {
+#if GLM_LANG & GLM_LANG_CXX11_FLAG
+ template <qualifier Q>
+ GLM_FUNC_QUALIFIER
+ typename std::enable_if<detail::is_aligned<Q>::value, mat<4, 4, float, Q>>::type
+ operator*(mat<4, 4, float, Q> const & m1, mat<4, 4, float, Q> const & m2)
+ {
+ auto MulRow = [&](int l) {
+ float32x4_t const SrcA = m2[l].data;
+
+ float32x4_t r = neon::mul_lane(m1[0].data, SrcA, 0);
+ r = neon::madd_lane(r, m1[1].data, SrcA, 1);
+ r = neon::madd_lane(r, m1[2].data, SrcA, 2);
+ r = neon::madd_lane(r, m1[3].data, SrcA, 3);
+
+ return r;
+ };
+
+ mat<4, 4, float, aligned_highp> Result;
+ Result[0].data = MulRow(0);
+ Result[1].data = MulRow(1);
+ Result[2].data = MulRow(2);
+ Result[3].data = MulRow(3);
+
+ return Result;
+ }
+#endif // CXX11
+
+ template<qualifier Q>
+ struct detail::compute_inverse<4, 4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m)
+ {
+ float32x4_t const& m0 = m[0].data;
+ float32x4_t const& m1 = m[1].data;
+ float32x4_t const& m2 = m[2].data;
+ float32x4_t const& m3 = m[3].data;
+
+ // m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ // m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+ float32x4_t Fac0;
+ {
+ float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2));
+ float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3);
+ float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2);
+ float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3));
+ Fac0 = w0 * w1 - w2 * w3;
+ }
+
+ // m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ // m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+ float32x4_t Fac1;
+ {
+ float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1));
+ float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3);
+ float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1);
+ float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3));
+ Fac1 = w0 * w1 - w2 * w3;
+ }
+
+ // m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ // m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+ float32x4_t Fac2;
+ {
+ float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1));
+ float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2);
+ float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1);
+ float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2));
+ Fac2 = w0 * w1 - w2 * w3;
+ }
+
+ // m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ // m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+ float32x4_t Fac3;
+ {
+ float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0));
+ float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3);
+ float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0);
+ float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3));
+ Fac3 = w0 * w1 - w2 * w3;
+ }
+
+ // m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ // m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+ float32x4_t Fac4;
+ {
+ float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0));
+ float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2);
+ float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0);
+ float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2));
+ Fac4 = w0 * w1 - w2 * w3;
+ }
+
+ // m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ // m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ float32x4_t Fac5;
+ {
+ float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0));
+ float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1);
+ float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0);
+ float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1));
+ Fac5 = w0 * w1 - w2 * w3;
+ }
+
+ float32x4_t Vec0 = neon::copy_lane(neon::dupq_lane(m0, 0), 0, m1, 0); // (m[1][0], m[0][0], m[0][0], m[0][0]);
+ float32x4_t Vec1 = neon::copy_lane(neon::dupq_lane(m0, 1), 0, m1, 1); // (m[1][1], m[0][1], m[0][1], m[0][1]);
+ float32x4_t Vec2 = neon::copy_lane(neon::dupq_lane(m0, 2), 0, m1, 2); // (m[1][2], m[0][2], m[0][2], m[0][2]);
+ float32x4_t Vec3 = neon::copy_lane(neon::dupq_lane(m0, 3), 0, m1, 3); // (m[1][3], m[0][3], m[0][3], m[0][3]);
+
+ float32x4_t Inv0 = Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2;
+ float32x4_t Inv1 = Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4;
+ float32x4_t Inv2 = Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5;
+ float32x4_t Inv3 = Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5;
+
+ float32x4_t r0 = float32x4_t{-1, +1, -1, +1} * Inv0;
+ float32x4_t r1 = float32x4_t{+1, -1, +1, -1} * Inv1;
+ float32x4_t r2 = float32x4_t{-1, +1, -1, +1} * Inv2;
+ float32x4_t r3 = float32x4_t{+1, -1, +1, -1} * Inv3;
+
+ float32x4_t det = neon::mul_lane(r0, m0, 0);
+ det = neon::madd_lane(det, r1, m0, 1);
+ det = neon::madd_lane(det, r2, m0, 2);
+ det = neon::madd_lane(det, r3, m0, 3);
+
+ float32x4_t rdet = vdupq_n_f32(1 / vgetq_lane_f32(det, 0));
+
+ mat<4, 4, float, Q> r;
+ r[0].data = vmulq_f32(r0, rdet);
+ r[1].data = vmulq_f32(r1, rdet);
+ r[2].data = vmulq_f32(r2, rdet);
+ r[3].data = vmulq_f32(r3, rdet);
+ return r;
+ }
+ };
+}//namespace glm
+#endif
diff --git a/3rdparty/glm/source/glm/detail/func_packing.inl b/3rdparty/glm/source/glm/detail/func_packing.inl
new file mode 100644
index 0000000..234b093
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_packing.inl
@@ -0,0 +1,189 @@
+/// @ref core
+/// @file glm/detail/func_packing.inl
+
+#include "../common.hpp"
+#include "type_half.hpp"
+
+namespace glm
+{
+ GLM_FUNC_QUALIFIER uint packUnorm2x16(vec2 const& v)
+ {
+ union
+ {
+ unsigned short in[2];
+ uint out;
+ } u;
+
+ vec<2, unsigned short, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 65535.0f));
+
+ u.in[0] = result[0];
+ u.in[1] = result[1];
+
+ return u.out;
+ }
+
+ GLM_FUNC_QUALIFIER vec2 unpackUnorm2x16(uint p)
+ {
+ union
+ {
+ uint in;
+ unsigned short out[2];
+ } u;
+
+ u.in = p;
+
+ return vec2(u.out[0], u.out[1]) * 1.5259021896696421759365224689097e-5f;
+ }
+
+ GLM_FUNC_QUALIFIER uint packSnorm2x16(vec2 const& v)
+ {
+ union
+ {
+ signed short in[2];
+ uint out;
+ } u;
+
+ vec<2, short, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 32767.0f));
+
+ u.in[0] = result[0];
+ u.in[1] = result[1];
+
+ return u.out;
+ }
+
+ GLM_FUNC_QUALIFIER vec2 unpackSnorm2x16(uint p)
+ {
+ union
+ {
+ uint in;
+ signed short out[2];
+ } u;
+
+ u.in = p;
+
+ return clamp(vec2(u.out[0], u.out[1]) * 3.0518509475997192297128208258309e-5f, -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER uint packUnorm4x8(vec4 const& v)
+ {
+ union
+ {
+ unsigned char in[4];
+ uint out;
+ } u;
+
+ vec<4, unsigned char, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 255.0f));
+
+ u.in[0] = result[0];
+ u.in[1] = result[1];
+ u.in[2] = result[2];
+ u.in[3] = result[3];
+
+ return u.out;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackUnorm4x8(uint p)
+ {
+ union
+ {
+ uint in;
+ unsigned char out[4];
+ } u;
+
+ u.in = p;
+
+ return vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0039215686274509803921568627451f;
+ }
+
+ GLM_FUNC_QUALIFIER uint packSnorm4x8(vec4 const& v)
+ {
+ union
+ {
+ signed char in[4];
+ uint out;
+ } u;
+
+ vec<4, signed char, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 127.0f));
+
+ u.in[0] = result[0];
+ u.in[1] = result[1];
+ u.in[2] = result[2];
+ u.in[3] = result[3];
+
+ return u.out;
+ }
+
+ GLM_FUNC_QUALIFIER glm::vec4 unpackSnorm4x8(uint p)
+ {
+ union
+ {
+ uint in;
+ signed char out[4];
+ } u;
+
+ u.in = p;
+
+ return clamp(vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0078740157480315f, -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER double packDouble2x32(uvec2 const& v)
+ {
+ union
+ {
+ uint in[2];
+ double out;
+ } u;
+
+ u.in[0] = v[0];
+ u.in[1] = v[1];
+
+ return u.out;
+ }
+
+ GLM_FUNC_QUALIFIER uvec2 unpackDouble2x32(double v)
+ {
+ union
+ {
+ double in;
+ uint out[2];
+ } u;
+
+ u.in = v;
+
+ return uvec2(u.out[0], u.out[1]);
+ }
+
+ GLM_FUNC_QUALIFIER uint packHalf2x16(vec2 const& v)
+ {
+ union
+ {
+ signed short in[2];
+ uint out;
+ } u;
+
+ u.in[0] = detail::toFloat16(v.x);
+ u.in[1] = detail::toFloat16(v.y);
+
+ return u.out;
+ }
+
+ GLM_FUNC_QUALIFIER vec2 unpackHalf2x16(uint v)
+ {
+ union
+ {
+ uint in;
+ signed short out[2];
+ } u;
+
+ u.in = v;
+
+ return vec2(
+ detail::toFloat32(u.out[0]),
+ detail::toFloat32(u.out[1]));
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_packing_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/detail/func_packing_simd.inl b/3rdparty/glm/source/glm/detail/func_packing_simd.inl
new file mode 100644
index 0000000..fd0fe8b
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_packing_simd.inl
@@ -0,0 +1,6 @@
+namespace glm{
+namespace detail
+{
+
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/func_trigonometric.inl b/3rdparty/glm/source/glm/detail/func_trigonometric.inl
new file mode 100644
index 0000000..e129dce
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_trigonometric.inl
@@ -0,0 +1,197 @@
+#include "_vectorize.hpp"
+#include <cmath>
+#include <limits>
+
+namespace glm
+{
+ // radians
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType radians(genType degrees)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'radians' only accept floating-point input");
+
+ return degrees * static_cast<genType>(0.01745329251994329576923690768489);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> radians(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(radians, v);
+ }
+
+ // degrees
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType degrees(genType radians)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'degrees' only accept floating-point input");
+
+ return radians * static_cast<genType>(57.295779513082320876798154814105);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> degrees(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(degrees, v);
+ }
+
+ // sin
+ using ::std::sin;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> sin(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(sin, v);
+ }
+
+ // cos
+ using std::cos;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> cos(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(cos, v);
+ }
+
+ // tan
+ using std::tan;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> tan(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(tan, v);
+ }
+
+ // asin
+ using std::asin;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> asin(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(asin, v);
+ }
+
+ // acos
+ using std::acos;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> acos(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(acos, v);
+ }
+
+ // atan
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType atan(genType y, genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'atan' only accept floating-point input");
+
+ return ::std::atan2(y, x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> atan(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ return detail::functor2<vec, L, T, Q>::call(::std::atan2, a, b);
+ }
+
+ using std::atan;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> atan(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(atan, v);
+ }
+
+ // sinh
+ using std::sinh;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> sinh(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(sinh, v);
+ }
+
+ // cosh
+ using std::cosh;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> cosh(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(cosh, v);
+ }
+
+ // tanh
+ using std::tanh;
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> tanh(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(tanh, v);
+ }
+
+ // asinh
+# if GLM_HAS_CXX11_STL
+ using std::asinh;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType asinh(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'asinh' only accept floating-point input");
+
+ return (x < static_cast<genType>(0) ? static_cast<genType>(-1) : (x > static_cast<genType>(0) ? static_cast<genType>(1) : static_cast<genType>(0))) * log(std::abs(x) + sqrt(static_cast<genType>(1) + x * x));
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> asinh(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(asinh, v);
+ }
+
+ // acosh
+# if GLM_HAS_CXX11_STL
+ using std::acosh;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType acosh(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acosh' only accept floating-point input");
+
+ if(x < static_cast<genType>(1))
+ return static_cast<genType>(0);
+ return log(x + sqrt(x * x - static_cast<genType>(1)));
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> acosh(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(acosh, v);
+ }
+
+ // atanh
+# if GLM_HAS_CXX11_STL
+ using std::atanh;
+# else
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType atanh(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'atanh' only accept floating-point input");
+
+ if(std::abs(x) >= static_cast<genType>(1))
+ return 0;
+ return static_cast<genType>(0.5) * log((static_cast<genType>(1) + x) / (static_cast<genType>(1) - x));
+ }
+# endif
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> atanh(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(atanh, v);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_trigonometric_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source b/3rdparty/glm/source/glm/detail/func_trigonometric_simd.inl
index e69de29..e69de29 100644
--- a/3rdparty/glm/source
+++ b/3rdparty/glm/source/glm/detail/func_trigonometric_simd.inl
diff --git a/3rdparty/glm/source/glm/detail/func_vector_relational.inl b/3rdparty/glm/source/glm/detail/func_vector_relational.inl
new file mode 100644
index 0000000..80c9e87
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_vector_relational.inl
@@ -0,0 +1,87 @@
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> lessThan(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = x[i] < y[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> lessThanEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = x[i] <= y[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> greaterThan(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = x[i] > y[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> greaterThanEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = x[i] >= y[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = x[i] == y[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = x[i] != y[i];
+ return Result;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool any(vec<L, bool, Q> const& v)
+ {
+ bool Result = false;
+ for(length_t i = 0; i < L; ++i)
+ Result = Result || v[i];
+ return Result;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool all(vec<L, bool, Q> const& v)
+ {
+ bool Result = true;
+ for(length_t i = 0; i < L; ++i)
+ Result = Result && v[i];
+ return Result;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> not_(vec<L, bool, Q> const& v)
+ {
+ vec<L, bool, Q> Result(true);
+ for(length_t i = 0; i < L; ++i)
+ Result[i] = !v[i];
+ return Result;
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "func_vector_relational_simd.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/func_vector_relational_simd.inl b/3rdparty/glm/source/glm/detail/func_vector_relational_simd.inl
new file mode 100644
index 0000000..fd0fe8b
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/func_vector_relational_simd.inl
@@ -0,0 +1,6 @@
+namespace glm{
+namespace detail
+{
+
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/glm.cpp b/3rdparty/glm/source/glm/detail/glm.cpp
new file mode 100644
index 0000000..e0755bd
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/glm.cpp
@@ -0,0 +1,263 @@
+/// @ref core
+/// @file glm/glm.cpp
+
+#ifndef GLM_ENABLE_EXPERIMENTAL
+#define GLM_ENABLE_EXPERIMENTAL
+#endif
+#include <glm/gtx/dual_quaternion.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/ext/scalar_int_sized.hpp>
+#include <glm/ext/scalar_uint_sized.hpp>
+#include <glm/glm.hpp>
+
+namespace glm
+{
+// tvec1 type explicit instantiation
+template struct vec<1, uint8, lowp>;
+template struct vec<1, uint16, lowp>;
+template struct vec<1, uint32, lowp>;
+template struct vec<1, uint64, lowp>;
+template struct vec<1, int8, lowp>;
+template struct vec<1, int16, lowp>;
+template struct vec<1, int32, lowp>;
+template struct vec<1, int64, lowp>;
+template struct vec<1, float32, lowp>;
+template struct vec<1, float64, lowp>;
+
+template struct vec<1, uint8, mediump>;
+template struct vec<1, uint16, mediump>;
+template struct vec<1, uint32, mediump>;
+template struct vec<1, uint64, mediump>;
+template struct vec<1, int8, mediump>;
+template struct vec<1, int16, mediump>;
+template struct vec<1, int32, mediump>;
+template struct vec<1, int64, mediump>;
+template struct vec<1, float32, mediump>;
+template struct vec<1, float64, mediump>;
+
+template struct vec<1, uint8, highp>;
+template struct vec<1, uint16, highp>;
+template struct vec<1, uint32, highp>;
+template struct vec<1, uint64, highp>;
+template struct vec<1, int8, highp>;
+template struct vec<1, int16, highp>;
+template struct vec<1, int32, highp>;
+template struct vec<1, int64, highp>;
+template struct vec<1, float32, highp>;
+template struct vec<1, float64, highp>;
+
+// tvec2 type explicit instantiation
+template struct vec<2, uint8, lowp>;
+template struct vec<2, uint16, lowp>;
+template struct vec<2, uint32, lowp>;
+template struct vec<2, uint64, lowp>;
+template struct vec<2, int8, lowp>;
+template struct vec<2, int16, lowp>;
+template struct vec<2, int32, lowp>;
+template struct vec<2, int64, lowp>;
+template struct vec<2, float32, lowp>;
+template struct vec<2, float64, lowp>;
+
+template struct vec<2, uint8, mediump>;
+template struct vec<2, uint16, mediump>;
+template struct vec<2, uint32, mediump>;
+template struct vec<2, uint64, mediump>;
+template struct vec<2, int8, mediump>;
+template struct vec<2, int16, mediump>;
+template struct vec<2, int32, mediump>;
+template struct vec<2, int64, mediump>;
+template struct vec<2, float32, mediump>;
+template struct vec<2, float64, mediump>;
+
+template struct vec<2, uint8, highp>;
+template struct vec<2, uint16, highp>;
+template struct vec<2, uint32, highp>;
+template struct vec<2, uint64, highp>;
+template struct vec<2, int8, highp>;
+template struct vec<2, int16, highp>;
+template struct vec<2, int32, highp>;
+template struct vec<2, int64, highp>;
+template struct vec<2, float32, highp>;
+template struct vec<2, float64, highp>;
+
+// tvec3 type explicit instantiation
+template struct vec<3, uint8, lowp>;
+template struct vec<3, uint16, lowp>;
+template struct vec<3, uint32, lowp>;
+template struct vec<3, uint64, lowp>;
+template struct vec<3, int8, lowp>;
+template struct vec<3, int16, lowp>;
+template struct vec<3, int32, lowp>;
+template struct vec<3, int64, lowp>;
+template struct vec<3, float32, lowp>;
+template struct vec<3, float64, lowp>;
+
+template struct vec<3, uint8, mediump>;
+template struct vec<3, uint16, mediump>;
+template struct vec<3, uint32, mediump>;
+template struct vec<3, uint64, mediump>;
+template struct vec<3, int8, mediump>;
+template struct vec<3, int16, mediump>;
+template struct vec<3, int32, mediump>;
+template struct vec<3, int64, mediump>;
+template struct vec<3, float32, mediump>;
+template struct vec<3, float64, mediump>;
+
+template struct vec<3, uint8, highp>;
+template struct vec<3, uint16, highp>;
+template struct vec<3, uint32, highp>;
+template struct vec<3, uint64, highp>;
+template struct vec<3, int8, highp>;
+template struct vec<3, int16, highp>;
+template struct vec<3, int32, highp>;
+template struct vec<3, int64, highp>;
+template struct vec<3, float32, highp>;
+template struct vec<3, float64, highp>;
+
+// tvec4 type explicit instantiation
+template struct vec<4, uint8, lowp>;
+template struct vec<4, uint16, lowp>;
+template struct vec<4, uint32, lowp>;
+template struct vec<4, uint64, lowp>;
+template struct vec<4, int8, lowp>;
+template struct vec<4, int16, lowp>;
+template struct vec<4, int32, lowp>;
+template struct vec<4, int64, lowp>;
+template struct vec<4, float32, lowp>;
+template struct vec<4, float64, lowp>;
+
+template struct vec<4, uint8, mediump>;
+template struct vec<4, uint16, mediump>;
+template struct vec<4, uint32, mediump>;
+template struct vec<4, uint64, mediump>;
+template struct vec<4, int8, mediump>;
+template struct vec<4, int16, mediump>;
+template struct vec<4, int32, mediump>;
+template struct vec<4, int64, mediump>;
+template struct vec<4, float32, mediump>;
+template struct vec<4, float64, mediump>;
+
+template struct vec<4, uint8, highp>;
+template struct vec<4, uint16, highp>;
+template struct vec<4, uint32, highp>;
+template struct vec<4, uint64, highp>;
+template struct vec<4, int8, highp>;
+template struct vec<4, int16, highp>;
+template struct vec<4, int32, highp>;
+template struct vec<4, int64, highp>;
+template struct vec<4, float32, highp>;
+template struct vec<4, float64, highp>;
+
+// tmat2x2 type explicit instantiation
+template struct mat<2, 2, float32, lowp>;
+template struct mat<2, 2, float64, lowp>;
+
+template struct mat<2, 2, float32, mediump>;
+template struct mat<2, 2, float64, mediump>;
+
+template struct mat<2, 2, float32, highp>;
+template struct mat<2, 2, float64, highp>;
+
+// tmat2x3 type explicit instantiation
+template struct mat<2, 3, float32, lowp>;
+template struct mat<2, 3, float64, lowp>;
+
+template struct mat<2, 3, float32, mediump>;
+template struct mat<2, 3, float64, mediump>;
+
+template struct mat<2, 3, float32, highp>;
+template struct mat<2, 3, float64, highp>;
+
+// tmat2x4 type explicit instantiation
+template struct mat<2, 4, float32, lowp>;
+template struct mat<2, 4, float64, lowp>;
+
+template struct mat<2, 4, float32, mediump>;
+template struct mat<2, 4, float64, mediump>;
+
+template struct mat<2, 4, float32, highp>;
+template struct mat<2, 4, float64, highp>;
+
+// tmat3x2 type explicit instantiation
+template struct mat<3, 2, float32, lowp>;
+template struct mat<3, 2, float64, lowp>;
+
+template struct mat<3, 2, float32, mediump>;
+template struct mat<3, 2, float64, mediump>;
+
+template struct mat<3, 2, float32, highp>;
+template struct mat<3, 2, float64, highp>;
+
+// tmat3x3 type explicit instantiation
+template struct mat<3, 3, float32, lowp>;
+template struct mat<3, 3, float64, lowp>;
+
+template struct mat<3, 3, float32, mediump>;
+template struct mat<3, 3, float64, mediump>;
+
+template struct mat<3, 3, float32, highp>;
+template struct mat<3, 3, float64, highp>;
+
+// tmat3x4 type explicit instantiation
+template struct mat<3, 4, float32, lowp>;
+template struct mat<3, 4, float64, lowp>;
+
+template struct mat<3, 4, float32, mediump>;
+template struct mat<3, 4, float64, mediump>;
+
+template struct mat<3, 4, float32, highp>;
+template struct mat<3, 4, float64, highp>;
+
+// tmat4x2 type explicit instantiation
+template struct mat<4, 2, float32, lowp>;
+template struct mat<4, 2, float64, lowp>;
+
+template struct mat<4, 2, float32, mediump>;
+template struct mat<4, 2, float64, mediump>;
+
+template struct mat<4, 2, float32, highp>;
+template struct mat<4, 2, float64, highp>;
+
+// tmat4x3 type explicit instantiation
+template struct mat<4, 3, float32, lowp>;
+template struct mat<4, 3, float64, lowp>;
+
+template struct mat<4, 3, float32, mediump>;
+template struct mat<4, 3, float64, mediump>;
+
+template struct mat<4, 3, float32, highp>;
+template struct mat<4, 3, float64, highp>;
+
+// tmat4x4 type explicit instantiation
+template struct mat<4, 4, float32, lowp>;
+template struct mat<4, 4, float64, lowp>;
+
+template struct mat<4, 4, float32, mediump>;
+template struct mat<4, 4, float64, mediump>;
+
+template struct mat<4, 4, float32, highp>;
+template struct mat<4, 4, float64, highp>;
+
+// tquat type explicit instantiation
+template struct qua<float32, lowp>;
+template struct qua<float64, lowp>;
+
+template struct qua<float32, mediump>;
+template struct qua<float64, mediump>;
+
+template struct qua<float32, highp>;
+template struct qua<float64, highp>;
+
+//tdualquat type explicit instantiation
+template struct tdualquat<float32, lowp>;
+template struct tdualquat<float64, lowp>;
+
+template struct tdualquat<float32, mediump>;
+template struct tdualquat<float64, mediump>;
+
+template struct tdualquat<float32, highp>;
+template struct tdualquat<float64, highp>;
+
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/detail/qualifier.hpp b/3rdparty/glm/source/glm/detail/qualifier.hpp
new file mode 100644
index 0000000..b6c9df0
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/qualifier.hpp
@@ -0,0 +1,230 @@
+#pragma once
+
+#include "setup.hpp"
+
+namespace glm
+{
+ /// Qualify GLM types in term of alignment (packed, aligned) and precision in term of ULPs (lowp, mediump, highp)
+ enum qualifier
+ {
+ packed_highp, ///< Typed data is tightly packed in memory and operations are executed with high precision in term of ULPs
+ packed_mediump, ///< Typed data is tightly packed in memory and operations are executed with medium precision in term of ULPs for higher performance
+ packed_lowp, ///< Typed data is tightly packed in memory and operations are executed with low precision in term of ULPs to maximize performance
+
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ aligned_highp, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs
+ aligned_mediump, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with medium precision in term of ULPs for higher performance
+ aligned_lowp, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with low precision in term of ULPs to maximize performance
+ aligned = aligned_highp, ///< By default aligned qualifier is also high precision
+# endif
+
+ highp = packed_highp, ///< By default highp qualifier is also packed
+ mediump = packed_mediump, ///< By default mediump qualifier is also packed
+ lowp = packed_lowp, ///< By default lowp qualifier is also packed
+ packed = packed_highp, ///< By default packed qualifier is also high precision
+
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE && defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES)
+ defaultp = aligned_highp
+# else
+ defaultp = highp
+# endif
+ };
+
+ typedef qualifier precision;
+
+ template<length_t L, typename T, qualifier Q = defaultp> struct vec;
+ template<length_t C, length_t R, typename T, qualifier Q = defaultp> struct mat;
+ template<typename T, qualifier Q = defaultp> struct qua;
+
+# if GLM_HAS_TEMPLATE_ALIASES
+ template <typename T, qualifier Q = defaultp> using tvec1 = vec<1, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tvec2 = vec<2, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tvec3 = vec<3, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tvec4 = vec<4, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat2x2 = mat<2, 2, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat2x3 = mat<2, 3, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat2x4 = mat<2, 4, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat3x2 = mat<3, 2, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat3x3 = mat<3, 3, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat3x4 = mat<3, 4, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat4x2 = mat<4, 2, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat4x3 = mat<4, 3, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tmat4x4 = mat<4, 4, T, Q>;
+ template <typename T, qualifier Q = defaultp> using tquat = qua<T, Q>;
+# endif
+
+namespace detail
+{
+ template<glm::qualifier P>
+ struct is_aligned
+ {
+ static const bool value = false;
+ };
+
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ template<>
+ struct is_aligned<glm::aligned_lowp>
+ {
+ static const bool value = true;
+ };
+
+ template<>
+ struct is_aligned<glm::aligned_mediump>
+ {
+ static const bool value = true;
+ };
+
+ template<>
+ struct is_aligned<glm::aligned_highp>
+ {
+ static const bool value = true;
+ };
+# endif
+
+ template<length_t L, typename T, bool is_aligned>
+ struct storage
+ {
+ typedef struct type {
+ T data[L];
+ } type;
+ };
+
+# if GLM_HAS_ALIGNOF
+ template<length_t L, typename T>
+ struct storage<L, T, true>
+ {
+ typedef struct alignas(L * sizeof(T)) type {
+ T data[L];
+ } type;
+ };
+
+ template<typename T>
+ struct storage<3, T, true>
+ {
+ typedef struct alignas(4 * sizeof(T)) type {
+ T data[4];
+ } type;
+ };
+# endif
+
+# if GLM_ARCH & GLM_ARCH_SSE2_BIT
+ template<>
+ struct storage<4, float, true>
+ {
+ typedef glm_f32vec4 type;
+ };
+
+ template<>
+ struct storage<4, int, true>
+ {
+ typedef glm_i32vec4 type;
+ };
+
+ template<>
+ struct storage<4, unsigned int, true>
+ {
+ typedef glm_u32vec4 type;
+ };
+
+ template<>
+ struct storage<2, double, true>
+ {
+ typedef glm_f64vec2 type;
+ };
+
+ template<>
+ struct storage<2, detail::int64, true>
+ {
+ typedef glm_i64vec2 type;
+ };
+
+ template<>
+ struct storage<2, detail::uint64, true>
+ {
+ typedef glm_u64vec2 type;
+ };
+# endif
+
+# if (GLM_ARCH & GLM_ARCH_AVX_BIT)
+ template<>
+ struct storage<4, double, true>
+ {
+ typedef glm_f64vec4 type;
+ };
+# endif
+
+# if (GLM_ARCH & GLM_ARCH_AVX2_BIT)
+ template<>
+ struct storage<4, detail::int64, true>
+ {
+ typedef glm_i64vec4 type;
+ };
+
+ template<>
+ struct storage<4, detail::uint64, true>
+ {
+ typedef glm_u64vec4 type;
+ };
+# endif
+
+# if GLM_ARCH & GLM_ARCH_NEON_BIT
+ template<>
+ struct storage<4, float, true>
+ {
+ typedef glm_f32vec4 type;
+ };
+
+ template<>
+ struct storage<4, int, true>
+ {
+ typedef glm_i32vec4 type;
+ };
+
+ template<>
+ struct storage<4, unsigned int, true>
+ {
+ typedef glm_u32vec4 type;
+ };
+# endif
+
+ enum genTypeEnum
+ {
+ GENTYPE_VEC,
+ GENTYPE_MAT,
+ GENTYPE_QUAT
+ };
+
+ template <typename genType>
+ struct genTypeTrait
+ {};
+
+ template <length_t C, length_t R, typename T>
+ struct genTypeTrait<mat<C, R, T> >
+ {
+ static const genTypeEnum GENTYPE = GENTYPE_MAT;
+ };
+
+ template<typename genType, genTypeEnum type>
+ struct init_gentype
+ {
+ };
+
+ template<typename genType>
+ struct init_gentype<genType, GENTYPE_QUAT>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity()
+ {
+ return genType(1, 0, 0, 0);
+ }
+ };
+
+ template<typename genType>
+ struct init_gentype<genType, GENTYPE_MAT>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity()
+ {
+ return genType(1);
+ }
+ };
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/setup.hpp b/3rdparty/glm/source/glm/detail/setup.hpp
new file mode 100644
index 0000000..51a6f49
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/setup.hpp
@@ -0,0 +1,1156 @@
+#ifndef GLM_SETUP_INCLUDED
+
+#include <cassert>
+#include <cstddef>
+
+#define GLM_VERSION_MAJOR 0
+#define GLM_VERSION_MINOR 9
+#define GLM_VERSION_PATCH 9
+#define GLM_VERSION_REVISION 9
+#define GLM_VERSION 999
+#define GLM_VERSION_MESSAGE "GLM: version 0.9.9.9"
+
+#define GLM_SETUP_INCLUDED GLM_VERSION
+
+///////////////////////////////////////////////////////////////////////////////////
+// Active states
+
+#define GLM_DISABLE 0
+#define GLM_ENABLE 1
+
+///////////////////////////////////////////////////////////////////////////////////
+// Messages
+
+#if defined(GLM_FORCE_MESSAGES)
+# define GLM_MESSAGES GLM_ENABLE
+#else
+# define GLM_MESSAGES GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Detect the platform
+
+#include "../simd/platform.h"
+
+///////////////////////////////////////////////////////////////////////////////////
+// Build model
+
+#if defined(_M_ARM64) || defined(__LP64__) || defined(_M_X64) || defined(__ppc64__) || defined(__x86_64__)
+# define GLM_MODEL GLM_MODEL_64
+#elif defined(__i386__) || defined(__ppc__) || defined(__ILP32__) || defined(_M_ARM)
+# define GLM_MODEL GLM_MODEL_32
+#else
+# define GLM_MODEL GLM_MODEL_32
+#endif//
+
+#if !defined(GLM_MODEL) && GLM_COMPILER != 0
+# error "GLM_MODEL undefined, your compiler may not be supported by GLM. Add #define GLM_MODEL 0 to ignore this message."
+#endif//GLM_MODEL
+
+///////////////////////////////////////////////////////////////////////////////////
+// C++ Version
+
+// User defines: GLM_FORCE_CXX98, GLM_FORCE_CXX03, GLM_FORCE_CXX11, GLM_FORCE_CXX14, GLM_FORCE_CXX17, GLM_FORCE_CXX2A
+
+#define GLM_LANG_CXX98_FLAG (1 << 1)
+#define GLM_LANG_CXX03_FLAG (1 << 2)
+#define GLM_LANG_CXX0X_FLAG (1 << 3)
+#define GLM_LANG_CXX11_FLAG (1 << 4)
+#define GLM_LANG_CXX14_FLAG (1 << 5)
+#define GLM_LANG_CXX17_FLAG (1 << 6)
+#define GLM_LANG_CXX2A_FLAG (1 << 7)
+#define GLM_LANG_CXXMS_FLAG (1 << 8)
+#define GLM_LANG_CXXGNU_FLAG (1 << 9)
+
+#define GLM_LANG_CXX98 GLM_LANG_CXX98_FLAG
+#define GLM_LANG_CXX03 (GLM_LANG_CXX98 | GLM_LANG_CXX03_FLAG)
+#define GLM_LANG_CXX0X (GLM_LANG_CXX03 | GLM_LANG_CXX0X_FLAG)
+#define GLM_LANG_CXX11 (GLM_LANG_CXX0X | GLM_LANG_CXX11_FLAG)
+#define GLM_LANG_CXX14 (GLM_LANG_CXX11 | GLM_LANG_CXX14_FLAG)
+#define GLM_LANG_CXX17 (GLM_LANG_CXX14 | GLM_LANG_CXX17_FLAG)
+#define GLM_LANG_CXX2A (GLM_LANG_CXX17 | GLM_LANG_CXX2A_FLAG)
+#define GLM_LANG_CXXMS GLM_LANG_CXXMS_FLAG
+#define GLM_LANG_CXXGNU GLM_LANG_CXXGNU_FLAG
+
+#if (defined(_MSC_EXTENSIONS))
+# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG
+#elif ((GLM_COMPILER & (GLM_COMPILER_CLANG | GLM_COMPILER_GCC)) && (GLM_ARCH & GLM_ARCH_SIMD_BIT))
+# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG
+#else
+# define GLM_LANG_EXT 0
+#endif
+
+#if (defined(GLM_FORCE_CXX_UNKNOWN))
+# define GLM_LANG 0
+#elif defined(GLM_FORCE_CXX2A)
+# define GLM_LANG (GLM_LANG_CXX2A | GLM_LANG_EXT)
+# define GLM_LANG_STL11_FORCED
+#elif defined(GLM_FORCE_CXX17)
+# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT)
+# define GLM_LANG_STL11_FORCED
+#elif defined(GLM_FORCE_CXX14)
+# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT)
+# define GLM_LANG_STL11_FORCED
+#elif defined(GLM_FORCE_CXX11)
+# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT)
+# define GLM_LANG_STL11_FORCED
+#elif defined(GLM_FORCE_CXX03)
+# define GLM_LANG (GLM_LANG_CXX03 | GLM_LANG_EXT)
+#elif defined(GLM_FORCE_CXX98)
+# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT)
+#else
+# if GLM_COMPILER & GLM_COMPILER_VC && defined(_MSVC_LANG)
+# if GLM_COMPILER >= GLM_COMPILER_VC15_7
+# define GLM_LANG_PLATFORM _MSVC_LANG
+# elif GLM_COMPILER >= GLM_COMPILER_VC15
+# if _MSVC_LANG > 201402L
+# define GLM_LANG_PLATFORM 201402L
+# else
+# define GLM_LANG_PLATFORM _MSVC_LANG
+# endif
+# else
+# define GLM_LANG_PLATFORM 0
+# endif
+# else
+# define GLM_LANG_PLATFORM 0
+# endif
+
+# if __cplusplus > 201703L || GLM_LANG_PLATFORM > 201703L
+# define GLM_LANG (GLM_LANG_CXX2A | GLM_LANG_EXT)
+# elif __cplusplus == 201703L || GLM_LANG_PLATFORM == 201703L
+# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT)
+# elif __cplusplus == 201402L || __cplusplus == 201406L || __cplusplus == 201500L || GLM_LANG_PLATFORM == 201402L
+# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT)
+# elif __cplusplus == 201103L || GLM_LANG_PLATFORM == 201103L
+# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT)
+# elif defined(__INTEL_CXX11_MODE__) || defined(_MSC_VER) || defined(__GXX_EXPERIMENTAL_CXX0X__)
+# define GLM_LANG (GLM_LANG_CXX0X | GLM_LANG_EXT)
+# elif __cplusplus == 199711L
+# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT)
+# else
+# define GLM_LANG (0 | GLM_LANG_EXT)
+# endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Availability of C++ features
+
+// http://clang.llvm.org/cxx_status.html
+// http://gcc.gnu.org/projects/cxx0x.html
+// http://msdn.microsoft.com/en-us/library/vstudio/hh567368(v=vs.120).aspx
+
+// Android has multiple STLs but C++11 STL detection doesn't always work #284 #564
+#if GLM_PLATFORM == GLM_PLATFORM_ANDROID && !defined(GLM_LANG_STL11_FORCED)
+# define GLM_HAS_CXX11_STL 0
+#elif (GLM_COMPILER & GLM_COMPILER_CUDA_RTC) == GLM_COMPILER_CUDA_RTC
+# define GLM_HAS_CXX11_STL 0
+#elif (GLM_COMPILER & GLM_COMPILER_HIP)
+# define GLM_HAS_CXX11_STL 0
+#elif GLM_COMPILER & GLM_COMPILER_CLANG
+# if (defined(_LIBCPP_VERSION) || (GLM_LANG & GLM_LANG_CXX11_FLAG) || defined(GLM_LANG_STL11_FORCED))
+# define GLM_HAS_CXX11_STL 1
+# else
+# define GLM_HAS_CXX11_STL 0
+# endif
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_CXX11_STL 1
+#else
+# define GLM_HAS_CXX11_STL ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC48)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \
+ ((GLM_PLATFORM != GLM_PLATFORM_WINDOWS) && (GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15))))
+#endif
+
+// N1720
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_STATIC_ASSERT __has_feature(cxx_static_assert)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_STATIC_ASSERT 1
+#else
+# define GLM_HAS_STATIC_ASSERT ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N1988
+#if GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_EXTENDED_INTEGER_TYPE 1
+#else
+# define GLM_HAS_EXTENDED_INTEGER_TYPE (\
+ ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_VC)) || \
+ ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CLANG)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP)))
+#endif
+
+// N2672 Initializer lists http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_INITIALIZER_LISTS __has_feature(cxx_generalized_initializers)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_INITIALIZER_LISTS 1
+#else
+# define GLM_HAS_INITIALIZER_LISTS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \
+ ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N2544 Unrestricted unions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_UNRESTRICTED_UNIONS __has_feature(cxx_unrestricted_unions)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_UNRESTRICTED_UNIONS 1
+#else
+# define GLM_HAS_UNRESTRICTED_UNIONS (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ (GLM_COMPILER & GLM_COMPILER_VC) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP)))
+#endif
+
+// N2346
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_DEFAULTED_FUNCTIONS __has_feature(cxx_defaulted_functions)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_DEFAULTED_FUNCTIONS 1
+#else
+# define GLM_HAS_DEFAULTED_FUNCTIONS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \
+ ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \
+ (GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP)))
+#endif
+
+// N2118
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_RVALUE_REFERENCES __has_feature(cxx_rvalue_references)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_RVALUE_REFERENCES 1
+#else
+# define GLM_HAS_RVALUE_REFERENCES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_VC)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N2437 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS __has_feature(cxx_explicit_conversions)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS 1
+#else
+# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N2258 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_TEMPLATE_ALIASES __has_feature(cxx_alias_templates)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_TEMPLATE_ALIASES 1
+#else
+# define GLM_HAS_TEMPLATE_ALIASES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N2930 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_RANGE_FOR __has_feature(cxx_range_for)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_RANGE_FOR 1
+#else
+# define GLM_HAS_RANGE_FOR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N2341 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf
+#if GLM_COMPILER & GLM_COMPILER_CLANG
+# define GLM_HAS_ALIGNOF __has_feature(cxx_alignas)
+#elif GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_ALIGNOF 1
+#else
+# define GLM_HAS_ALIGNOF ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+// N2235 Generalized Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf
+// N3652 Extended Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3652.html
+#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) // Compiler SIMD intrinsics don't support constexpr...
+# define GLM_HAS_CONSTEXPR 0
+#elif (GLM_COMPILER & GLM_COMPILER_CLANG)
+# define GLM_HAS_CONSTEXPR __has_feature(cxx_relaxed_constexpr)
+#elif (GLM_LANG & GLM_LANG_CXX14_FLAG)
+# define GLM_HAS_CONSTEXPR 1
+#else
+# define GLM_HAS_CONSTEXPR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && GLM_HAS_INITIALIZER_LISTS && (\
+ ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL17)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15))))
+#endif
+
+#if GLM_HAS_CONSTEXPR
+# define GLM_CONSTEXPR constexpr
+#else
+# define GLM_CONSTEXPR
+#endif
+
+//
+#if GLM_HAS_CONSTEXPR
+# if (GLM_COMPILER & GLM_COMPILER_CLANG)
+# if __has_feature(cxx_if_constexpr)
+# define GLM_HAS_IF_CONSTEXPR 1
+# else
+# define GLM_HAS_IF_CONSTEXPR 0
+# endif
+# elif (GLM_LANG & GLM_LANG_CXX17_FLAG)
+# define GLM_HAS_IF_CONSTEXPR 1
+# else
+# define GLM_HAS_IF_CONSTEXPR 0
+# endif
+#else
+# define GLM_HAS_IF_CONSTEXPR 0
+#endif
+
+#if GLM_HAS_IF_CONSTEXPR
+# define GLM_IF_CONSTEXPR if constexpr
+#else
+# define GLM_IF_CONSTEXPR if
+#endif
+
+//
+#if GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_ASSIGNABLE 1
+#else
+# define GLM_HAS_ASSIGNABLE ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \
+ ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC49))))
+#endif
+
+//
+#define GLM_HAS_TRIVIAL_QUERIES 0
+
+//
+#if GLM_LANG & GLM_LANG_CXX11_FLAG
+# define GLM_HAS_MAKE_SIGNED 1
+#else
+# define GLM_HAS_MAKE_SIGNED ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \
+ ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \
+ ((GLM_COMPILER & GLM_COMPILER_HIP))))
+#endif
+
+//
+#if defined(GLM_FORCE_INTRINSICS)
+# define GLM_HAS_BITSCAN_WINDOWS ((GLM_PLATFORM & GLM_PLATFORM_WINDOWS) && (\
+ ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \
+ ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14) && (GLM_ARCH & GLM_ARCH_X86_BIT))))
+#else
+# define GLM_HAS_BITSCAN_WINDOWS 0
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// OpenMP
+#ifdef _OPENMP
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# if GLM_COMPILER >= GLM_COMPILER_GCC61
+# define GLM_HAS_OPENMP 45
+# elif GLM_COMPILER >= GLM_COMPILER_GCC49
+# define GLM_HAS_OPENMP 40
+# elif GLM_COMPILER >= GLM_COMPILER_GCC47
+# define GLM_HAS_OPENMP 31
+# else
+# define GLM_HAS_OPENMP 0
+# endif
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# if GLM_COMPILER >= GLM_COMPILER_CLANG38
+# define GLM_HAS_OPENMP 31
+# else
+# define GLM_HAS_OPENMP 0
+# endif
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# define GLM_HAS_OPENMP 20
+# elif GLM_COMPILER & GLM_COMPILER_INTEL
+# if GLM_COMPILER >= GLM_COMPILER_INTEL16
+# define GLM_HAS_OPENMP 40
+# else
+# define GLM_HAS_OPENMP 0
+# endif
+# else
+# define GLM_HAS_OPENMP 0
+# endif
+#else
+# define GLM_HAS_OPENMP 0
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// nullptr
+
+#if GLM_LANG & GLM_LANG_CXX0X_FLAG
+# define GLM_CONFIG_NULLPTR GLM_ENABLE
+#else
+# define GLM_CONFIG_NULLPTR GLM_DISABLE
+#endif
+
+#if GLM_CONFIG_NULLPTR == GLM_ENABLE
+# define GLM_NULLPTR nullptr
+#else
+# define GLM_NULLPTR 0
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Static assert
+
+#if GLM_HAS_STATIC_ASSERT
+# define GLM_STATIC_ASSERT(x, message) static_assert(x, message)
+#elif GLM_COMPILER & GLM_COMPILER_VC
+# define GLM_STATIC_ASSERT(x, message) typedef char __CASSERT__##__LINE__[(x) ? 1 : -1]
+#else
+# define GLM_STATIC_ASSERT(x, message) assert(x)
+#endif//GLM_LANG
+
+///////////////////////////////////////////////////////////////////////////////////
+// Qualifiers
+
+#if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+# define GLM_CUDA_FUNC_DEF __device__ __host__
+# define GLM_CUDA_FUNC_DECL __device__ __host__
+#else
+# define GLM_CUDA_FUNC_DEF
+# define GLM_CUDA_FUNC_DECL
+#endif
+
+#if defined(GLM_FORCE_INLINE)
+#	if GLM_COMPILER & GLM_COMPILER_VC
+#		define GLM_INLINE __forceinline
+#		define GLM_NEVER_INLINE __declspec(noinline) // MSVC __declspec takes a bare modifier; ((noinline)) is a syntax error
+#	elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)
+#		define GLM_INLINE inline __attribute__((__always_inline__))
+#		define GLM_NEVER_INLINE __attribute__((__noinline__))
+#	elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+#		define GLM_INLINE __forceinline__
+#		define GLM_NEVER_INLINE __noinline__
+#	else
+#		define GLM_INLINE inline
+#		define GLM_NEVER_INLINE
+#	endif//GLM_COMPILER
+#else
+#	define GLM_INLINE inline
+#	define GLM_NEVER_INLINE
+#endif//defined(GLM_FORCE_INLINE)
+
+#define GLM_FUNC_DECL GLM_CUDA_FUNC_DECL
+#define GLM_FUNC_QUALIFIER GLM_CUDA_FUNC_DEF GLM_INLINE
+
+///////////////////////////////////////////////////////////////////////////////////
+// Swizzle operators
+
+// User defines: GLM_FORCE_SWIZZLE
+
+#define GLM_SWIZZLE_DISABLED 0
+#define GLM_SWIZZLE_OPERATOR 1
+#define GLM_SWIZZLE_FUNCTION 2
+
+#if defined(GLM_SWIZZLE)
+# pragma message("GLM: GLM_SWIZZLE is deprecated, use GLM_FORCE_SWIZZLE instead.")
+# define GLM_FORCE_SWIZZLE
+#endif
+
+#if defined(GLM_FORCE_SWIZZLE) && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && !defined(GLM_FORCE_XYZW_ONLY)
+# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_OPERATOR
+#elif defined(GLM_FORCE_SWIZZLE)
+# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_FUNCTION
+#else
+# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_DISABLED
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Allows using non-basic types as genType
+
+// #define GLM_FORCE_UNRESTRICTED_GENTYPE
+
+#ifdef GLM_FORCE_UNRESTRICTED_GENTYPE
+# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_ENABLE
+#else
+# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Clip control, define GLM_FORCE_DEPTH_ZERO_TO_ONE before including GLM
+// to use a clip space between 0 to 1.
+// Coordinate system, define GLM_FORCE_LEFT_HANDED before including GLM
+// to use left handed coordinate system by default.
+
+#define GLM_CLIP_CONTROL_ZO_BIT (1 << 0) // ZERO_TO_ONE
+#define GLM_CLIP_CONTROL_NO_BIT (1 << 1) // NEGATIVE_ONE_TO_ONE
+#define GLM_CLIP_CONTROL_LH_BIT (1 << 2) // LEFT_HANDED, For DirectX, Metal, Vulkan
+#define GLM_CLIP_CONTROL_RH_BIT (1 << 3) // RIGHT_HANDED, For OpenGL, default in GLM
+
+#define GLM_CLIP_CONTROL_LH_ZO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_ZO_BIT)
+#define GLM_CLIP_CONTROL_LH_NO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_NO_BIT)
+#define GLM_CLIP_CONTROL_RH_ZO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_ZO_BIT)
+#define GLM_CLIP_CONTROL_RH_NO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_NO_BIT)
+
+#ifdef GLM_FORCE_DEPTH_ZERO_TO_ONE
+# ifdef GLM_FORCE_LEFT_HANDED
+# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_ZO
+# else
+# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_ZO
+# endif
+#else
+# ifdef GLM_FORCE_LEFT_HANDED
+# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_NO
+# else
+# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_NO
+# endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Qualifiers
+
+#if (GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))
+#	define GLM_DEPRECATED __declspec(deprecated)
+#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef __declspec(align(alignment)) type name
+#elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG | GLM_COMPILER_INTEL)
+#	define GLM_DEPRECATED __attribute__((__deprecated__))
+#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __attribute__((aligned(alignment)))
+#elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+#	define GLM_DEPRECATED
+#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __align__(alignment) // was __align__(x): 'x' is not a parameter of this macro
+#else
+#	define GLM_DEPRECATED
+#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GLM_FORCE_EXPLICIT_CTOR
+# define GLM_EXPLICIT explicit
+#else
+# define GLM_EXPLICIT
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// SYCL
+
+#if GLM_COMPILER==GLM_COMPILER_SYCL
+
+#include <CL/sycl.hpp>
+#include <limits>
+
+namespace glm {
+namespace std {
+ // Import SYCL's functions into the namespace glm::std to force their usages.
+ // It's important to use the math built-in function (sin, exp, ...)
+ // of SYCL instead the std ones.
+ using namespace cl::sycl;
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Import some "harmless" std's stuffs used by glm into
+ // the new glm::std namespace.
+ template<typename T>
+ using numeric_limits = ::std::numeric_limits<T>;
+
+ using ::std::size_t;
+
+ using ::std::uint8_t;
+ using ::std::uint16_t;
+ using ::std::uint32_t;
+ using ::std::uint64_t;
+
+ using ::std::int8_t;
+ using ::std::int16_t;
+ using ::std::int32_t;
+ using ::std::int64_t;
+
+ using ::std::make_unsigned;
+ ///////////////////////////////////////////////////////////////////////////////
+} //namespace std
+} //namespace glm
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////////
+// Length type: all length functions returns a length_t type.
+// When GLM_FORCE_SIZE_T_LENGTH is defined, length_t is a typedef of size_t otherwise
+// length_t is a typedef of int like GLSL defines it.
+
+#define GLM_LENGTH_INT 1
+#define GLM_LENGTH_SIZE_T 2
+
+#ifdef GLM_FORCE_SIZE_T_LENGTH
+# define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_SIZE_T
+#else
+# define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_INT
+#endif
+
+namespace glm
+{
+ using std::size_t;
+# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T
+ typedef size_t length_t;
+# else
+ typedef int length_t;
+# endif
+}//namespace glm
+
+///////////////////////////////////////////////////////////////////////////////////
+// constexpr
+
+#if GLM_HAS_CONSTEXPR
+# define GLM_CONFIG_CONSTEXP GLM_ENABLE
+
+ namespace glm
+ {
+ template<typename T, std::size_t N>
+ constexpr std::size_t countof(T const (&)[N])
+ {
+ return N;
+ }
+ }//namespace glm
+# define GLM_COUNTOF(arr) glm::countof(arr)
+#elif defined(_MSC_VER)
+# define GLM_CONFIG_CONSTEXP GLM_DISABLE
+
+# define GLM_COUNTOF(arr) _countof(arr)
+#else
+# define GLM_CONFIG_CONSTEXP GLM_DISABLE
+
+# define GLM_COUNTOF(arr) sizeof(arr) / sizeof(arr[0])
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// uint
+
+namespace glm{
+namespace detail
+{
+ template<typename T>
+ struct is_int
+ {
+ enum test {value = 0};
+ };
+
+ template<>
+ struct is_int<unsigned int>
+ {
+ enum test {value = ~0};
+ };
+
+ template<>
+ struct is_int<signed int>
+ {
+ enum test {value = ~0};
+ };
+}//namespace detail
+
+ typedef unsigned int uint;
+}//namespace glm
+
+///////////////////////////////////////////////////////////////////////////////////
+// 64-bit int
+
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+# include <cstdint>
+#endif
+
+namespace glm{
+namespace detail
+{
+# if GLM_HAS_EXTENDED_INTEGER_TYPE
+ typedef std::uint64_t uint64;
+ typedef std::int64_t int64;
+# elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) // C99 detected, 64 bit types available
+ typedef uint64_t uint64;
+ typedef int64_t int64;
+# elif GLM_COMPILER & GLM_COMPILER_VC
+ typedef unsigned __int64 uint64;
+ typedef signed __int64 int64;
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic ignored "-Wlong-long"
+ __extension__ typedef unsigned long long uint64;
+ __extension__ typedef signed long long int64;
+# elif (GLM_COMPILER & GLM_COMPILER_CLANG)
+# pragma clang diagnostic ignored "-Wc++11-long-long"
+ typedef unsigned long long uint64;
+ typedef signed long long int64;
+# else//unknown compiler
+ typedef unsigned long long uint64;
+ typedef signed long long int64;
+# endif
+}//namespace detail
+}//namespace glm
+
+///////////////////////////////////////////////////////////////////////////////////
+// make_unsigned
+
+#if GLM_HAS_MAKE_SIGNED
+# include <type_traits>
+
+namespace glm{
+namespace detail
+{
+ using std::make_unsigned;
+}//namespace detail
+}//namespace glm
+
+#else
+
+namespace glm{
+namespace detail
+{
+ template<typename genType>
+ struct make_unsigned
+ {};
+
+ template<>
+ struct make_unsigned<char>
+ {
+ typedef unsigned char type;
+ };
+
+ template<>
+ struct make_unsigned<signed char>
+ {
+ typedef unsigned char type;
+ };
+
+ template<>
+ struct make_unsigned<short>
+ {
+ typedef unsigned short type;
+ };
+
+ template<>
+ struct make_unsigned<int>
+ {
+ typedef unsigned int type;
+ };
+
+ template<>
+ struct make_unsigned<long>
+ {
+ typedef unsigned long type;
+ };
+
+ template<>
+ struct make_unsigned<int64>
+ {
+ typedef uint64 type;
+ };
+
+ template<>
+ struct make_unsigned<unsigned char>
+ {
+ typedef unsigned char type;
+ };
+
+ template<>
+ struct make_unsigned<unsigned short>
+ {
+ typedef unsigned short type;
+ };
+
+ template<>
+ struct make_unsigned<unsigned int>
+ {
+ typedef unsigned int type;
+ };
+
+ template<>
+ struct make_unsigned<unsigned long>
+ {
+ typedef unsigned long type;
+ };
+
+ template<>
+ struct make_unsigned<uint64>
+ {
+ typedef uint64 type;
+ };
+}//namespace detail
+}//namespace glm
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Only use x, y, z, w as vector type components
+
+#ifdef GLM_FORCE_XYZW_ONLY
+# define GLM_CONFIG_XYZW_ONLY GLM_ENABLE
+#else
+# define GLM_CONFIG_XYZW_ONLY GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Configure the use of defaulted initialized types
+
+#define GLM_CTOR_INIT_DISABLE 0
+#define GLM_CTOR_INITIALIZER_LIST 1
+#define GLM_CTOR_INITIALISATION 2
+
+#if defined(GLM_FORCE_CTOR_INIT) && GLM_HAS_INITIALIZER_LISTS
+# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALIZER_LIST
+#elif defined(GLM_FORCE_CTOR_INIT) && !GLM_HAS_INITIALIZER_LISTS
+# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALISATION
+#else
+# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INIT_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Use SIMD instruction sets
+
+#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (GLM_ARCH & GLM_ARCH_SIMD_BIT)
+# define GLM_CONFIG_SIMD GLM_ENABLE
+#else
+# define GLM_CONFIG_SIMD GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Configure the use of defaulted function
+
+#if GLM_HAS_DEFAULTED_FUNCTIONS
+# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_ENABLE
+# define GLM_DEFAULT = default
+#else
+# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_DISABLE
+# define GLM_DEFAULT
+#endif
+
+#if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INIT_DISABLE && GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+# define GLM_CONFIG_DEFAULTED_DEFAULT_CTOR GLM_ENABLE
+# define GLM_DEFAULT_CTOR GLM_DEFAULT
+#else
+# define GLM_CONFIG_DEFAULTED_DEFAULT_CTOR GLM_DISABLE
+# define GLM_DEFAULT_CTOR
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Configure the use of aligned gentypes
+
+#ifdef GLM_FORCE_ALIGNED // Legacy define
+# define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+#endif
+
+#ifdef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+# define GLM_FORCE_ALIGNED_GENTYPES
+#endif
+
+#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE))
+# define GLM_CONFIG_ALIGNED_GENTYPES GLM_ENABLE
+#else
+# define GLM_CONFIG_ALIGNED_GENTYPES GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Configure the use of anonymous structure as implementation detail
+
+#if ((GLM_CONFIG_SIMD == GLM_ENABLE) || (GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR) || (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE))
+# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_ENABLE
+#else
+# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Silent warnings
+
+#ifdef GLM_FORCE_SILENT_WARNINGS
+# define GLM_SILENT_WARNINGS GLM_ENABLE
+#else
+# define GLM_SILENT_WARNINGS GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Precision
+
+#define GLM_HIGHP 1
+#define GLM_MEDIUMP 2
+#define GLM_LOWP 3
+
+#if defined(GLM_FORCE_PRECISION_HIGHP_BOOL) || defined(GLM_PRECISION_HIGHP_BOOL)
+# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP
+#elif defined(GLM_FORCE_PRECISION_MEDIUMP_BOOL) || defined(GLM_PRECISION_MEDIUMP_BOOL)
+# define GLM_CONFIG_PRECISION_BOOL GLM_MEDIUMP
+#elif defined(GLM_FORCE_PRECISION_LOWP_BOOL) || defined(GLM_PRECISION_LOWP_BOOL)
+# define GLM_CONFIG_PRECISION_BOOL GLM_LOWP
+#else
+# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP
+#endif
+
+#if defined(GLM_FORCE_PRECISION_HIGHP_INT) || defined(GLM_PRECISION_HIGHP_INT)
+# define GLM_CONFIG_PRECISION_INT GLM_HIGHP
+#elif defined(GLM_FORCE_PRECISION_MEDIUMP_INT) || defined(GLM_PRECISION_MEDIUMP_INT)
+# define GLM_CONFIG_PRECISION_INT GLM_MEDIUMP
+#elif defined(GLM_FORCE_PRECISION_LOWP_INT) || defined(GLM_PRECISION_LOWP_INT)
+# define GLM_CONFIG_PRECISION_INT GLM_LOWP
+#else
+# define GLM_CONFIG_PRECISION_INT GLM_HIGHP
+#endif
+
+#if defined(GLM_FORCE_PRECISION_HIGHP_UINT) || defined(GLM_PRECISION_HIGHP_UINT)
+# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP
+#elif defined(GLM_FORCE_PRECISION_MEDIUMP_UINT) || defined(GLM_PRECISION_MEDIUMP_UINT)
+# define GLM_CONFIG_PRECISION_UINT GLM_MEDIUMP
+#elif defined(GLM_FORCE_PRECISION_LOWP_UINT) || defined(GLM_PRECISION_LOWP_UINT)
+# define GLM_CONFIG_PRECISION_UINT GLM_LOWP
+#else
+# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP
+#endif
+
+#if defined(GLM_FORCE_PRECISION_HIGHP_FLOAT) || defined(GLM_PRECISION_HIGHP_FLOAT)
+# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP
+#elif defined(GLM_FORCE_PRECISION_MEDIUMP_FLOAT) || defined(GLM_PRECISION_MEDIUMP_FLOAT)
+# define GLM_CONFIG_PRECISION_FLOAT GLM_MEDIUMP
+#elif defined(GLM_FORCE_PRECISION_LOWP_FLOAT) || defined(GLM_PRECISION_LOWP_FLOAT)
+# define GLM_CONFIG_PRECISION_FLOAT GLM_LOWP
+#else
+# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP
+#endif
+
+#if defined(GLM_FORCE_PRECISION_HIGHP_DOUBLE) || defined(GLM_PRECISION_HIGHP_DOUBLE)
+# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP
+#elif defined(GLM_FORCE_PRECISION_MEDIUMP_DOUBLE) || defined(GLM_PRECISION_MEDIUMP_DOUBLE)
+# define GLM_CONFIG_PRECISION_DOUBLE GLM_MEDIUMP
+#elif defined(GLM_FORCE_PRECISION_LOWP_DOUBLE) || defined(GLM_PRECISION_LOWP_DOUBLE)
+# define GLM_CONFIG_PRECISION_DOUBLE GLM_LOWP
+#else
+# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Check inclusions of different versions of GLM
+
+#elif ((GLM_SETUP_INCLUDED != GLM_VERSION) && !defined(GLM_FORCE_IGNORE_VERSION))
+# error "GLM error: A different version of GLM is already included. Define GLM_FORCE_IGNORE_VERSION before including GLM headers to ignore this error."
+#elif GLM_SETUP_INCLUDED == GLM_VERSION
+
+///////////////////////////////////////////////////////////////////////////////////
+// Messages
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_DISPLAYED)
+# define GLM_MESSAGE_DISPLAYED
+# define GLM_STR_HELPER(x) #x
+# define GLM_STR(x) GLM_STR_HELPER(x)
+
+ // Report GLM version
+# pragma message (GLM_STR(GLM_VERSION_MESSAGE))
+
+ // Report C++ language
+# if (GLM_LANG & GLM_LANG_CXX2A_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 2A with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX2A_FLAG)
+# pragma message("GLM: C++ 2A")
+# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 17 with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX17_FLAG)
+# pragma message("GLM: C++ 17")
+# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 14 with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX14_FLAG)
+# pragma message("GLM: C++ 14")
+# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 11 with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX11_FLAG)
+# pragma message("GLM: C++ 11")
+# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 0x with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG)
+# pragma message("GLM: C++ 0x")
+# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 03 with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX03_FLAG)
+# pragma message("GLM: C++ 03")
+# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) && (GLM_LANG & GLM_LANG_EXT)
+# pragma message("GLM: C++ 98 with extensions")
+# elif (GLM_LANG & GLM_LANG_CXX98_FLAG)
+# pragma message("GLM: C++ 98")
+# else
+# pragma message("GLM: C++ language undetected")
+# endif//GLM_LANG
+
+ // Report compiler detection
+# if GLM_COMPILER & GLM_COMPILER_CUDA
+# pragma message("GLM: CUDA compiler detected")
+# elif GLM_COMPILER & GLM_COMPILER_HIP
+# pragma message("GLM: HIP compiler detected")
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma message("GLM: Visual C++ compiler detected")
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma message("GLM: Clang compiler detected")
+# elif GLM_COMPILER & GLM_COMPILER_INTEL
+# pragma message("GLM: Intel Compiler detected")
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma message("GLM: GCC compiler detected")
+# else
+# pragma message("GLM: Compiler not detected")
+# endif
+
+ // Report build target
+# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with AVX2 instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with AVX2 instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with AVX instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with AVX instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with SSE4.2 instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with SSE4.2 instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with SSE4.1 instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with SSE4.1 instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with SSSE3 instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with SSSE3 instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with SSE3 instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with SSE3 instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits with SSE2 instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits with SSE2 instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: x86 64 bits build target")
+# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: x86 32 bits build target")
+
+# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: ARM 64 bits with Neon instruction set build target")
+# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: ARM 32 bits with Neon instruction set build target")
+
+# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: ARM 64 bits build target")
+# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: ARM 32 bits build target")
+
+# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: MIPS 64 bits build target")
+# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: MIPS 32 bits build target")
+
+# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_64)
+# pragma message("GLM: PowerPC 64 bits build target")
+# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_32)
+# pragma message("GLM: PowerPC 32 bits build target")
+# else
+# pragma message("GLM: Unknown build target")
+# endif//GLM_ARCH
+
+ // Report platform name
+# if(GLM_PLATFORM & GLM_PLATFORM_QNXNTO)
+# pragma message("GLM: QNX platform detected")
+//# elif(GLM_PLATFORM & GLM_PLATFORM_IOS)
+//# pragma message("GLM: iOS platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_APPLE)
+# pragma message("GLM: Apple platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_WINCE)
+# pragma message("GLM: WinCE platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_WINDOWS)
+# pragma message("GLM: Windows platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_CHROME_NACL)
+# pragma message("GLM: Native Client detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+# pragma message("GLM: Android platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_LINUX)
+# pragma message("GLM: Linux platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_UNIX)
+# pragma message("GLM: UNIX platform detected")
+# elif(GLM_PLATFORM & GLM_PLATFORM_UNKNOWN)
+# pragma message("GLM: platform unknown")
+# else
+# pragma message("GLM: platform not detected")
+# endif
+
+ // Report whether only xyzw component are used
+# if defined GLM_FORCE_XYZW_ONLY
+# pragma message("GLM: GLM_FORCE_XYZW_ONLY is defined. Only x, y, z and w component are available in vector type. This define disables swizzle operators and SIMD instruction sets.")
+# endif
+
+ // Report swizzle operator support
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling operators enabled.")
+# elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling functions enabled. Enable compiler C++ language extensions to enable swizzle operators.")
+# else
+# pragma message("GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.")
+# endif
+
+ // Report .length() type
+# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T
+# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is defined. .length() returns a glm::length_t, a typedef of std::size_t.")
+# else
+# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.")
+# endif
+
+# if GLM_CONFIG_UNRESTRICTED_GENTYPE == GLM_ENABLE
+# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is defined. Removes GLSL restrictions on valid function genTypes.")
+# else
+# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.")
+# endif
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is defined. Ignores C++ warnings from using C++ language extensions.")
+# else
+# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is undefined. Shows C++ warnings from using C++ language extensions.")
+# endif
+
+# ifdef GLM_FORCE_SINGLE_ONLY
+# pragma message("GLM: GLM_FORCE_SINGLE_ONLY is defined. Using only single precision floating-point types.")
+# endif
+
+# if defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE)
+# undef GLM_FORCE_ALIGNED_GENTYPES
+# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined, allowing aligned types. This prevents the use of C++ constexpr.")
+# elif defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE)
+# undef GLM_FORCE_ALIGNED_GENTYPES
+# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.")
+# endif
+
+# if defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES)
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE
+# undef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.")
+# elif GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined. All gentypes (e.g. vec3) will be aligned and padded by default.")
+# endif
+# endif
+
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is defined. Using zero to one depth clip space.")
+# else
+# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.")
+# endif
+
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+# pragma message("GLM: GLM_FORCE_LEFT_HANDED is defined. Using left handed coordinate system.")
+# else
+# pragma message("GLM: GLM_FORCE_LEFT_HANDED is undefined. Using right handed coordinate system.")
+# endif
+#endif//GLM_MESSAGES
+
+#endif//GLM_SETUP_INCLUDED
diff --git a/3rdparty/glm/source/glm/detail/type_float.hpp b/3rdparty/glm/source/glm/detail/type_float.hpp
new file mode 100644
index 0000000..c8037eb
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_float.hpp
@@ -0,0 +1,68 @@
+#pragma once
+
+#include "setup.hpp"
+
+#if GLM_COMPILER == GLM_COMPILER_VC12
+# pragma warning(push)
+# pragma warning(disable: 4512) // assignment operator could not be generated
+#endif
+
+namespace glm{
+namespace detail
+{
+ template <typename T>
+ union float_t
+ {};
+
+ // https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+ template <>
+ union float_t<float>
+ {
+ typedef int int_type;
+ typedef float float_type;
+
+ GLM_CONSTEXPR float_t(float_type Num = 0.0f) : f(Num) {}
+
+ GLM_CONSTEXPR float_t& operator=(float_t const& x)
+ {
+ f = x.f;
+ return *this;
+ }
+
+ // Portable extraction of components.
+ GLM_CONSTEXPR bool negative() const { return i < 0; }
+ GLM_CONSTEXPR int_type mantissa() const { return i & ((1 << 23) - 1); }
+ GLM_CONSTEXPR int_type exponent() const { return (i >> 23) & ((1 << 8) - 1); }
+
+ int_type i;
+ float_type f;
+ };
+
+ template <>
+ union float_t<double>
+ {
+ typedef detail::int64 int_type;
+ typedef double float_type;
+
+ GLM_CONSTEXPR float_t(float_type Num = static_cast<float_type>(0)) : f(Num) {}
+
+ GLM_CONSTEXPR float_t& operator=(float_t const& x)
+ {
+ f = x.f;
+ return *this;
+ }
+
+ // Portable extraction of components.
+ GLM_CONSTEXPR bool negative() const { return i < 0; }
+ GLM_CONSTEXPR int_type mantissa() const { return i & ((int_type(1) << 52) - 1); }
+ GLM_CONSTEXPR int_type exponent() const { return (i >> 52) & ((int_type(1) << 11) - 1); }
+
+ int_type i;
+ float_type f;
+ };
+}//namespace detail
+}//namespace glm
+
+#if GLM_COMPILER == GLM_COMPILER_VC12
+# pragma warning(pop)
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_half.hpp b/3rdparty/glm/source/glm/detail/type_half.hpp
new file mode 100644
index 0000000..40b8bec
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_half.hpp
@@ -0,0 +1,16 @@
+#pragma once
+
+#include "setup.hpp"
+
+namespace glm{
+namespace detail
+{
+ typedef short hdata;
+
+ GLM_FUNC_DECL float toFloat32(hdata value);
+ GLM_FUNC_DECL hdata toFloat16(float const& value);
+
+}//namespace detail
+}//namespace glm
+
+#include "type_half.inl"
diff --git a/3rdparty/glm/source/glm/detail/type_half.inl b/3rdparty/glm/source/glm/detail/type_half.inl
new file mode 100644
index 0000000..5d239cf
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_half.inl
@@ -0,0 +1,241 @@
+namespace glm{
+namespace detail
+{
+ GLM_FUNC_QUALIFIER float overflow()
+ {
+ volatile float f = 1e10;
+
+ for(int i = 0; i < 10; ++i)
+ f = f * f; // this will overflow before the for loop terminates
+ return f;
+ }
+
+ union uif32
+ {
+ GLM_FUNC_QUALIFIER uif32() :
+ i(0)
+ {}
+
+ GLM_FUNC_QUALIFIER uif32(float f_) :
+ f(f_)
+ {}
+
+ GLM_FUNC_QUALIFIER uif32(unsigned int i_) :
+ i(i_)
+ {}
+
+ float f;
+ unsigned int i;
+ };
+
+ GLM_FUNC_QUALIFIER float toFloat32(hdata value)
+ {
+ int s = (value >> 15) & 0x00000001;
+ int e = (value >> 10) & 0x0000001f;
+ int m = value & 0x000003ff;
+
+ if(e == 0)
+ {
+ if(m == 0)
+ {
+ //
+ // Plus or minus zero
+ //
+
+ detail::uif32 result;
+ result.i = static_cast<unsigned int>(s << 31);
+ return result.f;
+ }
+ else
+ {
+ //
+ // Denormalized number -- renormalize it
+ //
+
+ while(!(m & 0x00000400))
+ {
+ m <<= 1;
+ e -= 1;
+ }
+
+ e += 1;
+ m &= ~0x00000400;
+ }
+ }
+ else if(e == 31)
+ {
+ if(m == 0)
+ {
+ //
+ // Positive or negative infinity
+ //
+
+ uif32 result;
+ result.i = static_cast<unsigned int>((s << 31) | 0x7f800000);
+ return result.f;
+ }
+ else
+ {
+ //
+ // Nan -- preserve sign and significand bits
+ //
+
+ uif32 result;
+ result.i = static_cast<unsigned int>((s << 31) | 0x7f800000 | (m << 13));
+ return result.f;
+ }
+ }
+
+ //
+ // Normalized number
+ //
+
+ e = e + (127 - 15);
+ m = m << 13;
+
+ //
+ // Assemble s, e and m.
+ //
+
+ uif32 Result;
+ Result.i = static_cast<unsigned int>((s << 31) | (e << 23) | m);
+ return Result.f;
+ }
+
+ GLM_FUNC_QUALIFIER hdata toFloat16(float const& f)
+ {
+ uif32 Entry;
+ Entry.f = f;
+ int i = static_cast<int>(Entry.i);
+
+ //
+ // Our floating point number, f, is represented by the bit
+ // pattern in integer i. Disassemble that bit pattern into
+ // the sign, s, the exponent, e, and the significand, m.
+ // Shift s into the position where it will go in the
+ // resulting half number.
+ // Adjust e, accounting for the different exponent bias
+ // of float and half (127 versus 15).
+ //
+
+ int s = (i >> 16) & 0x00008000;
+ int e = ((i >> 23) & 0x000000ff) - (127 - 15);
+ int m = i & 0x007fffff;
+
+ //
+ // Now reassemble s, e and m into a half:
+ //
+
+ if(e <= 0)
+ {
+ if(e < -10)
+ {
+ //
+ // E is less than -10. The absolute value of f is
+ // less than half_MIN (f may be a small normalized
+ // float, a denormalized float or a zero).
+ //
+ // We convert f to a half zero.
+ //
+
+ return hdata(s);
+ }
+
+ //
+ // E is between -10 and 0. F is a normalized float,
+ // whose magnitude is less than __half_NRM_MIN.
+ //
+ // We convert f to a denormalized half.
+ //
+
+ m = (m | 0x00800000) >> (1 - e);
+
+ //
+ // Round to nearest, round "0.5" up.
+ //
+ // Rounding may cause the significand to overflow and make
+ // our number normalized. Because of the way a half's bits
+ // are laid out, we don't have to treat this case separately;
+ // the code below will handle it correctly.
+ //
+
+ if(m & 0x00001000)
+ m += 0x00002000;
+
+ //
+ // Assemble the half from s, e (zero) and m.
+ //
+
+ return hdata(s | (m >> 13));
+ }
+ else if(e == 0xff - (127 - 15))
+ {
+ if(m == 0)
+ {
+ //
+ // F is an infinity; convert f to a half
+ // infinity with the same sign as f.
+ //
+
+ return hdata(s | 0x7c00);
+ }
+ else
+ {
+ //
+ // F is a NAN; we produce a half NAN that preserves
+ // the sign bit and the 10 leftmost bits of the
+ // significand of f, with one exception: If the 10
+ // leftmost bits are all zero, the NAN would turn
+ // into an infinity, so we have to set at least one
+ // bit in the significand.
+ //
+
+ m >>= 13;
+
+ return hdata(s | 0x7c00 | m | (m == 0));
+ }
+ }
+ else
+ {
+ //
+ // E is greater than zero. F is a normalized float.
+ // We try to convert f to a normalized half.
+ //
+
+ //
+ // Round to nearest, round "0.5" up
+ //
+
+ if(m & 0x00001000)
+ {
+ m += 0x00002000;
+
+ if(m & 0x00800000)
+ {
+ m = 0; // overflow in significand,
+ e += 1; // adjust exponent
+ }
+ }
+
+ //
+ // Handle exponent overflow
+ //
+
+ if (e > 30)
+ {
+ overflow(); // Cause a hardware floating point overflow;
+
+ return hdata(s | 0x7c00);
+ // if this returns, the half becomes an
+ } // infinity with the same sign as f.
+
+ //
+ // Assemble the half from s, e and m.
+ //
+
+ return hdata(s | (e << 10) | (m >> 13));
+ }
+ }
+
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat2x2.hpp b/3rdparty/glm/source/glm/detail/type_mat2x2.hpp
new file mode 100644
index 0000000..827022d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat2x2.hpp
@@ -0,0 +1,177 @@
+/// @ref core
+/// @file glm/detail/type_mat2x2.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<2, 2, T, Q>
+ {
+ typedef vec<2, T, Q> col_type;
+ typedef vec<2, T, Q> row_type;
+ typedef mat<2, 2, T, Q> type;
+ typedef mat<2, 2, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[2];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 2, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T const& x1, T const& y1,
+ T const& x2, T const& y2);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v1,
+ col_type const& v2);
+
+ // -- Conversions --
+
+ template<typename U, typename V, typename M, typename N>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ U const& x1, V const& y1,
+ M const& x2, N const& y2);
+
+ template<typename U, typename V>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<2, U, Q> const& v1,
+ vec<2, V, Q> const& v2);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator=(mat<2, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator+=(mat<2, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator-=(mat<2, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator*=(mat<2, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator/=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator/=(mat<2, 2, U, Q> const& m);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator++ ();
+ GLM_FUNC_DECL mat<2, 2, T, Q> & operator-- ();
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 2, T, Q>::col_type operator*(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 2, T, Q>::row_type operator*(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+} //namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat2x2.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat2x2.inl b/3rdparty/glm/source/glm/detail/type_mat2x2.inl
new file mode 100644
index 0000000..33159a6
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat2x2.inl
@@ -0,0 +1,536 @@
+#include "../matrix.hpp"
+
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0), col_type(0, 1)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0);
+ this->value[1] = col_type(0, 1);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{m[0], m[1]}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(T scalar)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(scalar, 0), col_type(0, scalar)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(scalar, 0);
+ this->value[1] = col_type(0, scalar);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat
+ (
+ T const& x0, T const& y0,
+ T const& x1, T const& y1
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0), col_type(x1, y1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0);
+ this->value[1] = col_type(x1, y1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(col_type const& v0, col_type const& v1)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{v0, v1}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<typename X1, typename Y1, typename X2, typename Y2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat
+ (
+ X1 const& x1, Y1 const& y1,
+ X2 const& x2, Y2 const& y2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(static_cast<T>(x1), value_type(y1)), col_type(static_cast<T>(x2), value_type(y2)) }
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(static_cast<T>(x1), value_type(y1));
+ this->value[1] = col_type(static_cast<T>(x2), value_type(y2));
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v1);
+ this->value[1] = col_type(v2);
+# endif
+ }
+
+ // -- mat2x2 matrix conversions --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type const& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator=(mat<2, 2, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(U scalar)
+ {
+ this->value[0] += scalar;
+ this->value[1] += scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(mat<2, 2, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(U scalar)
+ {
+ this->value[0] -= scalar;
+ this->value[1] -= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(mat<2, 2, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(U scalar)
+ {
+ this->value[0] *= scalar;
+ this->value[1] *= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(mat<2, 2, U, Q> const& m)
+ {
+ return (*this = *this * m);
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(U scalar)
+ {
+ this->value[0] /= scalar;
+ this->value[1] /= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(mat<2, 2, U, Q> const& m)
+ {
+ return *this *= inverse(m);
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> mat<2, 2, T, Q>::operator++(int)
+ {
+ mat<2, 2, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> mat<2, 2, T, Q>::operator--(int)
+ {
+ mat<2, 2, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m)
+ {
+ return mat<2, 2, T, Q>(
+ -m[0],
+ -m[1]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar)
+ {
+ return mat<2, 2, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m)
+ {
+ return mat<2, 2, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return mat<2, 2, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar)
+ {
+ return mat<2, 2, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m)
+ {
+ return mat<2, 2, T, Q>(
+ scalar - m[0],
+ scalar - m[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return mat<2, 2, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar)
+ {
+ return mat<2, 2, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m)
+ {
+ return mat<2, 2, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type operator*
+ (
+ mat<2, 2, T, Q> const& m,
+ typename mat<2, 2, T, Q>::row_type const& v
+ )
+ {
+ return vec<2, T, Q>(
+ m[0][0] * v.x + m[1][0] * v.y,
+ m[0][1] * v.x + m[1][1] * v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::row_type operator*
+ (
+ typename mat<2, 2, T, Q>::col_type const& v,
+ mat<2, 2, T, Q> const& m
+ )
+ {
+ return vec<2, T, Q>(
+ v.x * m[0][0] + v.y * m[0][1],
+ v.x * m[1][0] + v.y * m[1][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return mat<2, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ return mat<3, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ return mat<4, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1],
+ m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1],
+ m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar)
+ {
+ return mat<2, 2, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m)
+ {
+ return mat<2, 2, T, Q>(
+ scalar / m[0],
+ scalar / m[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v)
+ {
+ return inverse(m) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m)
+ {
+ return v * inverse(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ mat<2, 2, T, Q> m1_copy(m1);
+ return m1_copy /= m2;
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat2x3.hpp b/3rdparty/glm/source/glm/detail/type_mat2x3.hpp
new file mode 100644
index 0000000..c9303cb
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat2x3.hpp
@@ -0,0 +1,159 @@
+/// @ref core
+/// @file glm/detail/type_mat2x3.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include "type_vec3.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<2, 3, T, Q>
+ {
+ typedef vec<3, T, Q> col_type;
+ typedef vec<2, T, Q> row_type;
+ typedef mat<2, 3, T, Q> type;
+ typedef mat<3, 2, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[2];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 3, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T x0, T y0, T z0,
+ T x1, T y1, T z1);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1);
+
+ // -- Conversions --
+
+ template<typename X1, typename Y1, typename Z1, typename X2, typename Y2, typename Z2>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 x1, Y1 y1, Z1 z1,
+ X2 x2, Y2 y2, Z2 z2);
+
+ template<typename U, typename V>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<3, U, Q> const& v1,
+ vec<3, V, Q> const& v2);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator=(mat<2, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator+=(mat<2, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator-=(mat<2, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator/=(U s);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator++ ();
+ GLM_FUNC_DECL mat<2, 3, T, Q> & operator-- ();
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 3, T, Q>::col_type operator*(mat<2, 3, T, Q> const& m, typename mat<2, 3, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 3, T, Q>::row_type operator*(typename mat<2, 3, T, Q>::col_type const& v, mat<2, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat2x3.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat2x3.inl b/3rdparty/glm/source/glm/detail/type_mat2x3.inl
new file mode 100644
index 0000000..c158a5d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat2x3.inl
@@ -0,0 +1,510 @@
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0, 0), col_type(0, 1, 0)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0, 0);
+ this->value[1] = col_type(0, 1, 0);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{m.value[0], m.value[1]}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m.value[0];
+ this->value[1] = m.value[1];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(T scalar)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(scalar, 0, 0), col_type(0, scalar, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(scalar, 0, 0);
+ this->value[1] = col_type(0, scalar, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat
+ (
+ T x0, T y0, T z0,
+ T x1, T y1, T z1
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0, z0), col_type(x1, y1, z1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0);
+ this->value[1] = col_type(x1, y1, z1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(col_type const& v0, col_type const& v1)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v0);
+ this->value[1] = col_type(v1);
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X1, typename Y1, typename Z1,
+ typename X2, typename Y2, typename Z2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat
+ (
+ X1 x1, Y1 y1, Z1 z1,
+ X2 x2, Y2 y2, Z2 z2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x1, y1, z1), col_type(x2, y2, z2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x1, y1, z1);
+ this->value[1] = col_type(x2, y2, z2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v1);
+ this->value[1] = col_type(v2);
+# endif
+ }
+
+ // -- Matrix conversions --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::col_type & mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type const& mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator=(mat<2, 3, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator+=(mat<2, 3, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(mat<2, 3, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> mat<2, 3, T, Q>::operator++(int)
+ {
+ mat<2, 3, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> mat<2, 3, T, Q>::operator--(int)
+ {
+ mat<2, 3, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m)
+ {
+ return mat<2, 3, T, Q>(
+ -m[0],
+ -m[1]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar)
+ {
+ return mat<2, 3, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ return mat<2, 3, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar)
+ {
+ return mat<2, 3, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ return mat<2, 3, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar)
+ {
+ return mat<2, 3, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m)
+ {
+ return mat<2, 3, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::col_type operator*
+ (
+ mat<2, 3, T, Q> const& m,
+ typename mat<2, 3, T, Q>::row_type const& v)
+ {
+ return typename mat<2, 3, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y,
+ m[0][1] * v.x + m[1][1] * v.y,
+ m[0][2] * v.x + m[1][2] * v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::row_type operator*
+ (
+ typename mat<2, 3, T, Q>::col_type const& v,
+ mat<2, 3, T, Q> const& m)
+ {
+ return typename mat<2, 3, T, Q>::row_type(
+ v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2],
+ v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return mat<2, 3, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ T SrcA00 = m1[0][0];
+ T SrcA01 = m1[0][1];
+ T SrcA02 = m1[0][2];
+ T SrcA10 = m1[1][0];
+ T SrcA11 = m1[1][1];
+ T SrcA12 = m1[1][2];
+
+ T SrcB00 = m2[0][0];
+ T SrcB01 = m2[0][1];
+ T SrcB10 = m2[1][0];
+ T SrcB11 = m2[1][1];
+ T SrcB20 = m2[2][0];
+ T SrcB21 = m2[2][1];
+
+ mat<3, 3, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01;
+ Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11;
+ Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11;
+ Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21;
+ Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21;
+ Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ return mat<4, 3, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1],
+ m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1],
+ m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1],
+ m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1],
+ m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar)
+ {
+ return mat<2, 3, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m)
+ {
+ return mat<2, 3, T, Q>(
+ scalar / m[0],
+ scalar / m[1]);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat2x4.hpp b/3rdparty/glm/source/glm/detail/type_mat2x4.hpp
new file mode 100644
index 0000000..f769796
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat2x4.hpp
@@ -0,0 +1,161 @@
+/// @ref core
+/// @file glm/detail/type_mat2x4.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include "type_vec4.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<2, 4, T, Q>
+ {
+ typedef vec<4, T, Q> col_type;
+ typedef vec<2, T, Q> row_type;
+ typedef mat<2, 4, T, Q> type;
+ typedef mat<4, 2, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[2];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 4, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T x0, T y0, T z0, T w0,
+ T x1, T y1, T z1, T w1);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1);
+
+ // -- Conversions --
+
+ template<
+ typename X1, typename Y1, typename Z1, typename W1,
+ typename X2, typename Y2, typename Z2, typename W2>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 x1, Y1 y1, Z1 z1, W1 w1,
+ X2 x2, Y2 y2, Z2 z2, W2 w2);
+
+ template<typename U, typename V>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<4, U, Q> const& v1,
+ vec<4, V, Q> const& v2);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator=(mat<2, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator+=(mat<2, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator-=(mat<2, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator/=(U s);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator++ ();
+ GLM_FUNC_DECL mat<2, 4, T, Q> & operator-- ();
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat2x4.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat2x4.inl b/3rdparty/glm/source/glm/detail/type_mat2x4.inl
new file mode 100644
index 0000000..2a96d3d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat2x4.inl
@@ -0,0 +1,520 @@
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0, 0, 0);
+ this->value[1] = col_type(0, 1, 0, 0);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{m[0], m[1]}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(T s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0, 0, 0);
+ this->value[1] = col_type(0, s, 0, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat
+ (
+ T x0, T y0, T z0, T w0,
+ T x1, T y1, T z1, T w1
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0, z0, w0), col_type(x1, y1, z1, w1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0, w0);
+ this->value[1] = col_type(x1, y1, z1, w1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(col_type const& v0, col_type const& v1)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X1, typename Y1, typename Z1, typename W1,
+ typename X2, typename Y2, typename Z2, typename W2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat
+ (
+ X1 x1, Y1 y1, Z1 z1, W1 w1,
+ X2 x2, Y2 y2, Z2 z2, W2 w2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{
+ col_type(x1, y1, z1, w1),
+ col_type(x2, y2, z2, w2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x1, y1, z1, w1);
+ this->value[1] = col_type(x2, y2, z2, w2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v1);
+ this->value[1] = col_type(v2);
+# endif
+ }
+
+ // -- Matrix conversions --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::col_type & mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type const& mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator=(mat<2, 4, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(mat<2, 4, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(mat<2, 4, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> & mat<2, 4, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat<2, 4, T, Q>::operator++(int)
+ {
+ mat<2, 4, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat<2, 4, T, Q>::operator--(int)
+ {
+ mat<2, 4, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m)
+ {
+ return mat<2, 4, T, Q>(
+ -m[0],
+ -m[1]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar)
+ {
+ return mat<2, 4, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ return mat<2, 4, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar)
+ {
+ return mat<2, 4, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ return mat<2, 4, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar)
+ {
+ return mat<2, 4, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m)
+ {
+ return mat<2, 4, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v)
+ {
+ return typename mat<2, 4, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y,
+ m[0][1] * v.x + m[1][1] * v.y,
+ m[0][2] * v.x + m[1][2] * v.y,
+ m[0][3] * v.x + m[1][3] * v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m)
+ {
+ return typename mat<2, 4, T, Q>::row_type(
+ v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3],
+ v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ T SrcA00 = m1[0][0];
+ T SrcA01 = m1[0][1];
+ T SrcA02 = m1[0][2];
+ T SrcA03 = m1[0][3];
+ T SrcA10 = m1[1][0];
+ T SrcA11 = m1[1][1];
+ T SrcA12 = m1[1][2];
+ T SrcA13 = m1[1][3];
+
+ T SrcB00 = m2[0][0];
+ T SrcB01 = m2[0][1];
+ T SrcB10 = m2[1][0];
+ T SrcB11 = m2[1][1];
+ T SrcB20 = m2[2][0];
+ T SrcB21 = m2[2][1];
+ T SrcB30 = m2[3][0];
+ T SrcB31 = m2[3][1];
+
+ mat<4, 4, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01;
+ Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01;
+ Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11;
+ Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11;
+ Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11;
+ Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21;
+ Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21;
+ Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21;
+ Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21;
+ Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31;
+ Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31;
+ Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31;
+ Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2)
+ {
+ return mat<2, 4, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1],
+ m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1],
+ m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ return mat<3, 4, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1],
+ m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1],
+ m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1],
+ m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1],
+ m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar)
+ {
+ return mat<2, 4, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m)
+ {
+ return mat<2, 4, T, Q>(
+ scalar / m[0],
+ scalar / m[1]);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat3x2.hpp b/3rdparty/glm/source/glm/detail/type_mat3x2.hpp
new file mode 100644
index 0000000..d528af0
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat3x2.hpp
@@ -0,0 +1,167 @@
+/// @ref core
+/// @file glm/detail/type_mat3x2.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include "type_vec3.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<3, 2, T, Q>
+ {
+ typedef vec<2, T, Q> col_type;
+ typedef vec<3, T, Q> row_type;
+ typedef mat<3, 2, T, Q> type;
+ typedef mat<2, 3, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[3];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 2, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T x0, T y0,
+ T x1, T y1,
+ T x2, T y2);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1,
+ col_type const& v2);
+
+ // -- Conversions --
+
+ template<
+ typename X1, typename Y1,
+ typename X2, typename Y2,
+ typename X3, typename Y3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 x1, Y1 y1,
+ X2 x2, Y2 y2,
+ X3 x3, Y3 y3);
+
+ template<typename V1, typename V2, typename V3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<2, V1, Q> const& v1,
+ vec<2, V2, Q> const& v2,
+ vec<2, V3, Q> const& v3);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator=(mat<3, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator+=(mat<3, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator-=(mat<3, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator/=(U s);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator++ ();
+ GLM_FUNC_DECL mat<3, 2, T, Q> & operator-- ();
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat3x2.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat3x2.inl b/3rdparty/glm/source/glm/detail/type_mat3x2.inl
new file mode 100644
index 0000000..8cf8ed3
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat3x2.inl
@@ -0,0 +1,532 @@
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0), col_type(0, 1), col_type(0, 0)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0);
+ this->value[1] = col_type(0, 1);
+ this->value[2] = col_type(0, 0);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(T s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0), col_type(0, s), col_type(0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0);
+ this->value[1] = col_type(0, s);
+ this->value[2] = col_type(0, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat
+ (
+ T x0, T y0,
+ T x1, T y1,
+ T x2, T y2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0);
+ this->value[1] = col_type(x1, y1);
+ this->value[2] = col_type(x2, y2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+ this->value[2] = v2;
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X0, typename Y0,
+ typename X1, typename Y1,
+ typename X2, typename Y2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat
+ (
+ X0 x0, Y0 y0,
+ X1 x1, Y1 y1,
+ X2 x2, Y2 y2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0);
+ this->value[1] = col_type(x1, y1);
+ this->value[2] = col_type(x2, y2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V0, typename V1, typename V2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v0);
+ this->value[1] = col_type(v1);
+ this->value[2] = col_type(v2);
+# endif
+ }
+
+ // -- Matrix conversions --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::col_type & mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type const& mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator=(mat<3, 2, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ this->value[2] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(mat<3, 2, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ this->value[2] += m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ this->value[2] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(mat<3, 2, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ this->value[2] -= m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ this->value[2] *= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> & mat<3, 2, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ this->value[2] /= s;
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ ++this->value[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ --this->value[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> mat<3, 2, T, Q>::operator++(int)
+ {
+ mat<3, 2, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> mat<3, 2, T, Q>::operator--(int)
+ {
+ mat<3, 2, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m)
+ {
+ return mat<3, 2, T, Q>(
+ -m[0],
+ -m[1],
+ -m[2]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar)
+ {
+ return mat<3, 2, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar,
+ m[2] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ return mat<3, 2, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1],
+ m1[2] + m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar)
+ {
+ return mat<3, 2, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar,
+ m[2] - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ return mat<3, 2, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1],
+ m1[2] - m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar)
+ {
+ return mat<3, 2, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m)
+ {
+ return mat<3, 2, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v)
+ {
+ return typename mat<3, 2, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z,
+ m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m)
+ {
+ return typename mat<3, 2, T, Q>::row_type(
+ v.x * m[0][0] + v.y * m[0][1],
+ v.x * m[1][0] + v.y * m[1][1],
+ v.x * m[2][0] + v.y * m[2][1]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ const T SrcA00 = m1[0][0];
+ const T SrcA01 = m1[0][1];
+ const T SrcA10 = m1[1][0];
+ const T SrcA11 = m1[1][1];
+ const T SrcA20 = m1[2][0];
+ const T SrcA21 = m1[2][1];
+
+ const T SrcB00 = m2[0][0];
+ const T SrcB01 = m2[0][1];
+ const T SrcB02 = m2[0][2];
+ const T SrcB10 = m2[1][0];
+ const T SrcB11 = m2[1][1];
+ const T SrcB12 = m2[1][2];
+
+ mat<2, 2, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ return mat<3, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ return mat<4, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2],
+ m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2],
+ m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar)
+ {
+ return mat<3, 2, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar,
+ m[2] / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m)
+ {
+ return mat<3, 2, T, Q>(
+ scalar / m[0],
+ scalar / m[1],
+ scalar / m[2]);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat3x3.hpp b/3rdparty/glm/source/glm/detail/type_mat3x3.hpp
new file mode 100644
index 0000000..9b435ae
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat3x3.hpp
@@ -0,0 +1,184 @@
+/// @ref core
+/// @file glm/detail/type_mat3x3.hpp
+
+#pragma once
+
+#include "type_vec3.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<3, 3, T, Q>
+ {
+ typedef vec<3, T, Q> col_type;
+ typedef vec<3, T, Q> row_type;
+ typedef mat<3, 3, T, Q> type;
+ typedef mat<3, 3, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[3];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 3, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T x0, T y0, T z0,
+ T x1, T y1, T z1,
+ T x2, T y2, T z2);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1,
+ col_type const& v2);
+
+ // -- Conversions --
+
+ template<
+ typename X1, typename Y1, typename Z1,
+ typename X2, typename Y2, typename Z2,
+ typename X3, typename Y3, typename Z3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 x1, Y1 y1, Z1 z1,
+ X2 x2, Y2 y2, Z2 z2,
+ X3 x3, Y3 y3, Z3 z3);
+
+ template<typename V1, typename V2, typename V3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<3, V1, Q> const& v1,
+ vec<3, V2, Q> const& v2,
+ vec<3, V3, Q> const& v3);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator=(mat<3, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator+=(mat<3, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator-=(mat<3, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator*=(mat<3, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator/=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator/=(mat<3, 3, U, Q> const& m);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator++();
+ GLM_FUNC_DECL mat<3, 3, T, Q> & operator--();
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat3x3.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat3x3.inl b/3rdparty/glm/source/glm/detail/type_mat3x3.inl
new file mode 100644
index 0000000..4362d84
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat3x3.inl
@@ -0,0 +1,601 @@
+#include "../matrix.hpp"
+
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0, 0);
+ this->value[1] = col_type(0, 1, 0);
+ this->value[2] = col_type(0, 0, 1);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(T s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0, 0);
+ this->value[1] = col_type(0, s, 0);
+ this->value[2] = col_type(0, 0, s);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat
+ (
+ T x0, T y0, T z0,
+ T x1, T y1, T z1,
+ T x2, T y2, T z2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0);
+ this->value[1] = col_type(x1, y1, z1);
+ this->value[2] = col_type(x2, y2, z2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v0);
+ this->value[1] = col_type(v1);
+ this->value[2] = col_type(v2);
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X1, typename Y1, typename Z1,
+ typename X2, typename Y2, typename Z2,
+ typename X3, typename Y3, typename Z3>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat
+ (
+ X1 x1, Y1 y1, Z1 z1,
+ X2 x2, Y2 y2, Z2 z2,
+ X3 x3, Y3 y3, Z3 z3
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x1, y1, z1);
+ this->value[1] = col_type(x2, y2, z2);
+ this->value[2] = col_type(x3, y3, z3);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2, typename V3>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v1), col_type(v2), col_type(v3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v1);
+ this->value[1] = col_type(v2);
+ this->value[2] = col_type(v3);
+# endif
+ }
+
+ // -- Matrix conversions --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type & mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type const& mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator=(mat<3, 3, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ this->value[2] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(mat<3, 3, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ this->value[2] += m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ this->value[2] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(mat<3, 3, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ this->value[2] -= m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ this->value[2] *= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(mat<3, 3, U, Q> const& m)
+ {
+ return (*this = *this * m);
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ this->value[2] /= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(mat<3, 3, U, Q> const& m)
+ {
+ return *this *= inverse(m);
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ ++this->value[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ --this->value[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat<3, 3, T, Q>::operator++(int)
+ {
+ mat<3, 3, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat<3, 3, T, Q>::operator--(int)
+ {
+ mat<3, 3, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m)
+ {
+ return mat<3, 3, T, Q>(
+ -m[0],
+ -m[1],
+ -m[2]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar)
+ {
+ return mat<3, 3, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar,
+ m[2] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m)
+ {
+ return mat<3, 3, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar,
+ m[2] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ return mat<3, 3, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1],
+ m1[2] + m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar)
+ {
+ return mat<3, 3, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar,
+ m[2] - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m)
+ {
+ return mat<3, 3, T, Q>(
+ scalar - m[0],
+ scalar - m[1],
+ scalar - m[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ return mat<3, 3, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1],
+ m1[2] - m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar)
+ {
+ return mat<3, 3, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m)
+ {
+ return mat<3, 3, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v)
+ {
+ return typename mat<3, 3, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z,
+ m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z,
+ m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m)
+ {
+ return typename mat<3, 3, T, Q>::row_type(
+ m[0][0] * v.x + m[0][1] * v.y + m[0][2] * v.z,
+ m[1][0] * v.x + m[1][1] * v.y + m[1][2] * v.z,
+ m[2][0] * v.x + m[2][1] * v.y + m[2][2] * v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ T const SrcA00 = m1[0][0];
+ T const SrcA01 = m1[0][1];
+ T const SrcA02 = m1[0][2];
+ T const SrcA10 = m1[1][0];
+ T const SrcA11 = m1[1][1];
+ T const SrcA12 = m1[1][2];
+ T const SrcA20 = m1[2][0];
+ T const SrcA21 = m1[2][1];
+ T const SrcA22 = m1[2][2];
+
+ T const SrcB00 = m2[0][0];
+ T const SrcB01 = m2[0][1];
+ T const SrcB02 = m2[0][2];
+ T const SrcB10 = m2[1][0];
+ T const SrcB11 = m2[1][1];
+ T const SrcB12 = m2[1][2];
+ T const SrcB20 = m2[2][0];
+ T const SrcB21 = m2[2][1];
+ T const SrcB22 = m2[2][2];
+
+ mat<3, 3, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02;
+ Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12;
+ Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12;
+ Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22;
+ Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22;
+ Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ return mat<2, 3, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ return mat<4, 3, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2],
+ m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2],
+ m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2],
+ m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2],
+ m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar)
+ {
+ return mat<3, 3, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar,
+ m[2] / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m)
+ {
+ return mat<3, 3, T, Q>(
+ scalar / m[0],
+ scalar / m[1],
+ scalar / m[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v)
+ {
+ return inverse(m) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m)
+ {
+ return v * inverse(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ mat<3, 3, T, Q> m1_copy(m1);
+ return m1_copy /= m2;
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat3x4.hpp b/3rdparty/glm/source/glm/detail/type_mat3x4.hpp
new file mode 100644
index 0000000..27bc425
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat3x4.hpp
@@ -0,0 +1,166 @@
+/// @ref core
+/// @file glm/detail/type_mat3x4.hpp
+
+#pragma once
+
+#include "type_vec3.hpp"
+#include "type_vec4.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<3, 4, T, Q>
+ {
+ typedef vec<4, T, Q> col_type;
+ typedef vec<3, T, Q> row_type;
+ typedef mat<3, 4, T, Q> type;
+ typedef mat<4, 3, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[3];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 4, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T x0, T y0, T z0, T w0,
+ T x1, T y1, T z1, T w1,
+ T x2, T y2, T z2, T w2);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1,
+ col_type const& v2);
+
+ // -- Conversions --
+
+ template<
+ typename X1, typename Y1, typename Z1, typename W1,
+ typename X2, typename Y2, typename Z2, typename W2,
+ typename X3, typename Y3, typename Z3, typename W3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 x1, Y1 y1, Z1 z1, W1 w1,
+ X2 x2, Y2 y2, Z2 z2, W2 w2,
+ X3 x3, Y3 y3, Z3 z3, W3 w3);
+
+ template<typename V1, typename V2, typename V3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<4, V1, Q> const& v1,
+ vec<4, V2, Q> const& v2,
+ vec<4, V3, Q> const& v3);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator=(mat<3, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator+=(mat<3, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator-=(mat<3, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator/=(U s);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator++();
+ GLM_FUNC_DECL mat<3, 4, T, Q> & operator--();
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 4, T, Q>::col_type operator*(mat<3, 4, T, Q> const& m, typename mat<3, 4, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<3, 4, T, Q>::row_type operator*(typename mat<3, 4, T, Q>::col_type const& v, mat<3, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat3x4.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat3x4.inl b/3rdparty/glm/source/glm/detail/type_mat3x4.inl
new file mode 100644
index 0000000..c1a0fa6
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat3x4.inl
@@ -0,0 +1,578 @@
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0, 0, 0);
+ this->value[1] = col_type(0, 1, 0, 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(T s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0, 0, 0);
+ this->value[1] = col_type(0, s, 0, 0);
+ this->value[2] = col_type(0, 0, s, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat
+ (
+ T x0, T y0, T z0, T w0,
+ T x1, T y1, T z1, T w1,
+ T x2, T y2, T z2, T w2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{
+ col_type(x0, y0, z0, w0),
+ col_type(x1, y1, z1, w1),
+ col_type(x2, y2, z2, w2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0, w0);
+ this->value[1] = col_type(x1, y1, z1, w1);
+ this->value[2] = col_type(x2, y2, z2, w2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+ this->value[2] = v2;
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X0, typename Y0, typename Z0, typename W0,
+ typename X1, typename Y1, typename Z1, typename W1,
+ typename X2, typename Y2, typename Z2, typename W2>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat
+ (
+ X0 x0, Y0 y0, Z0 z0, W0 w0,
+ X1 x1, Y1 y1, Z1 z1, W1 w1,
+ X2 x2, Y2 y2, Z2 z2, W2 w2
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{
+ col_type(x0, y0, z0, w0),
+ col_type(x1, y1, z1, w1),
+ col_type(x2, y2, z2, w2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0, w0);
+ this->value[1] = col_type(x1, y1, z1, w1);
+ this->value[2] = col_type(x2, y2, z2, w2);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2, typename V3>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(vec<4, V1, Q> const& v0, vec<4, V2, Q> const& v1, vec<4, V3, Q> const& v2)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v0);
+ this->value[1] = col_type(v1);
+ this->value[2] = col_type(v2);
+# endif
+ }
+
+ // -- Matrix conversions --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+ this->value[2] = col_type(m[2], 1, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0, 0, 1, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+ this->value[2] = col_type(m[2], 1, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 0);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::col_type & mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type const& mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator=(mat<3, 4, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ this->value[2] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(mat<3, 4, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ this->value[2] += m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ this->value[2] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(mat<3, 4, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ this->value[2] -= m[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ this->value[2] *= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> & mat<3, 4, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ this->value[2] /= s;
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ ++this->value[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ --this->value[2];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat<3, 4, T, Q>::operator++(int)
+ {
+ mat<3, 4, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat<3, 4, T, Q>::operator--(int)
+ {
+ mat<3, 4, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m)
+ {
+ return mat<3, 4, T, Q>(
+ -m[0],
+ -m[1],
+ -m[2]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar)
+ {
+ return mat<3, 4, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar,
+ m[2] + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ return mat<3, 4, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1],
+ m1[2] + m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar)
+ {
+ return mat<3, 4, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar,
+ m[2] - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ return mat<3, 4, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1],
+ m1[2] - m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar)
+ {
+ return mat<3, 4, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m)
+ {
+ return mat<3, 4, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::col_type operator*
+ (
+ mat<3, 4, T, Q> const& m,
+ typename mat<3, 4, T, Q>::row_type const& v
+ )
+ {
+ return typename mat<3, 4, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z,
+ m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z,
+ m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z,
+ m[0][3] * v.x + m[1][3] * v.y + m[2][3] * v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::row_type operator*
+ (
+ typename mat<3, 4, T, Q>::col_type const& v,
+ mat<3, 4, T, Q> const& m
+ )
+ {
+ return typename mat<3, 4, T, Q>::row_type(
+ v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3],
+ v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3],
+ v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2] + v.w * m[2][3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ const T SrcA00 = m1[0][0];
+ const T SrcA01 = m1[0][1];
+ const T SrcA02 = m1[0][2];
+ const T SrcA03 = m1[0][3];
+ const T SrcA10 = m1[1][0];
+ const T SrcA11 = m1[1][1];
+ const T SrcA12 = m1[1][2];
+ const T SrcA13 = m1[1][3];
+ const T SrcA20 = m1[2][0];
+ const T SrcA21 = m1[2][1];
+ const T SrcA22 = m1[2][2];
+ const T SrcA23 = m1[2][3];
+
+ const T SrcB00 = m2[0][0];
+ const T SrcB01 = m2[0][1];
+ const T SrcB02 = m2[0][2];
+ const T SrcB10 = m2[1][0];
+ const T SrcB11 = m2[1][1];
+ const T SrcB12 = m2[1][2];
+ const T SrcB20 = m2[2][0];
+ const T SrcB21 = m2[2][1];
+ const T SrcB22 = m2[2][2];
+ const T SrcB30 = m2[3][0];
+ const T SrcB31 = m2[3][1];
+ const T SrcB32 = m2[3][2];
+
+ mat<4, 4, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02;
+ Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02;
+ Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01 + SrcA23 * SrcB02;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12;
+ Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12;
+ Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11 + SrcA23 * SrcB12;
+ Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22;
+ Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22;
+ Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22;
+ Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21 + SrcA23 * SrcB22;
+ Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31 + SrcA20 * SrcB32;
+ Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31 + SrcA21 * SrcB32;
+ Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31 + SrcA22 * SrcB32;
+ Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31 + SrcA23 * SrcB32;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2)
+ {
+ return mat<2, 4, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2],
+ m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2],
+ m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2)
+ {
+ return mat<3, 4, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2],
+ m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2],
+ m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2],
+ m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2],
+ m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar)
+ {
+ return mat<3, 4, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar,
+ m[2] / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m)
+ {
+ return mat<3, 4, T, Q>(
+ scalar / m[0],
+ scalar / m[1],
+ scalar / m[2]);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x2.hpp b/3rdparty/glm/source/glm/detail/type_mat4x2.hpp
new file mode 100644
index 0000000..56f500d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x2.hpp
@@ -0,0 +1,171 @@
+/// @ref core
+/// @file glm/detail/type_mat4x2.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include "type_vec4.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<4, 2, T, Q>
+ {
+ typedef vec<2, T, Q> col_type;
+ typedef vec<4, T, Q> row_type;
+ typedef mat<4, 2, T, Q> type;
+ typedef mat<2, 4, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[4];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 2, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T x0, T y0,
+ T x1, T y1,
+ T x2, T y2,
+ T x3, T y3);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1,
+ col_type const& v2,
+ col_type const& v3);
+
+ // -- Conversions --
+
+ template<
+ typename X0, typename Y0,
+ typename X1, typename Y1,
+ typename X2, typename Y2,
+ typename X3, typename Y3>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X0 x0, Y0 y0,
+ X1 x1, Y1 y1,
+ X2 x2, Y2 y2,
+ X3 x3, Y3 y3);
+
+ template<typename V1, typename V2, typename V3, typename V4>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<2, V1, Q> const& v1,
+ vec<2, V2, Q> const& v2,
+ vec<2, V3, Q> const& v3,
+ vec<2, V4, Q> const& v4);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator=(mat<4, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator+=(mat<4, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator-=(mat<4, 2, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator/=(U s);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator++ ();
+ GLM_FUNC_DECL mat<4, 2, T, Q> & operator-- ();
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat4x2.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x2.inl b/3rdparty/glm/source/glm/detail/type_mat4x2.inl
new file mode 100644
index 0000000..efe5833
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x2.inl
@@ -0,0 +1,574 @@
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0), col_type(0, 1), col_type(0, 0), col_type(0, 0)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0);
+ this->value[1] = col_type(0, 1);
+ this->value[2] = col_type(0, 0);
+ this->value[3] = col_type(0, 0);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = m[3];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(T s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0), col_type(0, s), col_type(0, 0), col_type(0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0);
+ this->value[1] = col_type(0, s);
+ this->value[2] = col_type(0, 0);
+ this->value[3] = col_type(0, 0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat
+ (
+ T x0, T y0,
+ T x1, T y1,
+ T x2, T y2,
+ T x3, T y3
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0);
+ this->value[1] = col_type(x1, y1);
+ this->value[2] = col_type(x2, y2);
+ this->value[3] = col_type(x3, y3);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+ this->value[2] = v2;
+ this->value[3] = v3;
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X0, typename Y0,
+ typename X1, typename Y1,
+ typename X2, typename Y2,
+ typename X3, typename Y3>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat
+ (
+ X0 x0, Y0 y0,
+ X1 x1, Y1 y1,
+ X2 x2, Y2 y2,
+ X3 x3, Y3 y3
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0);
+ this->value[1] = col_type(x1, y1);
+ this->value[2] = col_type(x2, y2);
+ this->value[3] = col_type(x3, y3);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V0, typename V1, typename V2, typename V3>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2, vec<2, V3, Q> const& v3)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v0);
+ this->value[1] = col_type(v1);
+ this->value[2] = col_type(v2);
+ this->value[3] = col_type(v3);
+# endif
+ }
+
+ // -- Conversion --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(m[3]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(m[3]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(m[3]);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::col_type & mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type const& mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q>& mat<4, 2, T, Q>::operator=(mat<4, 2, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ this->value[2] += s;
+ this->value[3] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(mat<4, 2, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ this->value[2] += m[2];
+ this->value[3] += m[3];
+ return *this;
+ }
+
+ // Subtracts scalar s from every component.
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ this->value[2] -= s;
+ this->value[3] -= s;
+ return *this;
+ }
+
+ // Component-wise matrix subtraction.
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(mat<4, 2, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ this->value[2] -= m[2];
+ this->value[3] -= m[3];
+ return *this;
+ }
+
+ // Scales every component by s (scalar multiply; matrix*matrix *= is not
+ // defined for non-square matrices).
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ this->value[2] *= s;
+ this->value[3] *= s;
+ return *this;
+ }
+
+ // Divides every component by s. No zero check — division by zero follows
+ // the element type's semantics (UB for integers, inf/NaN for floats).
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ this->value[2] /= s;
+ this->value[3] /= s;
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ // Pre-increment: adds 1 to every component, returns *this.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ ++this->value[2];
+ ++this->value[3];
+ return *this;
+ }
+
+ // Pre-decrement: subtracts 1 from every component, returns *this.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ --this->value[2];
+ --this->value[3];
+ return *this;
+ }
+
+ // Post-increment: returns a copy of the pre-increment value.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> mat<4, 2, T, Q>::operator++(int)
+ {
+ mat<4, 2, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ // Post-decrement: returns a copy of the pre-decrement value.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> mat<4, 2, T, Q>::operator--(int)
+ {
+ mat<4, 2, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ // Unary plus: identity, returns the operand unchanged.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m)
+ {
+ return m;
+ }
+
+ // Unary minus: negates every component.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m)
+ {
+ return mat<4, 2, T, Q>(
+ -m[0],
+ -m[1],
+ -m[2],
+ -m[3]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ // Matrix + scalar: scalar broadcast to every component.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar)
+ {
+ return mat<4, 2, T, Q>(
+ m[0] + scalar,
+ m[1] + scalar,
+ m[2] + scalar,
+ m[3] + scalar);
+ }
+
+ // Matrix + matrix: component-wise sum.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ return mat<4, 2, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1],
+ m1[2] + m2[2],
+ m1[3] + m2[3]);
+ }
+
+ // Matrix - scalar: scalar broadcast to every component.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar)
+ {
+ return mat<4, 2, T, Q>(
+ m[0] - scalar,
+ m[1] - scalar,
+ m[2] - scalar,
+ m[3] - scalar);
+ }
+
+ // Matrix - matrix: component-wise difference.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ return mat<4, 2, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1],
+ m1[2] - m2[2],
+ m1[3] - m2[3]);
+ }
+
+ // Matrix * scalar: uniform scaling of every component.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar)
+ {
+ return mat<4, 2, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar,
+ m[3] * scalar);
+ }
+
+ // scalar * Matrix: commutative counterpart of matrix * scalar.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m)
+ {
+ return mat<4, 2, T, Q>(
+ m[0] * scalar,
+ m[1] * scalar,
+ m[2] * scalar,
+ m[3] * scalar);
+ }
+
+ // Matrix * vec4 -> vec2: standard column-major product (columns of m
+ // weighted by the components of v).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v)
+ {
+ return typename mat<4, 2, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w,
+ m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w);
+ }
+
+ // vec2 * Matrix -> vec4: row-vector product (v dotted with each column).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m)
+ {
+ return typename mat<4, 2, T, Q>::row_type(
+ v.x * m[0][0] + v.y * m[0][1],
+ v.x * m[1][0] + v.y * m[1][1],
+ v.x * m[2][0] + v.y * m[2][1],
+ v.x * m[3][0] + v.y * m[3][1]);
+ }
+
+ // 4x2 * 2x4 -> 2x2 product. Operands are copied into scalars first so the
+ // result can be written even if it aliases an argument.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ T const SrcA00 = m1[0][0];
+ T const SrcA01 = m1[0][1];
+ T const SrcA10 = m1[1][0];
+ T const SrcA11 = m1[1][1];
+ T const SrcA20 = m1[2][0];
+ T const SrcA21 = m1[2][1];
+ T const SrcA30 = m1[3][0];
+ T const SrcA31 = m1[3][1];
+
+ T const SrcB00 = m2[0][0];
+ T const SrcB01 = m2[0][1];
+ T const SrcB02 = m2[0][2];
+ T const SrcB03 = m2[0][3];
+ T const SrcB10 = m2[1][0];
+ T const SrcB11 = m2[1][1];
+ T const SrcB12 = m2[1][2];
+ T const SrcB13 = m2[1][3];
+
+ mat<2, 2, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13;
+ return Result;
+ }
+
+ // 4x2 * 3x4 -> 3x2 product, written directly as the six dot products.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ return mat<3, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3]);
+ }
+
+ // 4x2 * 4x4 -> 4x2 product, written directly as the eight dot products.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ return mat<4, 2, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3],
+ m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3],
+ m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3]);
+ }
+
+ // Matrix / scalar: component-wise division (no zero check).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar)
+ {
+ return mat<4, 2, T, Q>(
+ m[0] / scalar,
+ m[1] / scalar,
+ m[2] / scalar,
+ m[3] / scalar);
+ }
+
+ // scalar / Matrix: component-wise reciprocal-style division.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m)
+ {
+ return mat<4, 2, T, Q>(
+ scalar / m[0],
+ scalar / m[1],
+ scalar / m[2],
+ scalar / m[3]);
+ }
+
+ // -- Boolean operators --
+
+ // Exact component-wise equality (no epsilon tolerance for floats).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]);
+ }
+
+ // Exact component-wise inequality; logical negation of operator==.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x3.hpp b/3rdparty/glm/source/glm/detail/type_mat4x3.hpp
new file mode 100644
index 0000000..5a4668f
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x3.hpp
@@ -0,0 +1,171 @@
+/// @ref core
+/// @file glm/detail/type_mat4x3.hpp
+
+#pragma once
+
+#include "type_vec3.hpp"
+#include "type_vec4.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ // Column-major matrix with 4 columns and 3 rows: value[c] is column c,
+ // stored as a vec3. Implementations live in type_mat4x3.inl.
+ template<typename T, qualifier Q>
+ struct mat<4, 3, T, Q>
+ {
+ typedef vec<3, T, Q> col_type;
+ typedef vec<4, T, Q> row_type;
+ typedef mat<4, 3, T, Q> type;
+ typedef mat<3, 4, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[4];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ // Number of columns (4); rows are col_type::length() (3).
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; }
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 3, T, P> const& m);
+
+ // Diagonal constructor: x on the 3x3 diagonal, zeros elsewhere (see .inl).
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T const& x);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T const& x0, T const& y0, T const& z0,
+ T const& x1, T const& y1, T const& z1,
+ T const& x2, T const& y2, T const& z2,
+ T const& x3, T const& y3, T const& z3);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1,
+ col_type const& v2,
+ col_type const& v3);
+
+ // -- Conversions --
+
+ template<
+ typename X1, typename Y1, typename Z1,
+ typename X2, typename Y2, typename Z2,
+ typename X3, typename Y3, typename Z3,
+ typename X4, typename Y4, typename Z4>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 const& x1, Y1 const& y1, Z1 const& z1,
+ X2 const& x2, Y2 const& y2, Z2 const& z2,
+ X3 const& x3, Y3 const& y3, Z3 const& z3,
+ X4 const& x4, Y4 const& y4, Z4 const& z4);
+
+ template<typename V1, typename V2, typename V3, typename V4>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<3, V1, Q> const& v1,
+ vec<3, V2, Q> const& v2,
+ vec<3, V3, Q> const& v3,
+ vec<3, V4, Q> const& v4);
+
+ // -- Matrix conversions --
+ // Other-size conversions copy overlapping components; remaining components
+ // come from the identity pattern or zero — see the definitions in the .inl.
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator=(mat<4, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator+=(mat<4, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator-=(mat<4, 3, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 3, T, Q> & operator/=(U s);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<4, 3, T, Q>& operator++();
+ GLM_FUNC_DECL mat<4, 3, T, Q>& operator--();
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m);
+
+ // -- Binary operators --
+ // Scalar forms broadcast s component-wise; matrix products follow linear
+ // algebra dimensions (4x3 times CxR only when inner dimensions match).
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator*(T const& s, mat<4, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 3, T, Q>::col_type operator*(mat<4, 3, T, Q> const& m, typename mat<4, 3, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 3, T, Q>::row_type operator*(typename mat<4, 3, T, Q>::col_type const& v, mat<4, 3, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> operator/(T const& s, mat<4, 3, T, Q> const& m);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat4x3.inl"
+#endif //GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x3.inl b/3rdparty/glm/source/glm/detail/type_mat4x3.inl
new file mode 100644
index 0000000..1249e95
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x3.inl
@@ -0,0 +1,598 @@
+namespace glm
+{
+ // Implementation of mat<4, 3, T, Q> (four vec3 columns). Each constructor
+ // has two bodies selected by GLM_HAS_INITIALIZER_LISTS: a member
+ // initializer list on C++11+, or per-column assignment on older compilers.
+
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ // Default constructor: identity pattern (1s on the 3x3 diagonal, zero
+ // fourth column) when GLM_CONFIG_CTOR_INIT requests initialization.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1), col_type(0, 0, 0)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0, 0);
+ this->value[1] = col_type(0, 1, 0);
+ this->value[2] = col_type(0, 0, 1);
+ this->value[3] = col_type(0, 0, 0);
+# endif
+ }
+# endif
+
+ // Copy from a 4x3 with a different memory qualifier P.
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = m[3];
+# endif
+ }
+
+ // Diagonal constructor: s on the 3x3 diagonal, zeros elsewhere.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(T const& s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s), col_type(0, 0, 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0, 0);
+ this->value[1] = col_type(0, s, 0);
+ this->value[2] = col_type(0, 0, s);
+ this->value[3] = col_type(0, 0, 0);
+# endif
+ }
+
+ // Twelve-scalar constructor, supplied column by column.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat
+ (
+ T const& x0, T const& y0, T const& z0,
+ T const& x1, T const& y1, T const& z1,
+ T const& x2, T const& y2, T const& z2,
+ T const& x3, T const& y3, T const& z3
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0);
+ this->value[1] = col_type(x1, y1, z1);
+ this->value[2] = col_type(x2, y2, z2);
+ this->value[3] = col_type(x3, y3, z3);
+# endif
+ }
+
+ // Four-column constructor.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+ this->value[2] = v2;
+ this->value[3] = v3;
+# endif
+ }
+
+ // -- Conversion constructors --
+
+ // Mixed-scalar-type constructor; each component converts independently.
+ template<typename T, qualifier Q>
+ template<
+ typename X0, typename Y0, typename Z0,
+ typename X1, typename Y1, typename Z1,
+ typename X2, typename Y2, typename Z2,
+ typename X3, typename Y3, typename Z3>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat
+ (
+ X0 const& x0, Y0 const& y0, Z0 const& z0,
+ X1 const& x1, Y1 const& y1, Z1 const& z1,
+ X2 const& x2, Y2 const& y2, Z2 const& z2,
+ X3 const& x3, Y3 const& y3, Z3 const& z3
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0);
+ this->value[1] = col_type(x1, y1, z1);
+ this->value[2] = col_type(x2, y2, z2);
+ this->value[3] = col_type(x3, y3, z3);
+# endif
+ }
+
+ // Mixed-vector-type constructor; each column converts independently.
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2, typename V3, typename V4>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3, vec<3, V4, Q> const& v4)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v1);
+ this->value[1] = col_type(v2);
+ this->value[2] = col_type(v3);
+ this->value[3] = col_type(v4);
+# endif
+ }
+
+ // -- Matrix conversions --
+ // Overlapping components are copied from the source; missing components
+ // take the identity pattern (diagonal 1) or zero, per the literals below.
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(m[3]);
+# endif
+ }
+
+ // 2x2 -> 4x3: top-left copied, z row zero, third column (0,0,1), fourth zero.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(0, 0, 1);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // 3x3 -> 4x3: three columns copied, fourth column zero.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // 4x4 -> 4x3: each column truncated from vec4 to vec3 (w dropped).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(m[3]);
+# endif
+ }
+
+ // 2x3 -> 4x3: two columns copied, third is (0,0,1), fourth zero.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0, 0, 1);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // 3x2 -> 4x3: columns extended with z = 0, 0, 1; fourth column zero.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 1);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // 2x4 -> 4x3: two columns truncated to vec3, third (0,0,1), fourth zero.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(0, 0, 1);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // 4x2 -> 4x3: columns extended with z = 0, 0, 1, 0 (identity pattern).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(m[3], 0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 1);
+ this->value[3] = col_type(m[3], 0);
+# endif
+ }
+
+ // 3x4 -> 4x3: three columns truncated to vec3, fourth column zero.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(0);
+# endif
+ }
+
+ // -- Accesses --
+
+ // Column access; bounds-checked only in debug builds via assert.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::col_type & mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type const& mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary updatable operators --
+ // All compound operators below work column by column; scalar forms
+ // broadcast the scalar to every component.
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q>& mat<4, 3, T, Q>::operator=(mat<4, 3, U, Q> const& m)
+ {
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ this->value[2] += s;
+ this->value[3] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(mat<4, 3, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ this->value[2] += m[2];
+ this->value[3] += m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ this->value[2] -= s;
+ this->value[3] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(mat<4, 3, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ this->value[2] -= m[2];
+ this->value[3] -= m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ this->value[2] *= s;
+ this->value[3] *= s;
+ return *this;
+ }
+
+ // No zero check on s; division semantics follow the element type.
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ this->value[2] /= s;
+ this->value[3] /= s;
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ ++this->value[2];
+ ++this->value[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ --this->value[2];
+ --this->value[3];
+ return *this;
+ }
+
+ // Postfix forms return a copy of the previous value.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> mat<4, 3, T, Q>::operator++(int)
+ {
+ mat<4, 3, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> mat<4, 3, T, Q>::operator--(int)
+ {
+ mat<4, 3, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m)
+ {
+ return mat<4, 3, T, Q>(
+ -m[0],
+ -m[1],
+ -m[2],
+ -m[3]);
+ }
+
+ // -- Binary arithmetic operators --
+ // Scalar forms broadcast s; matrix forms are component-wise except the
+ // operator* overloads further below, which are true matrix products.
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T const& s)
+ {
+ return mat<4, 3, T, Q>(
+ m[0] + s,
+ m[1] + s,
+ m[2] + s,
+ m[3] + s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ return mat<4, 3, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1],
+ m1[2] + m2[2],
+ m1[3] + m2[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T const& s)
+ {
+ return mat<4, 3, T, Q>(
+ m[0] - s,
+ m[1] - s,
+ m[2] - s,
+ m[3] - s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ return mat<4, 3, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1],
+ m1[2] - m2[2],
+ m1[3] - m2[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T const& s)
+ {
+ return mat<4, 3, T, Q>(
+ m[0] * s,
+ m[1] * s,
+ m[2] * s,
+ m[3] * s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(T const& s, mat<4, 3, T, Q> const& m)
+ {
+ return mat<4, 3, T, Q>(
+ m[0] * s,
+ m[1] * s,
+ m[2] * s,
+ m[3] * s);
+ }
+
+ // Matrix * vec4 -> vec3: column-major linear combination of the columns.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::col_type operator*
+ (
+ mat<4, 3, T, Q> const& m,
+ typename mat<4, 3, T, Q>::row_type const& v)
+ {
+ return typename mat<4, 3, T, Q>::col_type(
+ m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w,
+ m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w,
+ m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * v.w);
+ }
+
+ // vec3 * Matrix -> vec4: row-vector product (v dotted with each column).
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::row_type operator*
+ (
+ typename mat<4, 3, T, Q>::col_type const& v,
+ mat<4, 3, T, Q> const& m)
+ {
+ return typename mat<4, 3, T, Q>::row_type(
+ v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2],
+ v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2],
+ v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2],
+ v.x * m[3][0] + v.y * m[3][1] + v.z * m[3][2]);
+ }
+
+ // 4x3 * 2x4 -> 2x3 product.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ return mat<2, 3, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3]);
+ }
+
+ // 4x3 * 3x4 -> 3x3 product. Operands are copied into scalars first so the
+ // result can be computed even if it aliases an argument.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ T const SrcA00 = m1[0][0];
+ T const SrcA01 = m1[0][1];
+ T const SrcA02 = m1[0][2];
+ T const SrcA10 = m1[1][0];
+ T const SrcA11 = m1[1][1];
+ T const SrcA12 = m1[1][2];
+ T const SrcA20 = m1[2][0];
+ T const SrcA21 = m1[2][1];
+ T const SrcA22 = m1[2][2];
+ T const SrcA30 = m1[3][0];
+ T const SrcA31 = m1[3][1];
+ T const SrcA32 = m1[3][2];
+
+ T const SrcB00 = m2[0][0];
+ T const SrcB01 = m2[0][1];
+ T const SrcB02 = m2[0][2];
+ T const SrcB03 = m2[0][3];
+ T const SrcB10 = m2[1][0];
+ T const SrcB11 = m2[1][1];
+ T const SrcB12 = m2[1][2];
+ T const SrcB13 = m2[1][3];
+ T const SrcB20 = m2[2][0];
+ T const SrcB21 = m2[2][1];
+ T const SrcB22 = m2[2][2];
+ T const SrcB23 = m2[2][3];
+
+ mat<3, 3, T, Q> Result;
+ Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03;
+ Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03;
+ Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02 + SrcA32 * SrcB03;
+ Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13;
+ Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13;
+ Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12 + SrcA32 * SrcB13;
+ Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22 + SrcA30 * SrcB23;
+ Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22 + SrcA31 * SrcB23;
+ Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22 + SrcA32 * SrcB23;
+ return Result;
+ }
+
+ // 4x3 * 4x4 -> 4x3 product, written directly as the twelve dot products.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ return mat<4, 3, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3],
+ m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3],
+ m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3],
+ m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3],
+ m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2] + m1[3][2] * m2[3][3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T const& s)
+ {
+ return mat<4, 3, T, Q>(
+ m[0] / s,
+ m[1] / s,
+ m[2] / s,
+ m[3] / s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator/(T const& s, mat<4, 3, T, Q> const& m)
+ {
+ return mat<4, 3, T, Q>(
+ s / m[0],
+ s / m[1],
+ s / m[2],
+ s / m[3]);
+ }
+
+ // -- Boolean operators --
+ // Exact component-wise comparison (no epsilon tolerance for floats).
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x4.hpp b/3rdparty/glm/source/glm/detail/type_mat4x4.hpp
new file mode 100644
index 0000000..b92e208
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x4.hpp
@@ -0,0 +1,189 @@
+/// @ref core
+/// @file glm/detail/type_mat4x4.hpp
+
+#pragma once
+
+#include "type_vec4.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct mat<4, 4, T, Q>
+ {
+ typedef vec<4, T, Q> col_type;
+ typedef vec<4, T, Q> row_type;
+ typedef mat<4, 4, T, Q> type;
+ typedef mat<4, 4, T, Q> transpose_type;
+ typedef T value_type;
+
+ private:
+ col_type value[4];
+
+ public:
+ // -- Accesses --
+
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;}
+
+ GLM_FUNC_DECL col_type & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
+
+ // -- Constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 4, T, P> const& m);
+
+ GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T const& x);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ T const& x0, T const& y0, T const& z0, T const& w0,
+ T const& x1, T const& y1, T const& z1, T const& w1,
+ T const& x2, T const& y2, T const& z2, T const& w2,
+ T const& x3, T const& y3, T const& z3, T const& w3);
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ col_type const& v0,
+ col_type const& v1,
+ col_type const& v2,
+ col_type const& v3);
+
+ // -- Conversions --
+
+ template<
+ typename X1, typename Y1, typename Z1, typename W1,
+ typename X2, typename Y2, typename Z2, typename W2,
+ typename X3, typename Y3, typename Z3, typename W3,
+ typename X4, typename Y4, typename Z4, typename W4>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1,
+ X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2,
+ X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3,
+ X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4);
+
+ template<typename V1, typename V2, typename V3, typename V4>
+ GLM_FUNC_DECL GLM_CONSTEXPR mat(
+ vec<4, V1, Q> const& v1,
+ vec<4, V2, Q> const& v2,
+ vec<4, V3, Q> const& v3,
+ vec<4, V4, Q> const& v4);
+
+ // -- Matrix conversions --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, U, P> const& m);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+ // -- Unary arithmetic operators --
+
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator=(mat<4, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator+=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator+=(mat<4, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator-=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator-=(mat<4, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator*=(mat<4, 4, U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator/=(U s);
+ template<typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator/=(mat<4, 4, U, Q> const& m);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator++();
+ GLM_FUNC_DECL mat<4, 4, T, Q> & operator--();
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator++(int);
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator--(int);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator+(T const& s, mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator-(T const& s, mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator*(T const& s, mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 4, T, Q>::col_type operator*(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 4, T, Q>::row_type operator*(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator/(T const& s, mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat4x4.inl"
+#endif//GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x4.inl b/3rdparty/glm/source/glm/detail/type_mat4x4.inl
new file mode 100644
index 0000000..5c2166e
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x4.inl
@@ -0,0 +1,706 @@
+#include "../matrix.hpp"
+
+namespace glm
+{
+ // -- Constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat()
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+ : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+ this->value[0] = col_type(1, 0, 0, 0);
+ this->value[1] = col_type(0, 1, 0, 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, T, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = m[3];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(T const& s)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0), col_type(0, 0, 0, s)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(s, 0, 0, 0);
+ this->value[1] = col_type(0, s, 0, 0);
+ this->value[2] = col_type(0, 0, s, 0);
+ this->value[3] = col_type(0, 0, 0, s);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat
+ (
+ T const& x0, T const& y0, T const& z0, T const& w0,
+ T const& x1, T const& y1, T const& z1, T const& w1,
+ T const& x2, T const& y2, T const& z2, T const& w2,
+ T const& x3, T const& y3, T const& z3, T const& w3
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{
+ col_type(x0, y0, z0, w0),
+ col_type(x1, y1, z1, w1),
+ col_type(x2, y2, z2, w2),
+ col_type(x3, y3, z3, w3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x0, y0, z0, w0);
+ this->value[1] = col_type(x1, y1, z1, w1);
+ this->value[2] = col_type(x2, y2, z2, w2);
+ this->value[3] = col_type(x3, y3, z3, w3);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = v0;
+ this->value[1] = v1;
+ this->value[2] = v2;
+ this->value[3] = v3;
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, U, P> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0]);
+ this->value[1] = col_type(m[1]);
+ this->value[2] = col_type(m[2]);
+ this->value[3] = col_type(m[3]);
+# endif
+ }
+
+ // -- Conversions --
+
+ template<typename T, qualifier Q>
+ template<
+ typename X1, typename Y1, typename Z1, typename W1,
+ typename X2, typename Y2, typename Z2, typename W2,
+ typename X3, typename Y3, typename Z3, typename W3,
+ typename X4, typename Y4, typename Z4, typename W4>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat
+ (
+ X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1,
+ X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2,
+ X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3,
+ X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4
+ )
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(x1, y1, z1, w1), col_type(x2, y2, z2, w2), col_type(x3, y3, z3, w3), col_type(x4, y4, z4, w4)}
+# endif
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<X1>::is_iec559 || std::numeric_limits<X1>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Y1>::is_iec559 || std::numeric_limits<Y1>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Z1>::is_iec559 || std::numeric_limits<Z1>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<W1>::is_iec559 || std::numeric_limits<W1>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid.");
+
+ GLM_STATIC_ASSERT(std::numeric_limits<X2>::is_iec559 || std::numeric_limits<X2>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 5th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Y2>::is_iec559 || std::numeric_limits<Y2>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 6th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Z2>::is_iec559 || std::numeric_limits<Z2>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 7th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<W2>::is_iec559 || std::numeric_limits<W2>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 8th parameter type invalid.");
+
+ GLM_STATIC_ASSERT(std::numeric_limits<X3>::is_iec559 || std::numeric_limits<X3>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 9th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Y3>::is_iec559 || std::numeric_limits<Y3>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 10th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Z3>::is_iec559 || std::numeric_limits<Z3>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 11th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<W3>::is_iec559 || std::numeric_limits<W3>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 12th parameter type invalid.");
+
+ GLM_STATIC_ASSERT(std::numeric_limits<X4>::is_iec559 || std::numeric_limits<X4>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 13th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Y4>::is_iec559 || std::numeric_limits<Y4>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 14th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<Z4>::is_iec559 || std::numeric_limits<Z4>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 15th parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<W4>::is_iec559 || std::numeric_limits<W4>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 16th parameter type invalid.");
+
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(x1, y1, z1, w1);
+ this->value[1] = col_type(x2, y2, z2, w2);
+ this->value[2] = col_type(x3, y3, z3, w3);
+ this->value[3] = col_type(x4, y4, z4, w4);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ template<typename V1, typename V2, typename V3, typename V4>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2, vec<4, V3, Q> const& v3, vec<4, V4, Q> const& v4)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)}
+# endif
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<V1>::is_iec559 || std::numeric_limits<V1>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<V2>::is_iec559 || std::numeric_limits<V2>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<V3>::is_iec559 || std::numeric_limits<V3>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid.");
+ GLM_STATIC_ASSERT(std::numeric_limits<V4>::is_iec559 || std::numeric_limits<V4>::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid.");
+
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(v1);
+ this->value[1] = col_type(v2);
+ this->value[2] = col_type(v3);
+ this->value[3] = col_type(v4);
+# endif
+ }
+
+ // -- Matrix conversions --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+ this->value[2] = col_type(m[2], 1, 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = col_type(0, 0, 1, 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 2, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0, 0);
+ this->value[1] = col_type(m[1], 0, 0);
+ this->value[2] = col_type(0, 0, 1, 0);
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 4, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0, 0, 0, 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = col_type(0, 0, 0, 1);
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 3, T, Q> const& m)
+# if GLM_HAS_INITIALIZER_LISTS
+ : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(m[3], 1)}
+# endif
+ {
+# if !GLM_HAS_INITIALIZER_LISTS
+ this->value[0] = col_type(m[0], 0);
+ this->value[1] = col_type(m[1], 0);
+ this->value[2] = col_type(m[2], 0);
+ this->value[3] = col_type(m[3], 1);
+# endif
+ }
+
+ // -- Accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type & mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i)
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type const& mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) const
+ {
+ assert(i < this->length());
+ return this->value[i];
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator=(mat<4, 4, U, Q> const& m)
+ {
+ //memcpy could be faster
+ //memcpy(&this->value, &m.value, 16 * sizeof(valType));
+ this->value[0] = m[0];
+ this->value[1] = m[1];
+ this->value[2] = m[2];
+ this->value[3] = m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(U s)
+ {
+ this->value[0] += s;
+ this->value[1] += s;
+ this->value[2] += s;
+ this->value[3] += s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(mat<4, 4, U, Q> const& m)
+ {
+ this->value[0] += m[0];
+ this->value[1] += m[1];
+ this->value[2] += m[2];
+ this->value[3] += m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(U s)
+ {
+ this->value[0] -= s;
+ this->value[1] -= s;
+ this->value[2] -= s;
+ this->value[3] -= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(mat<4, 4, U, Q> const& m)
+ {
+ this->value[0] -= m[0];
+ this->value[1] -= m[1];
+ this->value[2] -= m[2];
+ this->value[3] -= m[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(U s)
+ {
+ this->value[0] *= s;
+ this->value[1] *= s;
+ this->value[2] *= s;
+ this->value[3] *= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(mat<4, 4, U, Q> const& m)
+ {
+ return (*this = *this * m);
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(U s)
+ {
+ this->value[0] /= s;
+ this->value[1] /= s;
+ this->value[2] /= s;
+ this->value[3] /= s;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(mat<4, 4, U, Q> const& m)
+ {
+ return *this *= inverse(m);
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator++()
+ {
+ ++this->value[0];
+ ++this->value[1];
+ ++this->value[2];
+ ++this->value[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator--()
+ {
+ --this->value[0];
+ --this->value[1];
+ --this->value[2];
+ --this->value[3];
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat<4, 4, T, Q>::operator++(int)
+ {
+ mat<4, 4, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat<4, 4, T, Q>::operator--(int)
+ {
+ mat<4, 4, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary constant operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m)
+ {
+ return m;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m)
+ {
+ return mat<4, 4, T, Q>(
+ -m[0],
+ -m[1],
+ -m[2],
+ -m[3]);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T const& s)
+ {
+ return mat<4, 4, T, Q>(
+ m[0] + s,
+ m[1] + s,
+ m[2] + s,
+ m[3] + s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(T const& s, mat<4, 4, T, Q> const& m)
+ {
+ return mat<4, 4, T, Q>(
+ m[0] + s,
+ m[1] + s,
+ m[2] + s,
+ m[3] + s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ return mat<4, 4, T, Q>(
+ m1[0] + m2[0],
+ m1[1] + m2[1],
+ m1[2] + m2[2],
+ m1[3] + m2[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T const& s)
+ {
+ return mat<4, 4, T, Q>(
+ m[0] - s,
+ m[1] - s,
+ m[2] - s,
+ m[3] - s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(T const& s, mat<4, 4, T, Q> const& m)
+ {
+ return mat<4, 4, T, Q>(
+ s - m[0],
+ s - m[1],
+ s - m[2],
+ s - m[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ return mat<4, 4, T, Q>(
+ m1[0] - m2[0],
+ m1[1] - m2[1],
+ m1[2] - m2[2],
+ m1[3] - m2[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T const & s)
+ {
+ return mat<4, 4, T, Q>(
+ m[0] * s,
+ m[1] * s,
+ m[2] * s,
+ m[3] * s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(T const& s, mat<4, 4, T, Q> const& m)
+ {
+ return mat<4, 4, T, Q>(
+ m[0] * s,
+ m[1] * s,
+ m[2] * s,
+ m[3] * s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type operator*
+ (
+ mat<4, 4, T, Q> const& m,
+ typename mat<4, 4, T, Q>::row_type const& v
+ )
+ {
+/*
+ __m128 v0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 v1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 v2 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 v3 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 m0 = _mm_mul_ps(m[0].data, v0);
+ __m128 m1 = _mm_mul_ps(m[1].data, v1);
+ __m128 a0 = _mm_add_ps(m0, m1);
+
+ __m128 m2 = _mm_mul_ps(m[2].data, v2);
+ __m128 m3 = _mm_mul_ps(m[3].data, v3);
+ __m128 a1 = _mm_add_ps(m2, m3);
+
+ __m128 a2 = _mm_add_ps(a0, a1);
+
+ return typename mat<4, 4, T, Q>::col_type(a2);
+*/
+
+ typename mat<4, 4, T, Q>::col_type const Mov0(v[0]);
+ typename mat<4, 4, T, Q>::col_type const Mov1(v[1]);
+ typename mat<4, 4, T, Q>::col_type const Mul0 = m[0] * Mov0;
+ typename mat<4, 4, T, Q>::col_type const Mul1 = m[1] * Mov1;
+ typename mat<4, 4, T, Q>::col_type const Add0 = Mul0 + Mul1;
+ typename mat<4, 4, T, Q>::col_type const Mov2(v[2]);
+ typename mat<4, 4, T, Q>::col_type const Mov3(v[3]);
+ typename mat<4, 4, T, Q>::col_type const Mul2 = m[2] * Mov2;
+ typename mat<4, 4, T, Q>::col_type const Mul3 = m[3] * Mov3;
+ typename mat<4, 4, T, Q>::col_type const Add1 = Mul2 + Mul3;
+ typename mat<4, 4, T, Q>::col_type const Add2 = Add0 + Add1;
+ return Add2;
+
+/*
+ return typename mat<4, 4, T, Q>::col_type(
+ m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0] * v[3],
+ m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1] * v[3],
+ m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2] * v[3],
+ m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3] * v[3]);
+*/
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::row_type operator*
+ (
+ typename mat<4, 4, T, Q>::col_type const& v,
+ mat<4, 4, T, Q> const& m
+ )
+ {
+ return typename mat<4, 4, T, Q>::row_type(
+ m[0][0] * v[0] + m[0][1] * v[1] + m[0][2] * v[2] + m[0][3] * v[3],
+ m[1][0] * v[0] + m[1][1] * v[1] + m[1][2] * v[2] + m[1][3] * v[3],
+ m[2][0] * v[0] + m[2][1] * v[1] + m[2][2] * v[2] + m[2][3] * v[3],
+ m[3][0] * v[0] + m[3][1] * v[1] + m[3][2] * v[2] + m[3][3] * v[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2)
+ {
+ return mat<2, 4, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3],
+ m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3],
+ m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2)
+ {
+ return mat<3, 4, T, Q>(
+ m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3],
+ m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3],
+ m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3],
+ m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3],
+ m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3],
+ m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3],
+ m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3],
+ m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3],
+ m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3],
+ m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3],
+ m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3],
+ m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2] + m1[3][3] * m2[2][3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ typename mat<4, 4, T, Q>::col_type const SrcA0 = m1[0];
+ typename mat<4, 4, T, Q>::col_type const SrcA1 = m1[1];
+ typename mat<4, 4, T, Q>::col_type const SrcA2 = m1[2];
+ typename mat<4, 4, T, Q>::col_type const SrcA3 = m1[3];
+
+ typename mat<4, 4, T, Q>::col_type const SrcB0 = m2[0];
+ typename mat<4, 4, T, Q>::col_type const SrcB1 = m2[1];
+ typename mat<4, 4, T, Q>::col_type const SrcB2 = m2[2];
+ typename mat<4, 4, T, Q>::col_type const SrcB3 = m2[3];
+
+ mat<4, 4, T, Q> Result;
+ Result[0] = SrcA0 * SrcB0[0] + SrcA1 * SrcB0[1] + SrcA2 * SrcB0[2] + SrcA3 * SrcB0[3];
+ Result[1] = SrcA0 * SrcB1[0] + SrcA1 * SrcB1[1] + SrcA2 * SrcB1[2] + SrcA3 * SrcB1[3];
+ Result[2] = SrcA0 * SrcB2[0] + SrcA1 * SrcB2[1] + SrcA2 * SrcB2[2] + SrcA3 * SrcB2[3];
+ Result[3] = SrcA0 * SrcB3[0] + SrcA1 * SrcB3[1] + SrcA2 * SrcB3[2] + SrcA3 * SrcB3[3];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T const& s)
+ {
+ return mat<4, 4, T, Q>(
+ m[0] / s,
+ m[1] / s,
+ m[2] / s,
+ m[3] / s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(T const& s, mat<4, 4, T, Q> const& m)
+ {
+ return mat<4, 4, T, Q>(
+ s / m[0],
+ s / m[1],
+ s / m[2],
+ s / m[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v)
+ {
+ return inverse(m) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m)
+ {
+ return v * inverse(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ mat<4, 4, T, Q> m1_copy(m1);
+ return m1_copy /= m2;
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
+ {
+ return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "type_mat4x4_simd.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_mat4x4_simd.inl b/3rdparty/glm/source/glm/detail/type_mat4x4_simd.inl
new file mode 100644
index 0000000..fb3a16f
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_mat4x4_simd.inl
@@ -0,0 +1,6 @@
+/// @ref core
+
+namespace glm
+{
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_quat.hpp b/3rdparty/glm/source/glm/detail/type_quat.hpp
new file mode 100644
index 0000000..376c0dc
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_quat.hpp
@@ -0,0 +1,191 @@
+/// @ref core
+/// @file glm/detail/type_quat.hpp
+
+#pragma once
+
+// Dependency:
+#include "../detail/type_mat3x3.hpp"
+#include "../detail/type_mat4x4.hpp"
+#include "../detail/type_vec3.hpp"
+#include "../detail/type_vec4.hpp"
+#include "../ext/vector_relational.hpp"
+#include "../ext/quaternion_relational.hpp"
+#include "../gtc/constants.hpp"
+#include "../gtc/matrix_transform.hpp"
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct qua
+ {
+ // -- Implementation detail --
+
+ typedef qua<T, Q> type;
+ typedef T value_type;
+
+ // -- Data --
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wpedantic"
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
+# pragma clang diagnostic ignored "-Wnested-anon-types"
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union
+# endif
+# endif
+
+# if GLM_LANG & GLM_LANG_CXXMS_FLAG
+ union
+ {
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ struct { T x, y, z, w; };
+# else
+ struct { T w, x, y, z; };
+# endif
+
+ typename detail::storage<4, T, detail::is_aligned<Q>::value>::type data;
+ };
+# else
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ T x, y, z, w;
+# else
+ T w, x, y, z;
+# endif
+# endif
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+# endif
+
+ // -- Component accesses --
+
+ typedef length_t length_type;
+
+ /// Return the count of components of a quaternion
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;}
+
+ GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const;
+
+ // -- Implicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR qua() GLM_DEFAULT_CTOR;
+ GLM_FUNC_DECL GLM_CONSTEXPR qua(qua<T, Q> const& q) GLM_DEFAULT;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua(qua<T, P> const& q);
+
+ // -- Explicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR qua(T s, vec<3, T, Q> const& v);
+
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ GLM_FUNC_DECL GLM_CONSTEXPR qua(T x, T y, T z, T w);
+# else
+ GLM_FUNC_DECL GLM_CONSTEXPR qua(T w, T x, T y, T z);
+# endif
+
+ // -- Conversion constructors --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(qua<U, P> const& q);
+
+ /// Explicit conversion operators
+# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
+ GLM_FUNC_DECL explicit operator mat<3, 3, T, Q>() const;
+ GLM_FUNC_DECL explicit operator mat<4, 4, T, Q>() const;
+# endif
+
+ /// Create a quaternion from two normalized axis
+ ///
+ /// @param u A first normalized axis
+ /// @param v A second normalized axis
+ /// @see gtc_quaternion
+ /// @see http://lolengine.net/blog/2013/09/18/beautiful-maths-quaternion-from-vectors
+ GLM_FUNC_DECL qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v);
+
+ /// Build a quaternion from euler angles (pitch, yaw, roll), in radians.
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(vec<3, T, Q> const& eulerAngles);
+ GLM_FUNC_DECL GLM_EXPLICIT qua(mat<3, 3, T, Q> const& q);
+ GLM_FUNC_DECL GLM_EXPLICIT qua(mat<4, 4, T, Q> const& q);
+
+ // -- Unary arithmetic operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator=(qua<T, Q> const& q) GLM_DEFAULT;
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator=(qua<U, Q> const& q);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator+=(qua<U, Q> const& q);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator-=(qua<U, Q> const& q);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator*=(qua<U, Q> const& q);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q>& operator/=(U s);
+ };
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q, qua<T, Q> const& p);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q, qua<T, Q> const& p);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, qua<T, Q> const& p);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(qua<T, Q> const& q, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(qua<T, Q> const& q, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator*(T const& s, qua<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> operator/(qua<T, Q> const& q, T const& s);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(qua<T, Q> const& q1, qua<T, Q> const& q2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(qua<T, Q> const& q1, qua<T, Q> const& q2);
+} //namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_quat.inl"
+#endif//GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_quat.inl b/3rdparty/glm/source/glm/detail/type_quat.inl
new file mode 100644
index 0000000..52deed4
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_quat.inl
@@ -0,0 +1,412 @@
+#include "../trigonometric.hpp"
+#include "../exponential.hpp"
+#include "../ext/quaternion_common.hpp"
+#include "../ext/quaternion_geometric.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+ template <typename T>
+ struct genTypeTrait<qua<T> >
+ {
+ static const genTypeEnum GENTYPE = GENTYPE_QUAT;
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_dot<qua<T, Q>, T, Aligned>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(qua<T, Q> const& a, qua<T, Q> const& b)
+ {
+ vec<4, T, Q> tmp(a.w * b.w, a.x * b.x, a.y * b.y, a.z * b.z);
+ return (tmp.x + tmp.y) + (tmp.z + tmp.w);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_quat_add
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua<T, Q> call(qua<T, Q> const& q, qua<T, Q> const& p)
+ {
+ return qua<T, Q>(q.w + p.w, q.x + p.x, q.y + p.y, q.z + p.z);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_quat_sub
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua<T, Q> call(qua<T, Q> const& q, qua<T, Q> const& p)
+ {
+ return qua<T, Q>(q.w - p.w, q.x - p.x, q.y - p.y, q.z - p.z);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_quat_mul_scalar
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua<T, Q> call(qua<T, Q> const& q, T s)
+ {
+ return qua<T, Q>(q.w * s, q.x * s, q.y * s, q.z * s);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_quat_div_scalar
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua<T, Q> call(qua<T, Q> const& q, T s)
+ {
+ return qua<T, Q>(q.w / s, q.x / s, q.y / s, q.z / s);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_quat_mul_vec4
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(qua<T, Q> const& q, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w);
+ }
+ };
+}//namespace detail
+
+ // -- Component accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & qua<T, Q>::operator[](typename qua<T, Q>::length_type i)
+ {
+ assert(i >= 0 && i < this->length());
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ return (&x)[i];
+# else
+ return (&w)[i];
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& qua<T, Q>::operator[](typename qua<T, Q>::length_type i) const
+ {
+ assert(i >= 0 && i < this->length());
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ return (&x)[i];
+# else
+ return (&w)[i];
+# endif
+ }
+
+ // -- Implicit basic constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua()
+# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ : x(0), y(0), z(0), w(1)
+# else
+ : w(1), x(0), y(0), z(0)
+# endif
+# endif
+ {}
+# endif
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(qua<T, Q> const& q)
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ : x(q.x), y(q.y), z(q.z), w(q.w)
+# else
+ : w(q.w), x(q.x), y(q.y), z(q.z)
+# endif
+ {}
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(qua<T, P> const& q)
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ : x(q.x), y(q.y), z(q.z), w(q.w)
+# else
+ : w(q.w), x(q.x), y(q.y), z(q.z)
+# endif
+ {}
+
+ // -- Explicit basic constructors --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(T s, vec<3, T, Q> const& v)
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ : x(v.x), y(v.y), z(v.z), w(s)
+# else
+ : w(s), x(v.x), y(v.y), z(v.z)
+# endif
+ {}
+
+ template <typename T, qualifier Q>
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(T _x, T _y, T _z, T _w)
+ : x(_x), y(_y), z(_z), w(_w)
+# else
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(T _w, T _x, T _y, T _z)
+ : w(_w), x(_x), y(_y), z(_z)
+# endif
+ {}
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(qua<U, P> const& q)
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ : x(static_cast<T>(q.x)), y(static_cast<T>(q.y)), z(static_cast<T>(q.z)), w(static_cast<T>(q.w))
+# else
+ : w(static_cast<T>(q.w)), x(static_cast<T>(q.x)), y(static_cast<T>(q.y)), z(static_cast<T>(q.z))
+# endif
+ {}
+
+ //template<typename valType>
+ //GLM_FUNC_QUALIFIER qua<valType>::qua
+ //(
+ // valType const& pitch,
+ // valType const& yaw,
+ // valType const& roll
+ //)
+ //{
+ // vec<3, valType> eulerAngle(pitch * valType(0.5), yaw * valType(0.5), roll * valType(0.5));
+ // vec<3, valType> c = glm::cos(eulerAngle * valType(0.5));
+ // vec<3, valType> s = glm::sin(eulerAngle * valType(0.5));
+ //
+ // this->w = c.x * c.y * c.z + s.x * s.y * s.z;
+ // this->x = s.x * c.y * c.z - c.x * s.y * s.z;
+ // this->y = c.x * s.y * c.z + s.x * c.y * s.z;
+ // this->z = c.x * c.y * s.z - s.x * s.y * c.z;
+ //}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q>::qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v)
+ {
+ T norm_u_norm_v = sqrt(dot(u, u) * dot(v, v));
+ T real_part = norm_u_norm_v + dot(u, v);
+ vec<3, T, Q> t;
+
+ if(real_part < static_cast<T>(1.e-6f) * norm_u_norm_v)
+ {
+ // If u and v are exactly opposite, rotate 180 degrees
+ // around an arbitrary orthogonal axis. Axis normalisation
+ // can happen later, when we normalise the quaternion.
+ real_part = static_cast<T>(0);
+ t = abs(u.x) > abs(u.z) ? vec<3, T, Q>(-u.y, u.x, static_cast<T>(0)) : vec<3, T, Q>(static_cast<T>(0), -u.z, u.y);
+ }
+ else
+ {
+ // Otherwise, build quaternion the standard way.
+ t = cross(u, v);
+ }
+
+ *this = normalize(qua<T, Q>(real_part, t.x, t.y, t.z));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(vec<3, T, Q> const& eulerAngle)
+ {
+ vec<3, T, Q> c = glm::cos(eulerAngle * T(0.5));
+ vec<3, T, Q> s = glm::sin(eulerAngle * T(0.5));
+
+ this->w = c.x * c.y * c.z + s.x * s.y * s.z;
+ this->x = s.x * c.y * c.z - c.x * s.y * s.z;
+ this->y = c.x * s.y * c.z + s.x * c.y * s.z;
+ this->z = c.x * c.y * s.z - s.x * s.y * c.z;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q>::qua(mat<3, 3, T, Q> const& m)
+ {
+ *this = quat_cast(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q>::qua(mat<4, 4, T, Q> const& m)
+ {
+ *this = quat_cast(m);
+ }
+
+# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q>::operator mat<3, 3, T, Q>() const
+ {
+ return mat3_cast(*this);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q>::operator mat<4, 4, T, Q>() const
+ {
+ return mat4_cast(*this);
+ }
+# endif//GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
+
+ // -- Unary arithmetic operators --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator=(qua<T, Q> const& q)
+ {
+ this->w = q.w;
+ this->x = q.x;
+ this->y = q.y;
+ this->z = q.z;
+ return *this;
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator=(qua<U, Q> const& q)
+ {
+ this->w = static_cast<T>(q.w);
+ this->x = static_cast<T>(q.x);
+ this->y = static_cast<T>(q.y);
+ this->z = static_cast<T>(q.z);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator+=(qua<U, Q> const& q)
+ {
+ return (*this = detail::compute_quat_add<T, Q, detail::is_aligned<Q>::value>::call(*this, qua<T, Q>(q)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator-=(qua<U, Q> const& q)
+ {
+ return (*this = detail::compute_quat_sub<T, Q, detail::is_aligned<Q>::value>::call(*this, qua<T, Q>(q)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator*=(qua<U, Q> const& r)
+ {
+ qua<T, Q> const p(*this);
+ qua<T, Q> const q(r);
+
+ this->w = p.w * q.w - p.x * q.x - p.y * q.y - p.z * q.z;
+ this->x = p.w * q.x + p.x * q.w + p.y * q.z - p.z * q.y;
+ this->y = p.w * q.y + p.y * q.w + p.z * q.x - p.x * q.z;
+ this->z = p.w * q.z + p.z * q.w + p.x * q.y - p.y * q.x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator*=(U s)
+ {
+ return (*this = detail::compute_quat_mul_scalar<T, Q, detail::is_aligned<Q>::value>::call(*this, static_cast<U>(s)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator/=(U s)
+ {
+ return (*this = detail::compute_quat_div_scalar<T, Q, detail::is_aligned<Q>::value>::call(*this, static_cast<U>(s)));
+ }
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q)
+ {
+ return q;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q)
+ {
+ return qua<T, Q>(-q.w, -q.x, -q.y, -q.z);
+ }
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q, qua<T, Q> const& p)
+ {
+ return qua<T, Q>(q) += p;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q, qua<T, Q> const& p)
+ {
+ return qua<T, Q>(q) -= p;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, qua<T, Q> const& p)
+ {
+ return qua<T, Q>(q) *= p;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(qua<T, Q> const& q, vec<3, T, Q> const& v)
+ {
+ vec<3, T, Q> const QuatVector(q.x, q.y, q.z);
+ vec<3, T, Q> const uv(glm::cross(QuatVector, v));
+ vec<3, T, Q> const uuv(glm::cross(QuatVector, uv));
+
+ return v + ((uv * q.w) + uuv) * static_cast<T>(2);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua<T, Q> const& q)
+ {
+ return glm::inverse(q) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(qua<T, Q> const& q, vec<4, T, Q> const& v)
+ {
+ return detail::compute_quat_mul_vec4<T, Q, detail::is_aligned<Q>::value>::call(q, v);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua<T, Q> const& q)
+ {
+ return glm::inverse(q) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, T const& s)
+ {
+ return qua<T, Q>(
+ q.w * s, q.x * s, q.y * s, q.z * s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(T const& s, qua<T, Q> const& q)
+ {
+ return q * s;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator/(qua<T, Q> const& q, T const& s)
+ {
+ return qua<T, Q>(
+ q.w / s, q.x / s, q.y / s, q.z / s);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(qua<T, Q> const& q1, qua<T, Q> const& q2)
+ {
+ return q1.x == q2.x && q1.y == q2.y && q1.z == q2.z && q1.w == q2.w;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(qua<T, Q> const& q1, qua<T, Q> const& q2)
+ {
+ return q1.x != q2.x || q1.y != q2.y || q1.z != q2.z || q1.w != q2.w;
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "type_quat_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/detail/type_quat_simd.inl b/3rdparty/glm/source/glm/detail/type_quat_simd.inl
new file mode 100644
index 0000000..3333e59
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_quat_simd.inl
@@ -0,0 +1,188 @@
+/// @ref core
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+namespace glm{
+namespace detail
+{
+/*
+ template<qualifier Q>
+ struct compute_quat_mul<float, Q, true>
+ {
+ static qua<float, Q> call(qua<float, Q> const& q1, qua<float, Q> const& q2)
+ {
+ // SSE2 STATS: 11 shuffle, 8 mul, 8 add
+ // SSE4 STATS: 3 shuffle, 4 mul, 4 dpps
+
+ __m128 const mul0 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(0, 1, 2, 3)));
+ __m128 const mul1 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(1, 0, 3, 2)));
+ __m128 const mul2 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(2, 3, 0, 1)));
+ __m128 const mul3 = _mm_mul_ps(q1.Data, q2.Data);
+
+# if GLM_ARCH & GLM_ARCH_SSE41_BIT
+ __m128 const add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff);
+ __m128 const add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff);
+ __m128 const add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff);
+ __m128 const add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff);
+# else
+ __m128 const mul4 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f));
+ __m128 const add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul4, mul4));
+ __m128 const add4 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1));
+
+ __m128 const mul5 = _mm_mul_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f));
+ __m128 const add1 = _mm_add_ps(mul1, _mm_movehl_ps(mul5, mul5));
+ __m128 const add5 = _mm_add_ss(add1, _mm_shuffle_ps(add1, add1, 1));
+
+ __m128 const mul6 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f));
+ __m128 const add2 = _mm_add_ps(mul6, _mm_movehl_ps(mul6, mul6));
+ __m128 const add6 = _mm_add_ss(add2, _mm_shuffle_ps(add2, add2, 1));
+
+ __m128 const mul7 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f));
+ __m128 const add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul7, mul7));
+ __m128 const add7 = _mm_add_ss(add3, _mm_shuffle_ps(add3, add3, 1));
+ #endif
+
+ // This SIMD code is a politically correct way of doing this, but in every test I've tried it has been slower than
+ // the final code below. I'll keep this here for reference - maybe somebody else can do something better...
+ //
+ //__m128 xxyy = _mm_shuffle_ps(add4, add5, _MM_SHUFFLE(0, 0, 0, 0));
+ //__m128 zzww = _mm_shuffle_ps(add6, add7, _MM_SHUFFLE(0, 0, 0, 0));
+ //
+ //return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0));
+
+ qua<float, Q> Result;
+ _mm_store_ss(&Result.x, add4);
+ _mm_store_ss(&Result.y, add5);
+ _mm_store_ss(&Result.z, add6);
+ _mm_store_ss(&Result.w, add7);
+ return Result;
+ }
+ };
+*/
+
+ template<qualifier Q>
+ struct compute_quat_add<float, Q, true>
+ {
+ static qua<float, Q> call(qua<float, Q> const& q, qua<float, Q> const& p)
+ {
+ qua<float, Q> Result;
+ Result.data = _mm_add_ps(q.data, p.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<qualifier Q>
+ struct compute_quat_add<double, Q, true>
+ {
+ static qua<double, Q> call(qua<double, Q> const& a, qua<double, Q> const& b)
+ {
+ qua<double, Q> Result;
+ Result.data = _mm256_add_pd(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+	template<qualifier Q>
+	struct compute_quat_sub<float, Q, true>
+	{
+		static qua<float, Q> call(qua<float, Q> const& q, qua<float, Q> const& p)
+		{
+			qua<float, Q> Result; // fix: was vec<4, float, Q>, which has no conversion to the declared qua return type
+			Result.data = _mm_sub_ps(q.data, p.data);
+			return Result;
+		}
+	};
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<qualifier Q>
+ struct compute_quat_sub<double, Q, true>
+ {
+ static qua<double, Q> call(qua<double, Q> const& a, qua<double, Q> const& b)
+ {
+ qua<double, Q> Result;
+ Result.data = _mm256_sub_pd(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+	template<qualifier Q>
+	struct compute_quat_mul_scalar<float, Q, true>
+	{
+		static qua<float, Q> call(qua<float, Q> const& q, float s)
+		{
+			qua<float, Q> Result; // fix: was vec<4, float, Q>, which has no conversion to the declared qua return type
+			Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s));
+			return Result;
+		}
+	};
+
+#	if GLM_ARCH & GLM_ARCH_AVX_BIT
+	template<qualifier Q>
+	struct compute_quat_mul_scalar<double, Q, true>
+	{
+		static qua<double, Q> call(qua<double, Q> const& q, double s)
+		{
+			qua<double, Q> Result;
+			Result.data = _mm256_mul_pd(q.data, _mm256_set1_pd(s)); // fix: _mm_set_ps1(float) yields __m128; the AVX double path needs a __m256d broadcast
+			return Result;
+		}
+	};
+#	endif
+
+	template<qualifier Q>
+	struct compute_quat_div_scalar<float, Q, true>
+	{
+		static qua<float, Q> call(qua<float, Q> const& q, float s)
+		{
+			qua<float, Q> Result; // fix: was vec<4, float, Q>, which has no conversion to the declared qua return type
+			Result.data = _mm_div_ps(q.data, _mm_set_ps1(s));
+			return Result;
+		}
+	};
+
+#	if GLM_ARCH & GLM_ARCH_AVX_BIT
+	template<qualifier Q>
+	struct compute_quat_div_scalar<double, Q, true>
+	{
+		static qua<double, Q> call(qua<double, Q> const& q, double s)
+		{
+			qua<double, Q> Result;
+			Result.data = _mm256_div_pd(q.data, _mm256_set1_pd(s)); // fix: _mm_set_ps1(float) yields __m128; the AVX double path needs a __m256d broadcast
+			return Result;
+		}
+	};
+#	endif
+
+	template<qualifier Q>
+	struct compute_quat_mul_vec4<float, Q, true>
+	{
+		static vec<4, float, Q> call(qua<float, Q> const& q, vec<4, float, Q> const& v)
+		{
+			__m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3));
+			__m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1));
+			__m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 1, 0, 2));
+			__m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1));
+			__m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2));
+
+			__m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0));
+			__m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1));
+			__m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2));
+			__m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0));
+
+			__m128 const two = _mm_set1_ps(2.0f);
+			uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two));
+			uuv = _mm_mul_ps(uuv, two);
+
+			vec<4, float, Q> Result;
+			Result.data = _mm_add_ps(v.data, _mm_add_ps(uv, uuv)); // fix: the storage member is 'data'; 'v.Data' does not exist
+			return Result;
+		}
+	};
+}//namespace detail
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
diff --git a/3rdparty/glm/source/glm/detail/type_vec1.hpp b/3rdparty/glm/source/glm/detail/type_vec1.hpp
new file mode 100644
index 0000000..ed381cc
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec1.hpp
@@ -0,0 +1,308 @@
+/// @ref core
+/// @file glm/detail/type_vec1.hpp
+
+#pragma once
+
+#include "qualifier.hpp"
+#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+# include "_swizzle.hpp"
+#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+# include "_swizzle_func.hpp"
+#endif
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct vec<1, T, Q>
+ {
+ // -- Implementation detail --
+
+ typedef T value_type;
+ typedef vec<1, T, Q> type;
+ typedef vec<1, bool, Q> bool_type;
+
+ // -- Data --
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wpedantic"
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
+# pragma clang diagnostic ignored "-Wnested-anon-types"
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union
+# endif
+# endif
+
+# if GLM_CONFIG_XYZW_ONLY
+ T x;
+# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE
+ union
+ {
+ T x;
+ T r;
+ T s;
+
+ typename detail::storage<1, T, detail::is_aligned<Q>::value>::type data;
+/*
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ _GLM_SWIZZLE1_2_MEMBERS(T, Q, x)
+ _GLM_SWIZZLE1_2_MEMBERS(T, Q, r)
+ _GLM_SWIZZLE1_2_MEMBERS(T, Q, s)
+ _GLM_SWIZZLE1_3_MEMBERS(T, Q, x)
+ _GLM_SWIZZLE1_3_MEMBERS(T, Q, r)
+ _GLM_SWIZZLE1_3_MEMBERS(T, Q, s)
+ _GLM_SWIZZLE1_4_MEMBERS(T, Q, x)
+ _GLM_SWIZZLE1_4_MEMBERS(T, Q, r)
+ _GLM_SWIZZLE1_4_MEMBERS(T, Q, s)
+# endif
+*/
+ };
+# else
+ union {T x, r, s;};
+/*
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC1(T, Q)
+# endif
+*/
+# endif
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+# endif
+
+ // -- Component accesses --
+
+ /// Return the count of components of the vector
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 1;}
+
+ GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const;
+
+ // -- Implicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR;
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, T, P> const& v);
+
+ // -- Explicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar);
+
+ // -- Conversion vector constructors --
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<1, U, P> const& v);
+
+ // -- Swizzle constructors --
+/*
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ template<int E0>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<1, T, Q, E0, -1,-2,-3> const& that)
+ {
+ *this = that();
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+*/
+ // -- Unary arithmetic operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec const& v) GLM_DEFAULT;
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(vec<1, U, Q> const& v);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator++();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator--();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator++(int);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator--(int);
+
+ // -- Unary bit operators --
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(vec<1, U, Q> const& v);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_vec1.inl"
+#endif//GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_vec1.inl b/3rdparty/glm/source/glm/detail/type_vec1.inl
new file mode 100644
index 0000000..52a9b3a
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec1.inl
@@ -0,0 +1,553 @@
+/// @ref core
+
+#include "./compute_vector_relational.hpp"
+
+namespace glm
+{
+ // -- Implicit basic constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec()
+# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE
+ : x(0)
+# endif
+ {}
+# endif
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, Q> const& v)
+ : x(v.x)
+ {}
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, P> const& v)
+ : x(v.x)
+ {}
+
+ // -- Explicit basic constructors --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(T scalar)
+ : x(scalar)
+ {}
+
+ // -- Conversion vector constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<2, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<3, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<4, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ {}
+
+ // -- Component accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type)
+ {
+ return x;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) const
+ {
+ return x;
+ }
+
+ // -- Unary arithmetic operators --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, T, Q> const& v)
+ {
+ this->x = v.x;
+ return *this;
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, U, Q> const& v)
+ {
+ this->x = static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(U scalar)
+ {
+ this->x += static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(vec<1, U, Q> const& v)
+ {
+ this->x += static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(U scalar)
+ {
+ this->x -= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(vec<1, U, Q> const& v)
+ {
+ this->x -= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(U scalar)
+ {
+ this->x *= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(vec<1, U, Q> const& v)
+ {
+ this->x *= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(U scalar)
+ {
+ this->x /= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(vec<1, U, Q> const& v)
+ {
+ this->x /= static_cast<T>(v.x);
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator++()
+ {
+ ++this->x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator--()
+ {
+ --this->x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator++(int)
+ {
+ vec<1, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator--(int)
+ {
+ vec<1, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(U scalar)
+ {
+ this->x %= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(vec<1, U, Q> const& v)
+ {
+ this->x %= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(U scalar)
+ {
+ this->x &= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(vec<1, U, Q> const& v)
+ {
+ this->x &= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(U scalar)
+ {
+ this->x |= static_cast<T>(scalar);
+ return *this;
+ }
+
+	template<typename T, qualifier Q>
+	template<typename U>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(vec<1, U, Q> const& v)
+	{
+		this->x |= static_cast<T>(v.x);
+		return *this;
+	}
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(U scalar)
+ {
+ this->x ^= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(vec<1, U, Q> const& v)
+ {
+ this->x ^= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(U scalar)
+ {
+ this->x <<= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(vec<1, U, Q> const& v)
+ {
+ this->x <<= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(U scalar)
+ {
+ this->x >>= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(vec<1, U, Q> const& v)
+ {
+ this->x >>= static_cast<T>(v.x);
+ return *this;
+ }
+
+ // -- Unary constant operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v)
+ {
+ return v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ -v.x);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar + v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x + v2.x);
+ }
+
+ //operator-
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar - v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x - v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar * v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x * v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar / v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x / v2.x);
+ }
+
+ // -- Binary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x % scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar % v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x % v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x & scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar & v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x & v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x | scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar | v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x | v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ v.x ^ scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ scalar ^ v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ v1.x ^ v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ static_cast<T>(v.x << scalar));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ static_cast<T>(scalar << v.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ static_cast<T>(v1.x << v2.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar)
+ {
+ return vec<1, T, Q>(
+ static_cast<T>(v.x >> scalar));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ static_cast<T>(scalar >> v.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<1, T, Q>(
+ static_cast<T>(v1.x >> v2.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v)
+ {
+ return vec<1, T, Q>(
+ ~v.x);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return !(v1 == v2);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2)
+ {
+ return vec<1, bool, Q>(v1.x && v2.x);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2)
+ {
+ return vec<1, bool, Q>(v1.x || v2.x);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_vec2.hpp b/3rdparty/glm/source/glm/detail/type_vec2.hpp
new file mode 100644
index 0000000..5aa969d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec2.hpp
@@ -0,0 +1,402 @@
+/// @ref core
+/// @file glm/detail/type_vec2.hpp
+
+#pragma once
+
+#include "qualifier.hpp"
+#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+# include "_swizzle.hpp"
+#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+# include "_swizzle_func.hpp"
+#endif
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct vec<2, T, Q>
+ {
+ // -- Implementation detail --
+
+ typedef T value_type;
+ typedef vec<2, T, Q> type;
+ typedef vec<2, bool, Q> bool_type;
+
+ // -- Data --
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wpedantic"
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
+# pragma clang diagnostic ignored "-Wnested-anon-types"
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union
+# endif
+# endif
+
+# if GLM_CONFIG_XYZW_ONLY
+ T x, y;
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, Q, x, y)
+# endif//GLM_CONFIG_SWIZZLE
+# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE
+ union
+ {
+ struct{ T x, y; };
+ struct{ T r, g; };
+ struct{ T s, t; };
+
+ typename detail::storage<2, T, detail::is_aligned<Q>::value>::type data;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ GLM_SWIZZLE2_2_MEMBERS(T, Q, x, y)
+ GLM_SWIZZLE2_2_MEMBERS(T, Q, r, g)
+ GLM_SWIZZLE2_2_MEMBERS(T, Q, s, t)
+ GLM_SWIZZLE2_3_MEMBERS(T, Q, x, y)
+ GLM_SWIZZLE2_3_MEMBERS(T, Q, r, g)
+ GLM_SWIZZLE2_3_MEMBERS(T, Q, s, t)
+ GLM_SWIZZLE2_4_MEMBERS(T, Q, x, y)
+ GLM_SWIZZLE2_4_MEMBERS(T, Q, r, g)
+ GLM_SWIZZLE2_4_MEMBERS(T, Q, s, t)
+# endif
+ };
+# else
+ union {T x, r, s;};
+ union {T y, g, t;};
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q)
+# endif//GLM_CONFIG_SWIZZLE
+# endif
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+# endif
+
+ // -- Component accesses --
+
+ /// Return the count of components of the vector
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;}
+
+ GLM_FUNC_DECL GLM_CONSTEXPR T& operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const;
+
+ // -- Implicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR;
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, T, P> const& v);
+
+ // -- Explicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y);
+
+ // -- Conversion constructors --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, B y);
+ template<typename A, typename B>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, B y);
+ template<typename A, typename B>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, vec<1, B, Q> const& y);
+ template<typename A, typename B>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, vec<1, B, Q> const& y);
+
+ // -- Conversion vector constructors --
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v);
+
+ // -- Swizzle constructors --
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ template<int E0, int E1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1,-1,-2> const& that)
+ {
+ *this = that();
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ // -- Unary arithmetic operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec const& v) GLM_DEFAULT;
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<2, U, Q> const& v);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator++();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator--();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator++(int);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator--(int);
+
+ // -- Unary bit operators --
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<2, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<2, U, Q> const& v);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_vec2.inl"
+#endif//GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_vec2.inl b/3rdparty/glm/source/glm/detail/type_vec2.inl
new file mode 100644
index 0000000..55ab64d
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec2.inl
@@ -0,0 +1,915 @@
+/// @ref core
+
+#include "./compute_vector_relational.hpp"
+
+namespace glm
+{
+ // -- Implicit basic constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec()
+# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE
+ : x(0), y(0)
+# endif
+ {}
+# endif
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, Q> const& v)
+ : x(v.x), y(v.y)
+ {}
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, P> const& v)
+ : x(v.x), y(v.y)
+ {}
+
+ // -- Explicit basic constructors --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T scalar)
+ : x(scalar), y(scalar)
+ {}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T _x, T _y)
+ : x(_x), y(_y)
+ {}
+
+ // -- Conversion scalar constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, B _y)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, B _y)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, vec<1, B, Q> const& _y)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, vec<1, B, Q> const& _y)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ {}
+
+ // -- Conversion vector constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<3, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<4, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.y))
+ {}
+
+ // -- Component accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i)
+ {
+ assert(i >= 0 && i < this->length());
+ switch(i)
+ {
+ default:
+ case 0:
+ return x;
+ case 1:
+ return y;
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) const
+ {
+ assert(i >= 0 && i < this->length());
+ switch(i)
+ {
+ default:
+ case 0:
+ return x;
+ case 1:
+ return y;
+ }
+ }
+
+ // -- Unary arithmetic operators --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, T, Q> const& v)
+ {
+ this->x = v.x;
+ this->y = v.y;
+ return *this;
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, U, Q> const& v)
+ {
+ this->x = static_cast<T>(v.x);
+ this->y = static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(U scalar)
+ {
+ this->x += static_cast<T>(scalar);
+ this->y += static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<1, U, Q> const& v)
+ {
+ this->x += static_cast<T>(v.x);
+ this->y += static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<2, U, Q> const& v)
+ {
+ this->x += static_cast<T>(v.x);
+ this->y += static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(U scalar)
+ {
+ this->x -= static_cast<T>(scalar);
+ this->y -= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<1, U, Q> const& v)
+ {
+ this->x -= static_cast<T>(v.x);
+ this->y -= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<2, U, Q> const& v)
+ {
+ this->x -= static_cast<T>(v.x);
+ this->y -= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(U scalar)
+ {
+ this->x *= static_cast<T>(scalar);
+ this->y *= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<1, U, Q> const& v)
+ {
+ this->x *= static_cast<T>(v.x);
+ this->y *= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<2, U, Q> const& v)
+ {
+ this->x *= static_cast<T>(v.x);
+ this->y *= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(U scalar)
+ {
+ this->x /= static_cast<T>(scalar);
+ this->y /= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<1, U, Q> const& v)
+ {
+ this->x /= static_cast<T>(v.x);
+ this->y /= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<2, U, Q> const& v)
+ {
+ this->x /= static_cast<T>(v.x);
+ this->y /= static_cast<T>(v.y);
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator++()
+ {
+ ++this->x;
+ ++this->y;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator--()
+ {
+ --this->x;
+ --this->y;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator++(int)
+ {
+ vec<2, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator--(int)
+ {
+ vec<2, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(U scalar)
+ {
+ this->x %= static_cast<T>(scalar);
+ this->y %= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<1, U, Q> const& v)
+ {
+ this->x %= static_cast<T>(v.x);
+ this->y %= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<2, U, Q> const& v)
+ {
+ this->x %= static_cast<T>(v.x);
+ this->y %= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(U scalar)
+ {
+ this->x &= static_cast<T>(scalar);
+ this->y &= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<1, U, Q> const& v)
+ {
+ this->x &= static_cast<T>(v.x);
+ this->y &= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<2, U, Q> const& v)
+ {
+ this->x &= static_cast<T>(v.x);
+ this->y &= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(U scalar)
+ {
+ this->x |= static_cast<T>(scalar);
+ this->y |= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<1, U, Q> const& v)
+ {
+ this->x |= static_cast<T>(v.x);
+ this->y |= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<2, U, Q> const& v)
+ {
+ this->x |= static_cast<T>(v.x);
+ this->y |= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(U scalar)
+ {
+ this->x ^= static_cast<T>(scalar);
+ this->y ^= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<1, U, Q> const& v)
+ {
+ this->x ^= static_cast<T>(v.x);
+ this->y ^= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<2, U, Q> const& v)
+ {
+ this->x ^= static_cast<T>(v.x);
+ this->y ^= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(U scalar)
+ {
+ this->x <<= static_cast<T>(scalar);
+ this->y <<= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<1, U, Q> const& v)
+ {
+ this->x <<= static_cast<T>(v.x);
+ this->y <<= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<2, U, Q> const& v)
+ {
+ this->x <<= static_cast<T>(v.x);
+ this->y <<= static_cast<T>(v.y);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(U scalar)
+ {
+ this->x >>= static_cast<T>(scalar);
+ this->y >>= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<1, U, Q> const& v)
+ {
+ this->x >>= static_cast<T>(v.x);
+ this->y >>= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<2, U, Q> const& v)
+ {
+ this->x >>= static_cast<T>(v.x);
+ this->y >>= static_cast<T>(v.y);
+ return *this;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v)
+ {
+ return v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ -v.x,
+ -v.y);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x + scalar,
+ v.y + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x + v2.x,
+ v1.y + v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar + v.x,
+ scalar + v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x + v2.x,
+ v1.x + v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x + v2.x,
+ v1.y + v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x - scalar,
+ v.y - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x - v2.x,
+ v1.y - v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar - v.x,
+ scalar - v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x - v2.x,
+ v1.x - v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x - v2.x,
+ v1.y - v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x * scalar,
+ v.y * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x * v2.x,
+ v1.y * v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar * v.x,
+ scalar * v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x * v2.x,
+ v1.x * v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x * v2.x,
+ v1.y * v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x / scalar,
+ v.y / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x / v2.x,
+ v1.y / v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar / v.x,
+ scalar / v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x / v2.x,
+ v1.x / v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x / v2.x,
+ v1.y / v2.y);
+ }
+
+ // -- Binary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x % scalar,
+ v.y % scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x % v2.x,
+ v1.y % v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar % v.x,
+ scalar % v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x % v2.x,
+ v1.x % v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x % v2.x,
+ v1.y % v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x & scalar,
+ v.y & scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x & v2.x,
+ v1.y & v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar & v.x,
+ scalar & v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x & v2.x,
+ v1.x & v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x & v2.x,
+ v1.y & v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x | scalar,
+ v.y | scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x | v2.x,
+ v1.y | v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar | v.x,
+ scalar | v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x | v2.x,
+ v1.x | v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x | v2.x,
+ v1.y | v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x ^ scalar,
+ v.y ^ scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x ^ v2.x,
+ v1.y ^ v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar ^ v.x,
+ scalar ^ v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x ^ v2.x,
+ v1.x ^ v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x ^ v2.x,
+ v1.y ^ v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x << scalar,
+ v.y << scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x << v2.x,
+ v1.y << v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar << v.x,
+ scalar << v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x << v2.x,
+ v1.x << v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x << v2.x,
+ v1.y << v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar)
+ {
+ return vec<2, T, Q>(
+ v.x >> scalar,
+ v.y >> scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x >> v2.x,
+ v1.y >> v2.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ scalar >> v.x,
+ scalar >> v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x >> v2.x,
+ v1.x >> v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return vec<2, T, Q>(
+ v1.x >> v2.x,
+ v1.y >> v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v)
+ {
+ return vec<2, T, Q>(
+ ~v.x,
+ ~v.y);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x) &&
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.y, v2.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2)
+ {
+ return !(v1 == v2);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2)
+ {
+ return vec<2, bool, Q>(v1.x && v2.x, v1.y && v2.y);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2)
+ {
+ return vec<2, bool, Q>(v1.x || v2.x, v1.y || v2.y);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_vec3.hpp b/3rdparty/glm/source/glm/detail/type_vec3.hpp
new file mode 100644
index 0000000..cfc5000
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec3.hpp
@@ -0,0 +1,435 @@
+/// @ref core
+/// @file glm/detail/type_vec3.hpp
+
+#pragma once
+
+#include "qualifier.hpp"
+#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+# include "_swizzle.hpp"
+#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+# include "_swizzle_func.hpp"
+#endif
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct vec<3, T, Q>
+ {
+ // -- Implementation detail --
+
+ typedef T value_type;
+ typedef vec<3, T, Q> type;
+ typedef vec<3, bool, Q> bool_type;
+
+ // -- Data --
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wpedantic"
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
+# pragma clang diagnostic ignored "-Wnested-anon-types"
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+# pragma warning(disable: 4324) // structure was padded due to alignment specifier
+# endif
+# endif
+# endif
+
+# if GLM_CONFIG_XYZW_ONLY
+ T x, y, z;
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, Q, x, y, z)
+# endif//GLM_CONFIG_SWIZZLE
+# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE
+ union
+ {
+ struct{ T x, y, z; };
+ struct{ T r, g, b; };
+ struct{ T s, t, p; };
+
+ typename detail::storage<3, T, detail::is_aligned<Q>::value>::type data;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ GLM_SWIZZLE3_2_MEMBERS(T, Q, x, y, z)
+ GLM_SWIZZLE3_2_MEMBERS(T, Q, r, g, b)
+ GLM_SWIZZLE3_2_MEMBERS(T, Q, s, t, p)
+ GLM_SWIZZLE3_3_MEMBERS(T, Q, x, y, z)
+ GLM_SWIZZLE3_3_MEMBERS(T, Q, r, g, b)
+ GLM_SWIZZLE3_3_MEMBERS(T, Q, s, t, p)
+ GLM_SWIZZLE3_4_MEMBERS(T, Q, x, y, z)
+ GLM_SWIZZLE3_4_MEMBERS(T, Q, r, g, b)
+ GLM_SWIZZLE3_4_MEMBERS(T, Q, s, t, p)
+# endif
+ };
+# else
+ union { T x, r, s; };
+ union { T y, g, t; };
+ union { T z, b, p; };
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q)
+# endif//GLM_CONFIG_SWIZZLE
+# endif//GLM_LANG
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+# endif
+
+ // -- Component accesses --
+
+ /// Return the count of components of the vector
+ typedef length_t length_type;
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 3;}
+
+ GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const;
+
+ // -- Implicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR;
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, T, P> const& v);
+
+ // -- Explicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T a, T b, T c);
+
+ // -- Conversion scalar constructors --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X x, Y y, Z z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z);
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z);
+
+ // -- Conversion vector constructors --
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v);
+
+ // -- Swizzle constructors --
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ template<int E0, int E1, int E2>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& that)
+ {
+ *this = that();
+ }
+
+ template<int E0, int E1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& scalar)
+ {
+ *this = vec(v(), scalar);
+ }
+
+ template<int E0, int E1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& scalar, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v)
+ {
+ *this = vec(scalar, v());
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ // -- Unary arithmetic operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q>& operator=(vec<3, T, Q> const& v) GLM_DEFAULT;
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<3, U, Q> const& v);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator++();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator--();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator++(int);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator--(int);
+
+ // -- Unary bit operators --
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<3, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<3, U, Q> const& v);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_vec3.inl"
+#endif//GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_vec3.inl b/3rdparty/glm/source/glm/detail/type_vec3.inl
new file mode 100644
index 0000000..8d15db8
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec3.inl
@@ -0,0 +1,1070 @@
+/// @ref core
+
+#include "compute_vector_relational.hpp"
+
+namespace glm
+{
+ // -- Implicit basic constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec()
+# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE
+ : x(0), y(0), z(0)
+# endif
+ {}
+# endif
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, Q> const& v)
+ : x(v.x), y(v.y), z(v.z)
+ {}
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, P> const& v)
+ : x(v.x), y(v.y), z(v.z)
+ {}
+
+ // -- Explicit basic constructors --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T scalar)
+ : x(scalar), y(scalar), z(scalar)
+ {}
+
+ template <typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T _x, T _y, T _z)
+ : x(_x), y(_y), z(_z)
+ {}
+
+ // -- Conversion scalar constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.x))
+ , z(static_cast<T>(v.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, Z _z)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z.x))
+ {}
+
+ // -- Conversion vector constructors --
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, B _z)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_z.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(A _x, vec<2, B, P> const& _yz)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_yz.x))
+ , z(static_cast<T>(_yz.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_yz.x))
+ , z(static_cast<T>(_yz.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.y))
+ , z(static_cast<T>(v.z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<4, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.y))
+ , z(static_cast<T>(v.z))
+ {}
+
+ // -- Component accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i)
+ {
+ assert(i >= 0 && i < this->length());
+ switch(i)
+ {
+ default:
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) const
+ {
+ assert(i >= 0 && i < this->length());
+ switch(i)
+ {
+ default:
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ }
+ }
+
+ // -- Unary arithmetic operators --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, T, Q> const& v)
+ {
+ this->x = v.x;
+ this->y = v.y;
+ this->z = v.z;
+ return *this;
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, U, Q> const& v)
+ {
+ this->x = static_cast<T>(v.x);
+ this->y = static_cast<T>(v.y);
+ this->z = static_cast<T>(v.z);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(U scalar)
+ {
+ this->x += static_cast<T>(scalar);
+ this->y += static_cast<T>(scalar);
+ this->z += static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<1, U, Q> const& v)
+ {
+ this->x += static_cast<T>(v.x);
+ this->y += static_cast<T>(v.x);
+ this->z += static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<3, U, Q> const& v)
+ {
+ this->x += static_cast<T>(v.x);
+ this->y += static_cast<T>(v.y);
+ this->z += static_cast<T>(v.z);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(U scalar)
+ {
+ this->x -= static_cast<T>(scalar);
+ this->y -= static_cast<T>(scalar);
+ this->z -= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<1, U, Q> const& v)
+ {
+ this->x -= static_cast<T>(v.x);
+ this->y -= static_cast<T>(v.x);
+ this->z -= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<3, U, Q> const& v)
+ {
+ this->x -= static_cast<T>(v.x);
+ this->y -= static_cast<T>(v.y);
+ this->z -= static_cast<T>(v.z);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(U scalar)
+ {
+ this->x *= static_cast<T>(scalar);
+ this->y *= static_cast<T>(scalar);
+ this->z *= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<1, U, Q> const& v)
+ {
+ this->x *= static_cast<T>(v.x);
+ this->y *= static_cast<T>(v.x);
+ this->z *= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<3, U, Q> const& v)
+ {
+ this->x *= static_cast<T>(v.x);
+ this->y *= static_cast<T>(v.y);
+ this->z *= static_cast<T>(v.z);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(U v)
+ {
+ this->x /= static_cast<T>(v);
+ this->y /= static_cast<T>(v);
+ this->z /= static_cast<T>(v);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<1, U, Q> const& v)
+ {
+ this->x /= static_cast<T>(v.x);
+ this->y /= static_cast<T>(v.x);
+ this->z /= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<3, U, Q> const& v)
+ {
+ this->x /= static_cast<T>(v.x);
+ this->y /= static_cast<T>(v.y);
+ this->z /= static_cast<T>(v.z);
+ return *this;
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator++()
+ {
+ ++this->x;
+ ++this->y;
+ ++this->z;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator--()
+ {
+ --this->x;
+ --this->y;
+ --this->z;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator++(int)
+ {
+ vec<3, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator--(int)
+ {
+ vec<3, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(U scalar)
+ {
+ this->x %= scalar;
+ this->y %= scalar;
+ this->z %= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<1, U, Q> const& v)
+ {
+ this->x %= v.x;
+ this->y %= v.x;
+ this->z %= v.x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<3, U, Q> const& v)
+ {
+ this->x %= v.x;
+ this->y %= v.y;
+ this->z %= v.z;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(U scalar)
+ {
+ this->x &= scalar;
+ this->y &= scalar;
+ this->z &= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<1, U, Q> const& v)
+ {
+ this->x &= v.x;
+ this->y &= v.x;
+ this->z &= v.x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<3, U, Q> const& v)
+ {
+ this->x &= v.x;
+ this->y &= v.y;
+ this->z &= v.z;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(U scalar)
+ {
+ this->x |= scalar;
+ this->y |= scalar;
+ this->z |= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<1, U, Q> const& v)
+ {
+ this->x |= v.x;
+ this->y |= v.x;
+ this->z |= v.x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<3, U, Q> const& v)
+ {
+ this->x |= v.x;
+ this->y |= v.y;
+ this->z |= v.z;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(U scalar)
+ {
+ this->x ^= scalar;
+ this->y ^= scalar;
+ this->z ^= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<1, U, Q> const& v)
+ {
+ this->x ^= v.x;
+ this->y ^= v.x;
+ this->z ^= v.x;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<3, U, Q> const& v)
+ {
+ this->x ^= v.x;
+ this->y ^= v.y;
+ this->z ^= v.z;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(U scalar)
+ {
+ this->x <<= scalar;
+ this->y <<= scalar;
+ this->z <<= scalar;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<1, U, Q> const& v)
+ {
+ this->x <<= static_cast<T>(v.x);
+ this->y <<= static_cast<T>(v.x);
+ this->z <<= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<3, U, Q> const& v)
+ {
+ this->x <<= static_cast<T>(v.x);
+ this->y <<= static_cast<T>(v.y);
+ this->z <<= static_cast<T>(v.z);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(U scalar)
+ {
+ this->x >>= static_cast<T>(scalar);
+ this->y >>= static_cast<T>(scalar);
+ this->z >>= static_cast<T>(scalar);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<1, U, Q> const& v)
+ {
+ this->x >>= static_cast<T>(v.x);
+ this->y >>= static_cast<T>(v.x);
+ this->z >>= static_cast<T>(v.x);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<3, U, Q> const& v)
+ {
+ this->x >>= static_cast<T>(v.x);
+ this->y >>= static_cast<T>(v.y);
+ this->z >>= static_cast<T>(v.z);
+ return *this;
+ }
+
+ // -- Unary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v)
+ {
+ return v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ -v.x,
+ -v.y,
+ -v.z);
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x + scalar,
+ v.y + scalar,
+ v.z + scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x + scalar.x,
+ v.y + scalar.x,
+ v.z + scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar + v.x,
+ scalar + v.y,
+ scalar + v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x + v.x,
+ scalar.x + v.y,
+ scalar.x + v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x + v2.x,
+ v1.y + v2.y,
+ v1.z + v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x - scalar,
+ v.y - scalar,
+ v.z - scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x - scalar.x,
+ v.y - scalar.x,
+ v.z - scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar - v.x,
+ scalar - v.y,
+ scalar - v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x - v.x,
+ scalar.x - v.y,
+ scalar.x - v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x - v2.x,
+ v1.y - v2.y,
+ v1.z - v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x * scalar,
+ v.y * scalar,
+ v.z * scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x * scalar.x,
+ v.y * scalar.x,
+ v.z * scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar * v.x,
+ scalar * v.y,
+ scalar * v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x * v.x,
+ scalar.x * v.y,
+ scalar.x * v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x * v2.x,
+ v1.y * v2.y,
+ v1.z * v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x / scalar,
+ v.y / scalar,
+ v.z / scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x / scalar.x,
+ v.y / scalar.x,
+ v.z / scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar / v.x,
+ scalar / v.y,
+ scalar / v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x / v.x,
+ scalar.x / v.y,
+ scalar.x / v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x / v2.x,
+ v1.y / v2.y,
+ v1.z / v2.z);
+ }
+
+ // -- Binary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x % scalar,
+ v.y % scalar,
+ v.z % scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x % scalar.x,
+ v.y % scalar.x,
+ v.z % scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar % v.x,
+ scalar % v.y,
+ scalar % v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x % v.x,
+ scalar.x % v.y,
+ scalar.x % v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x % v2.x,
+ v1.y % v2.y,
+ v1.z % v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x & scalar,
+ v.y & scalar,
+ v.z & scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x & scalar.x,
+ v.y & scalar.x,
+ v.z & scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar & v.x,
+ scalar & v.y,
+ scalar & v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x & v.x,
+ scalar.x & v.y,
+ scalar.x & v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x & v2.x,
+ v1.y & v2.y,
+ v1.z & v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x | scalar,
+ v.y | scalar,
+ v.z | scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x | scalar.x,
+ v.y | scalar.x,
+ v.z | scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar | v.x,
+ scalar | v.y,
+ scalar | v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x | v.x,
+ scalar.x | v.y,
+ scalar.x | v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x | v2.x,
+ v1.y | v2.y,
+ v1.z | v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x ^ scalar,
+ v.y ^ scalar,
+ v.z ^ scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x ^ scalar.x,
+ v.y ^ scalar.x,
+ v.z ^ scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar ^ v.x,
+ scalar ^ v.y,
+ scalar ^ v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x ^ v.x,
+ scalar.x ^ v.y,
+ scalar.x ^ v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x ^ v2.x,
+ v1.y ^ v2.y,
+ v1.z ^ v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x << scalar,
+ v.y << scalar,
+ v.z << scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x << scalar.x,
+ v.y << scalar.x,
+ v.z << scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar << v.x,
+ scalar << v.y,
+ scalar << v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x << v.x,
+ scalar.x << v.y,
+ scalar.x << v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x << v2.x,
+ v1.y << v2.y,
+ v1.z << v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar)
+ {
+ return vec<3, T, Q>(
+ v.x >> scalar,
+ v.y >> scalar,
+ v.z >> scalar);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<3, T, Q>(
+ v.x >> scalar.x,
+ v.y >> scalar.x,
+ v.z >> scalar.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar >> v.x,
+ scalar >> v.y,
+ scalar >> v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ scalar.x >> v.x,
+ scalar.x >> v.y,
+ scalar.x >> v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return vec<3, T, Q>(
+ v1.x >> v2.x,
+ v1.y >> v2.y,
+ v1.z >> v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v)
+ {
+ return vec<3, T, Q>(
+ ~v.x,
+ ~v.y,
+ ~v.z);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x) &&
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.y, v2.y) &&
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.z, v2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+ {
+ return !(v1 == v2);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2)
+ {
+ return vec<3, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2)
+ {
+ return vec<3, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/detail/type_vec4.hpp b/3rdparty/glm/source/glm/detail/type_vec4.hpp
new file mode 100644
index 0000000..fbe8c0e
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec4.hpp
@@ -0,0 +1,508 @@
+/// @ref core
+/// @file glm/detail/type_vec4.hpp
+
+#pragma once
+
+#include "qualifier.hpp"
+#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+# include "_swizzle.hpp"
+#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+# include "_swizzle_func.hpp"
+#endif
+#include <cstddef>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ struct vec<4, T, Q>
+ {
+ // -- Implementation detail --
+
+ typedef T value_type;
+ typedef vec<4, T, Q> type;
+ typedef vec<4, bool, Q> bool_type;
+
+ // -- Data --
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wpedantic"
+# elif GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
+# pragma clang diagnostic ignored "-Wnested-anon-types"
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union
+# endif
+# endif
+
+# if GLM_CONFIG_XYZW_ONLY
+ T x, y, z, w;
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, Q, x, y, z, w)
+# endif//GLM_CONFIG_SWIZZLE
+# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE
+ union
+ {
+ struct { T x, y, z, w; };
+ struct { T r, g, b, a; };
+ struct { T s, t, p, q; };
+
+ typename detail::storage<4, T, detail::is_aligned<Q>::value>::type data;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ GLM_SWIZZLE4_2_MEMBERS(T, Q, x, y, z, w)
+ GLM_SWIZZLE4_2_MEMBERS(T, Q, r, g, b, a)
+ GLM_SWIZZLE4_2_MEMBERS(T, Q, s, t, p, q)
+ GLM_SWIZZLE4_3_MEMBERS(T, Q, x, y, z, w)
+ GLM_SWIZZLE4_3_MEMBERS(T, Q, r, g, b, a)
+ GLM_SWIZZLE4_3_MEMBERS(T, Q, s, t, p, q)
+ GLM_SWIZZLE4_4_MEMBERS(T, Q, x, y, z, w)
+ GLM_SWIZZLE4_4_MEMBERS(T, Q, r, g, b, a)
+ GLM_SWIZZLE4_4_MEMBERS(T, Q, s, t, p, q)
+# endif
+ };
+# else
+ union { T x, r, s; };
+ union { T y, g, t; };
+ union { T z, b, p; };
+ union { T w, a, q; };
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q)
+# endif
+# endif
+
+# if GLM_SILENT_WARNINGS == GLM_ENABLE
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# pragma clang diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_GCC
+# pragma GCC diagnostic pop
+# elif GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+# endif
+
+ // -- Component accesses --
+
+ typedef length_t length_type;
+
+ /// Return the count of components of the vector
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;}
+
+ GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i);
+ GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const;
+
+ // -- Implicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR;
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, Q> const& v) GLM_DEFAULT;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, P> const& v);
+
+ // -- Explicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y, T z, T w);
+
+ // -- Conversion scalar constructors --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, Z _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w);
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _Y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w);
+
+ // -- Conversion vector constructors --
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, C _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, C _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, B _y, vec<2, C, P> const& _zw);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, B _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<3, B, P> const& _yzw);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw);
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw);
+
+ /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v);
+
+ // -- Swizzle constructors --
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ template<int E0, int E1, int E2, int E3>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<4, T, Q, E0, E1, E2, E3> const& that)
+ {
+ *this = that();
+ }
+
+ template<int E0, int E1, int F0, int F1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, detail::_swizzle<2, T, Q, F0, F1, -1, -2> const& u)
+ {
+ *this = vec<4, T, Q>(v(), u());
+ }
+
+ template<int E0, int E1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, T const& y, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v)
+ {
+ *this = vec<4, T, Q>(x, y, v());
+ }
+
+ template<int E0, int E1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& w)
+ {
+ *this = vec<4, T, Q>(x, v(), w);
+ }
+
+ template<int E0, int E1>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& z, T const& w)
+ {
+ *this = vec<4, T, Q>(v(), z, w);
+ }
+
+ template<int E0, int E1, int E2>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v, T const& w)
+ {
+ *this = vec<4, T, Q>(v(), w);
+ }
+
+ template<int E0, int E1, int E2>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v)
+ {
+ *this = vec<4, T, Q>(x, v());
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ // -- Unary arithmetic operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, T, Q> const& v) GLM_DEFAULT;
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<4, U, Q> const& v);
+
+ // -- Increment and decrement operators --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator++();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator--();
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator++(int);
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator--(int);
+
+ // -- Unary bit operators --
+
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<4, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(U scalar);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<1, U, Q> const& v);
+ template<typename U>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<4, U, Q> const& v);
+ };
+
+ // -- Unary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T const & scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T const & scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T const & scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T const & scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2);
+
+ template<qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_vec4.inl"
+#endif//GLM_EXTERNAL_TEMPLATE
diff --git a/3rdparty/glm/source/glm/detail/type_vec4.inl b/3rdparty/glm/source/glm/detail/type_vec4.inl
new file mode 100644
index 0000000..f520c09
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec4.inl
@@ -0,0 +1,1142 @@
+/// @ref core
+
+#include "compute_vector_relational.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_vec4_add
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_vec4_sub
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_vec4_mul
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_vec4_div
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_vec4_mod
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x % b.x, a.y % b.y, a.z % b.z, a.w % b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_and
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x & b.x, a.y & b.y, a.z & b.z, a.w & b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_or
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x | b.x, a.y | b.y, a.z | b.z, a.w | b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_xor
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_shift_left
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x << b.x, a.y << b.y, a.z << b.z, a.w << b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_shift_right
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ return vec<4, T, Q>(a.x >> b.x, a.y >> b.y, a.z >> b.z, a.w >> b.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_equal
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x) &&
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.y, v2.y) &&
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.z, v2.z) &&
+ detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.w, v2.w);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_nequal
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return !compute_vec4_equal<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(v1, v2);
+ }
+ };
+
+ template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+ struct compute_vec4_bitwise_not
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(~v.x, ~v.y, ~v.z, ~v.w);
+ }
+ };
+}//namespace detail
+
+ // -- Implicit basic constructors --
+
+# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec()
+# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE
+ : x(0), y(0), z(0), w(0)
+# endif
+ {}
+# endif
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, Q> const& v)
+ : x(v.x), y(v.y), z(v.z), w(v.w)
+ {}
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, P> const& v)
+ : x(v.x), y(v.y), z(v.z), w(v.w)
+ {}
+
+ // -- Explicit basic constructors --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T scalar)
+ : x(scalar), y(scalar), z(scalar), w(scalar)
+ {}
+
+ template <typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T _x, T _y, T _z, T _w)
+ : x(_x), y(_y), z(_z), w(_w)
+ {}
+
+ // -- Conversion scalar constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.x))
+ , z(static_cast<T>(v.x))
+ , w(static_cast<T>(v.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, Z _z, W _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename X, typename Y, typename Z, typename W>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ // -- Conversion vector constructors --
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, C _w)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_z))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_z.x))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, C _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_yz.x))
+ , z(static_cast<T>(_yz.y))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_yz.x))
+ , z(static_cast<T>(_yz.y))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_yz.x))
+ , z(static_cast<T>(_yz.y))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_yz.x))
+ , z(static_cast<T>(_yz.y))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, B _y, vec<2, C, P> const& _zw)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_zw.x))
+ , w(static_cast<T>(_zw.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y))
+ , z(static_cast<T>(_zw.x))
+ , w(static_cast<T>(_zw.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_zw.x))
+ , w(static_cast<T>(_zw.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, typename C, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_y.x))
+ , z(static_cast<T>(_zw.x))
+ , w(static_cast<T>(_zw.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, B _w)
+ : x(static_cast<T>(_xyz.x))
+ , y(static_cast<T>(_xyz.y))
+ , z(static_cast<T>(_xyz.z))
+ , w(static_cast<T>(_w))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w)
+ : x(static_cast<T>(_xyz.x))
+ , y(static_cast<T>(_xyz.y))
+ , z(static_cast<T>(_xyz.z))
+ , w(static_cast<T>(_w.x))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<3, B, P> const& _yzw)
+ : x(static_cast<T>(_x))
+ , y(static_cast<T>(_yzw.x))
+ , z(static_cast<T>(_yzw.y))
+ , w(static_cast<T>(_yzw.z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw)
+ : x(static_cast<T>(_x.x))
+ , y(static_cast<T>(_yzw.x))
+ , z(static_cast<T>(_yzw.y))
+ , w(static_cast<T>(_yzw.z))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename A, typename B, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw)
+ : x(static_cast<T>(_xy.x))
+ , y(static_cast<T>(_xy.y))
+ , z(static_cast<T>(_zw.x))
+ , w(static_cast<T>(_zw.y))
+ {}
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, U, P> const& v)
+ : x(static_cast<T>(v.x))
+ , y(static_cast<T>(v.y))
+ , z(static_cast<T>(v.z))
+ , w(static_cast<T>(v.w))
+ {}
+
+ // -- Component accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i)
+ {
+ assert(i >= 0 && i < this->length());
+ switch(i)
+ {
+ default:
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return w;
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) const
+ {
+ assert(i >= 0 && i < this->length());
+ switch(i)
+ {
+ default:
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return w;
+ }
+ }
+
+ // -- Unary arithmetic operators --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, T, Q> const& v)
+ {
+ this->x = v.x;
+ this->y = v.y;
+ this->z = v.z;
+ this->w = v.w;
+ return *this;
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, U, Q> const& v)
+ {
+ this->x = static_cast<T>(v.x);
+ this->y = static_cast<T>(v.y);
+ this->z = static_cast<T>(v.z);
+ this->w = static_cast<T>(v.w);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(U scalar)
+ {
+ return (*this = detail::compute_vec4_add<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_add<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v.x)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_add<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(U scalar)
+ {
+ return (*this = detail::compute_vec4_sub<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_sub<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v.x)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_sub<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(U scalar)
+ {
+ return (*this = detail::compute_vec4_mul<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_mul<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v.x)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_mul<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(U scalar)
+ {
+ return (*this = detail::compute_vec4_div<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_div<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v.x)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_div<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ // -- Increment and decrement operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator++()
+ {
+ ++this->x;
+ ++this->y;
+ ++this->z;
+ ++this->w;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator--()
+ {
+ --this->x;
+ --this->y;
+ --this->z;
+ --this->w;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator++(int)
+ {
+ vec<4, T, Q> Result(*this);
+ ++*this;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator--(int)
+ {
+ vec<4, T, Q> Result(*this);
+ --*this;
+ return Result;
+ }
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(U scalar)
+ {
+ return (*this = detail::compute_vec4_mod<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_mod<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_mod<T, Q, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(U scalar)
+ {
+ return (*this = detail::compute_vec4_and<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_and<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_and<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(U scalar)
+ {
+ return (*this = detail::compute_vec4_or<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_or<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_or<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(U scalar)
+ {
+ return (*this = detail::compute_vec4_xor<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_xor<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_xor<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(U scalar)
+ {
+ return (*this = detail::compute_vec4_shift_left<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_shift_left<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_shift_left<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(U scalar)
+ {
+ return (*this = detail::compute_vec4_shift_right<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(scalar)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<1, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_shift_right<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<4, U, Q> const& v)
+ {
+ return (*this = detail::compute_vec4_shift_right<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(*this, vec<4, T, Q>(v)));
+ }
+
+ // -- Unary constant operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v)
+ {
+ return v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(0) -= v;
+ }
+
+ // -- Binary arithmetic operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T const & scalar)
+ {
+ return vec<4, T, Q>(v) += scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) += v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(v) += scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v2) += v1;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) += v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T const & scalar)
+ {
+ return vec<4, T, Q>(v) -= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) -= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) -= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) -= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) -= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T const & scalar)
+ {
+ return vec<4, T, Q>(v) *= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) *= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(v) *= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v2) *= v1;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) *= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T const & scalar)
+ {
+ return vec<4, T, Q>(v) /= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) /= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) /= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) /= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) /= v2;
+ }
+
+ // -- Binary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar)
+ {
+ return vec<4, T, Q>(v) %= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) %= v2.x;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) %= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar.x) %= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) %= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar)
+ {
+ return vec<4, T, Q>(v) &= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar)
+ {
+ return vec<4, T, Q>(v) &= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) &= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) &= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) &= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar)
+ {
+ return vec<4, T, Q>(v) |= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) |= v2.x;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) |= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) |= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) |= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar)
+ {
+ return vec<4, T, Q>(v) ^= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) ^= v2.x;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) ^= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) ^= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) ^= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar)
+ {
+ return vec<4, T, Q>(v) <<= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) <<= v2.x;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) <<= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) <<= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) <<= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar)
+ {
+ return vec<4, T, Q>(v) >>= scalar;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) >>= v2.x;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(scalar) >>= v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1.x) >>= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return vec<4, T, Q>(v1) >>= v2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v)
+ {
+ return detail::compute_vec4_bitwise_not<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(v);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return detail::compute_vec4_equal<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(v1, v2);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+ {
+ return detail::compute_vec4_nequal<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(v1, v2);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2)
+ {
+ return vec<4, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z, v1.w && v2.w);
+ }
+
+ template<qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2)
+ {
+ return vec<4, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z, v1.w || v2.w);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "type_vec4_simd.inl"
+#endif
diff --git a/3rdparty/glm/source/glm/detail/type_vec4_simd.inl b/3rdparty/glm/source/glm/detail/type_vec4_simd.inl
new file mode 100644
index 0000000..149c413
--- /dev/null
+++ b/3rdparty/glm/source/glm/detail/type_vec4_simd.inl
@@ -0,0 +1,775 @@
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+namespace glm{
+namespace detail
+{
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ template<qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base1<4, float, Q, E0,E1,E2,E3, true> : public _swizzle_base0<float, 4>
+ {
+ GLM_FUNC_QUALIFIER vec<4, float, Q> operator ()() const
+ {
+ __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer);
+
+ vec<4, float, Q> Result;
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ Result.data = _mm_permute_ps(data, _MM_SHUFFLE(E3, E2, E1, E0));
+# else
+ Result.data = _mm_shuffle_ps(data, data, _MM_SHUFFLE(E3, E2, E1, E0));
+# endif
+ return Result;
+ }
+ };
+
+ template<qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base1<4, int, Q, E0,E1,E2,E3, true> : public _swizzle_base0<int, 4>
+ {
+ GLM_FUNC_QUALIFIER vec<4, int, Q> operator ()() const
+ {
+ __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer);
+
+ vec<4, int, Q> Result;
+ Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0));
+ return Result;
+ }
+ };
+
+ template<qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base1<4, uint, Q, E0,E1,E2,E3, true> : public _swizzle_base0<uint, 4>
+ {
+ GLM_FUNC_QUALIFIER vec<4, uint, Q> operator ()() const
+ {
+ __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer);
+
+ vec<4, uint, Q> Result;
+ Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0));
+ return Result;
+ }
+ };
+# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ template<qualifier Q>
+ struct compute_vec4_add<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_add_ps(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<qualifier Q>
+ struct compute_vec4_add<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_add_pd(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<qualifier Q>
+ struct compute_vec4_sub<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_sub_ps(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<qualifier Q>
+ struct compute_vec4_sub<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_sub_pd(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<qualifier Q>
+ struct compute_vec4_mul<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_mul_ps(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<qualifier Q>
+ struct compute_vec4_mul<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_mul_pd(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<qualifier Q>
+ struct compute_vec4_div<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_div_ps(a.data, b.data);
+ return Result;
+ }
+ };
+
+ # if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<qualifier Q>
+ struct compute_vec4_div<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_div_pd(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<>
+ struct compute_vec4_div<float, aligned_lowp, true>
+ {
+ static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& a, vec<4, float, aligned_lowp> const& b)
+ {
+ vec<4, float, aligned_lowp> Result;
+ Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data));
+ return Result;
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_vec4_and<T, Q, true, 32, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_and_si128(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ template<typename T, qualifier Q>
+ struct compute_vec4_and<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_and_si256(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<typename T, qualifier Q>
+ struct compute_vec4_or<T, Q, true, 32, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_or_si128(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ template<typename T, qualifier Q>
+ struct compute_vec4_or<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_or_si256(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<typename T, qualifier Q>
+ struct compute_vec4_xor<T, Q, true, 32, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_xor_si128(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ template<typename T, qualifier Q>
+ struct compute_vec4_xor<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_xor_si256(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_left<T, Q, true, 32, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_sll_epi32(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_left<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_sll_epi64(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_right<T, Q, true, 32, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_srl_epi32(a.data, b.data);
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_right<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_srl_epi64(a.data, b.data);
+ return Result;
+ }
+ };
+# endif
+
+ template<typename T, qualifier Q>
+ struct compute_vec4_bitwise_not<T, Q, true, 32, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& v)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1));
+ return Result;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+	// Bitwise NOT for aligned 4x64-bit integer vectors (AVX2 path).
+	template<typename T, qualifier Q>
+	struct compute_vec4_bitwise_not<T, Q, true, 64, true>
+	{
+		static vec<4, T, Q> call(vec<4, T, Q> const& v)
+		{
+			vec<4, T, Q> Result;
+			// Fix: the all-ones mask must be a 256-bit register; the original
+			// passed the __m128i from _mm_set1_epi32 to _mm256_xor_si256,
+			// which does not compile / is the wrong width.
+			Result.data = _mm256_xor_si256(v.data, _mm256_set1_epi32(-1));
+			return Result;
+		}
+	};
+# endif
+
+ template<qualifier Q>
+ struct compute_vec4_equal<float, Q, false, 32, true>
+ {
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) == 0;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_SSE41_BIT
+	// Whole-vector equality for aligned ivec4 (SSE4.1 path).
+	template<qualifier Q>
+	struct compute_vec4_equal<int, Q, true, 32, true>
+	{
+		static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+		{
+			//return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0;
+			// XOR is all-zero in every lane iff the two vectors are equal.
+			__m128i neq = _mm_xor_si128(v1.data, v2.data);
+			// Fix: _mm_test_all_zeros returns NON-zero when (neq & neq) == 0,
+			// i.e. when the vectors ARE equal; the original "== 0" inverted
+			// the result of operator==.
+			return _mm_test_all_zeros(neq, neq) != 0;
+		}
+	};
+# endif
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<float, Q, false, 32, true>
+ {
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0;
+ }
+ };
+
+# if GLM_ARCH & GLM_ARCH_SSE41_BIT
+	// Whole-vector inequality for aligned ivec4 (SSE4.1 path).
+	template<qualifier Q>
+	struct compute_vec4_nequal<int, Q, true, 32, true>
+	{
+		static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+		{
+			//return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0;
+			// XOR has at least one non-zero lane iff the vectors differ.
+			__m128i neq = _mm_xor_si128(v1.data, v2.data);
+			// Fix: _mm_test_all_zeros returns 0 when some lane differs, so
+			// "not equal" is "== 0"; the original "!= 0" inverted operator!=.
+			return _mm_test_all_zeros(neq, neq) == 0;
+		}
+	};
+# endif
+}//namespace detail
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) :
+ data(_mm_set1_ps(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) :
+ data(_mm_set1_ps(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _s) :
+ data(_mm_set1_ps(_s))
+ {}
+
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_lowp>::vec(double _s) :
+ data(_mm256_set1_pd(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_mediump>::vec(double _s) :
+ data(_mm256_set1_pd(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_highp>::vec(double _s) :
+ data(_mm256_set1_pd(_s))
+ {}
+# endif
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) :
+ data(_mm_set1_epi32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) :
+ data(_mm_set1_epi32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) :
+ data(_mm_set1_epi32(_s))
+ {}
+
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_lowp>::vec(detail::int64 _s) :
+ data(_mm256_set1_epi64x(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_mediump>::vec(detail::int64 _s) :
+ data(_mm256_set1_epi64x(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_highp>::vec(detail::int64 _s) :
+ data(_mm256_set1_epi64x(_s))
+ {}
+# endif
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _x, float _y, float _z, float _w) :
+ data(_mm_set_ps(_w, _z, _y, _x))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _x, float _y, float _z, float _w) :
+ data(_mm_set_ps(_w, _z, _y, _x))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _x, float _y, float _z, float _w) :
+ data(_mm_set_ps(_w, _z, _y, _x))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _x, int _y, int _z, int _w) :
+ data(_mm_set_epi32(_w, _z, _y, _x))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _x, int _y, int _z, int _w) :
+ data(_mm_set_epi32(_w, _z, _y, _x))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _x, int _y, int _z, int _w) :
+ data(_mm_set_epi32(_w, _z, _y, _x))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) :
+ data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x)))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) :
+ data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x)))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) :
+ data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x)))
+ {}
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#if GLM_ARCH & GLM_ARCH_NEON_BIT
+namespace glm {
+namespace detail {
+
+ template<qualifier Q>
+ struct compute_vec4_add<float, Q, true>
+ {
+ static
+ vec<4, float, Q>
+ call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vaddq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_add<uint, Q, true>
+ {
+ static
+ vec<4, uint, Q>
+ call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
+ {
+ vec<4, uint, Q> Result;
+ Result.data = vaddq_u32(a.data, b.data);
+ return Result;
+ }
+ };
+
+	// Component-wise addition for aligned ivec4 (NEON path).
+	template<qualifier Q>
+	struct compute_vec4_add<int, Q, true>
+	{
+		static
+		vec<4, int, Q>
+		call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
+		{
+			// Fix: Result must be the int specialization; the original
+			// declared vec<4, uint, Q>, assigning the int32x4_t result of
+			// vaddq_s32 to a uint32x4_t member and returning the wrong type.
+			vec<4, int, Q> Result;
+			Result.data = vaddq_s32(a.data, b.data);
+			return Result;
+		}
+	};
+
+ template<qualifier Q>
+ struct compute_vec4_sub<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vsubq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_sub<uint, Q, true>
+ {
+ static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
+ {
+ vec<4, uint, Q> Result;
+ Result.data = vsubq_u32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_sub<int, Q, true>
+ {
+ static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
+ {
+ vec<4, int, Q> Result;
+ Result.data = vsubq_s32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_mul<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vmulq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_mul<uint, Q, true>
+ {
+ static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
+ {
+ vec<4, uint, Q> Result;
+ Result.data = vmulq_u32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_mul<int, Q, true>
+ {
+ static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
+ {
+ vec<4, int, Q> Result;
+ Result.data = vmulq_s32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_div<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vdivq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+	// Whole-vector equality for aligned vec4 of float (NEON path).
+	// Reduces the per-lane compare mask with pairwise-min: the result is
+	// all-ones only if every lane compared equal.
+	template<qualifier Q>
+	struct compute_vec4_equal<float, Q, false, 32, true>
+	{
+		static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+		{
+			uint32x4_t cmp = vceqq_f32(v1.data, v2.data);
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+			cmp = vpminq_u32(cmp, cmp);
+			cmp = vpminq_u32(cmp, cmp);
+			uint32_t r = cmp[0];
+#else
+			// Fix: cmp is a uint32x4_t, so the u32 half-extractors must be
+			// used; vget_low_f32/vget_high_f32 expect float32x4_t and are a
+			// type mismatch under strict NEON typing.
+			uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
+			cmpx2 = vpmin_u32(cmpx2, cmpx2);
+			uint32_t r = cmpx2[0];
+#endif
+			return r == ~0u;
+		}
+	};
+
+	// Whole-vector equality for aligned vec4 of uint (NEON path).
+	// NOTE(review): this specialization uses IsInt=false for an integer type
+	// - confirm against the primary template's dispatch before relying on it.
+	template<qualifier Q>
+	struct compute_vec4_equal<uint, Q, false, 32, true>
+	{
+		static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+		{
+			uint32x4_t cmp = vceqq_u32(v1.data, v2.data);
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+			cmp = vpminq_u32(cmp, cmp);
+			cmp = vpminq_u32(cmp, cmp);
+			uint32_t r = cmp[0];
+#else
+			// Fix: cmp is a uint32x4_t; use vget_low_u32/vget_high_u32, not
+			// the f32 extractors, which are a NEON type mismatch.
+			uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
+			cmpx2 = vpmin_u32(cmpx2, cmpx2);
+			uint32_t r = cmpx2[0];
+#endif
+			return r == ~0u;
+		}
+	};
+
+	// Whole-vector equality for aligned vec4 of int (NEON path).
+	// NOTE(review): this specialization uses IsInt=false for an integer type
+	// - confirm against the primary template's dispatch before relying on it.
+	template<qualifier Q>
+	struct compute_vec4_equal<int, Q, false, 32, true>
+	{
+		static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+		{
+			uint32x4_t cmp = vceqq_s32(v1.data, v2.data);
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+			cmp = vpminq_u32(cmp, cmp);
+			cmp = vpminq_u32(cmp, cmp);
+			uint32_t r = cmp[0];
+#else
+			// Fix: cmp is a uint32x4_t; use vget_low_u32/vget_high_u32, not
+			// the f32 extractors, which are a NEON type mismatch.
+			uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
+			cmpx2 = vpmin_u32(cmpx2, cmpx2);
+			uint32_t r = cmpx2[0];
+#endif
+			return r == ~0u;
+		}
+	};
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<float, Q, false, 32, true>
+ {
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ return !compute_vec4_equal<float, Q, false, 32, true>::call(v1, v2);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<uint, Q, false, 32, true>
+ {
+ static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+ {
+ return !compute_vec4_equal<uint, Q, false, 32, true>::call(v1, v2);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<int, Q, false, 32, true>
+ {
+ static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ return !compute_vec4_equal<int, Q, false, 32, true>::call(v1, v2);
+ }
+ };
+
+}//namespace detail
+
+#if !GLM_CONFIG_XYZW_ONLY
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) :
+ data(vdupq_n_f32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) :
+ data(vdupq_n_f32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _s) :
+ data(vdupq_n_f32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) :
+ data(vdupq_n_s32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) :
+ data(vdupq_n_s32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) :
+ data(vdupq_n_s32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_lowp>::vec(uint _s) :
+ data(vdupq_n_u32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_mediump>::vec(uint _s) :
+ data(vdupq_n_u32(_s))
+ {}
+
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_highp>::vec(uint _s) :
+ data(vdupq_n_u32(_s))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, float, aligned_highp>& rhs) :
+ data(rhs.data)
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, int, aligned_highp>& rhs) :
+ data(vcvtq_f32_s32(rhs.data))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, uint, aligned_highp>& rhs) :
+ data(vcvtq_f32_u32(rhs.data))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) :
+ data(vcvtq_f32_s32(vec<4, int, aligned_lowp>(_x, _y, _z, _w).data))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) :
+ data(vcvtq_f32_s32(vec<4, int, aligned_mediump>(_x, _y, _z, _w).data))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) :
+ data(vcvtq_f32_s32(vec<4, int, aligned_highp>(_x, _y, _z, _w).data))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(uint _x, uint _y, uint _z, uint _w) :
+ data(vcvtq_f32_u32(vec<4, uint, aligned_lowp>(_x, _y, _z, _w).data))
+ {}
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(uint _x, uint _y, uint _z, uint _w) :
+ data(vcvtq_f32_u32(vec<4, uint, aligned_mediump>(_x, _y, _z, _w).data))
+ {}
+
+
+ template<>
+ template<>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(uint _x, uint _y, uint _z, uint _w) :
+ data(vcvtq_f32_u32(vec<4, uint, aligned_highp>(_x, _y, _z, _w).data))
+ {}
+
+#endif
+}//namespace glm
+
+#endif
diff --git a/3rdparty/glm/source/glm/exponential.hpp b/3rdparty/glm/source/glm/exponential.hpp
new file mode 100644
index 0000000..f8fb886
--- /dev/null
+++ b/3rdparty/glm/source/glm/exponential.hpp
@@ -0,0 +1,110 @@
+/// @ref core
+/// @file glm/exponential.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+///
+/// @defgroup core_func_exponential Exponential functions
+/// @ingroup core
+///
+/// Provides GLSL exponential functions
+///
+/// These all operate component-wise. The description is per component.
+///
+/// Include <glm/exponential.hpp> to use these core features.
+
+#pragma once
+
+#include "detail/type_vec1.hpp"
+#include "detail/type_vec2.hpp"
+#include "detail/type_vec3.hpp"
+#include "detail/type_vec4.hpp"
+#include <cmath>
+
+namespace glm
+{
+ /// @addtogroup core_func_exponential
+ /// @{
+
+ /// Returns 'base' raised to the power 'exponent'.
+ ///
+ /// @param base Floating point value. pow function is defined for input values of 'base' defined in the range (inf-, inf+) in the limit of the type qualifier.
+ /// @param exponent Floating point value representing the 'exponent'.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/pow.xml">GLSL pow man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> pow(vec<L, T, Q> const& base, vec<L, T, Q> const& exponent);
+
+ /// Returns the natural exponentiation of x, i.e., e^x.
+ ///
+ /// @param v exp function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier.
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/exp.xml">GLSL exp man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> exp(vec<L, T, Q> const& v);
+
+ /// Returns the natural logarithm of v, i.e.,
+ /// returns the value y which satisfies the equation x = e^y.
+ /// Results are undefined if v <= 0.
+ ///
+ /// @param v log function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier.
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/log.xml">GLSL log man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> log(vec<L, T, Q> const& v);
+
+ /// Returns 2 raised to the v power.
+ ///
+ /// @param v exp2 function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier.
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/exp2.xml">GLSL exp2 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> exp2(vec<L, T, Q> const& v);
+
+ /// Returns the base 2 log of x, i.e., returns the value y,
+ /// which satisfies the equation x = 2 ^ y.
+ ///
+ /// @param v log2 function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier.
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/log2.xml">GLSL log2 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> log2(vec<L, T, Q> const& v);
+
+ /// Returns the positive square root of v.
+ ///
+ /// @param v sqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier.
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/sqrt.xml">GLSL sqrt man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> sqrt(vec<L, T, Q> const& v);
+
+ /// Returns the reciprocal of the positive square root of v.
+ ///
+ /// @param v inversesqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier.
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/inversesqrt.xml">GLSL inversesqrt man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.2 Exponential Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> inversesqrt(vec<L, T, Q> const& v);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_exponential.inl"
diff --git a/3rdparty/glm/source/glm/ext.hpp b/3rdparty/glm/source/glm/ext.hpp
new file mode 100644
index 0000000..39cabc3
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext.hpp
@@ -0,0 +1,255 @@
+/// @file glm/ext.hpp
+///
+/// @ref core (Dependence)
+
+#include "detail/setup.hpp"
+
+#pragma once
+
+#include "glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_EXT_INCLUDED_DISPLAYED)
+# define GLM_MESSAGE_EXT_INCLUDED_DISPLAYED
+# pragma message("GLM: All extensions included (not recommended)")
+#endif//GLM_MESSAGES
+
+#include "./ext/matrix_clip_space.hpp"
+#include "./ext/matrix_common.hpp"
+
+#include "./ext/matrix_double2x2.hpp"
+#include "./ext/matrix_double2x2_precision.hpp"
+#include "./ext/matrix_double2x3.hpp"
+#include "./ext/matrix_double2x3_precision.hpp"
+#include "./ext/matrix_double2x4.hpp"
+#include "./ext/matrix_double2x4_precision.hpp"
+#include "./ext/matrix_double3x2.hpp"
+#include "./ext/matrix_double3x2_precision.hpp"
+#include "./ext/matrix_double3x3.hpp"
+#include "./ext/matrix_double3x3_precision.hpp"
+#include "./ext/matrix_double3x4.hpp"
+#include "./ext/matrix_double3x4_precision.hpp"
+#include "./ext/matrix_double4x2.hpp"
+#include "./ext/matrix_double4x2_precision.hpp"
+#include "./ext/matrix_double4x3.hpp"
+#include "./ext/matrix_double4x3_precision.hpp"
+#include "./ext/matrix_double4x4.hpp"
+#include "./ext/matrix_double4x4_precision.hpp"
+
+#include "./ext/matrix_float2x2.hpp"
+#include "./ext/matrix_float2x2_precision.hpp"
+#include "./ext/matrix_float2x3.hpp"
+#include "./ext/matrix_float2x3_precision.hpp"
+#include "./ext/matrix_float2x4.hpp"
+#include "./ext/matrix_float2x4_precision.hpp"
+#include "./ext/matrix_float3x2.hpp"
+#include "./ext/matrix_float3x2_precision.hpp"
+#include "./ext/matrix_float3x3.hpp"
+#include "./ext/matrix_float3x3_precision.hpp"
+#include "./ext/matrix_float3x4.hpp"
+#include "./ext/matrix_float3x4_precision.hpp"
+#include "./ext/matrix_float4x2.hpp"
+#include "./ext/matrix_float4x2_precision.hpp"
+#include "./ext/matrix_float4x3.hpp"
+#include "./ext/matrix_float4x3_precision.hpp"
+#include "./ext/matrix_float4x4.hpp"
+#include "./ext/matrix_float4x4_precision.hpp"
+
+#include "./ext/matrix_int2x2.hpp"
+#include "./ext/matrix_int2x2_sized.hpp"
+#include "./ext/matrix_int2x3.hpp"
+#include "./ext/matrix_int2x3_sized.hpp"
+#include "./ext/matrix_int2x4.hpp"
+#include "./ext/matrix_int2x4_sized.hpp"
+#include "./ext/matrix_int3x2.hpp"
+#include "./ext/matrix_int3x2_sized.hpp"
+#include "./ext/matrix_int3x3.hpp"
+#include "./ext/matrix_int3x3_sized.hpp"
+#include "./ext/matrix_int3x4.hpp"
+#include "./ext/matrix_int3x4_sized.hpp"
+#include "./ext/matrix_int4x2.hpp"
+#include "./ext/matrix_int4x2_sized.hpp"
+#include "./ext/matrix_int4x3.hpp"
+#include "./ext/matrix_int4x3_sized.hpp"
+#include "./ext/matrix_int4x4.hpp"
+#include "./ext/matrix_int4x4_sized.hpp"
+
+#include "./ext/matrix_uint2x2.hpp"
+#include "./ext/matrix_uint2x2_sized.hpp"
+#include "./ext/matrix_uint2x3.hpp"
+#include "./ext/matrix_uint2x3_sized.hpp"
+#include "./ext/matrix_uint2x4.hpp"
+#include "./ext/matrix_uint2x4_sized.hpp"
+#include "./ext/matrix_uint3x2.hpp"
+#include "./ext/matrix_uint3x2_sized.hpp"
+#include "./ext/matrix_uint3x3.hpp"
+#include "./ext/matrix_uint3x3_sized.hpp"
+#include "./ext/matrix_uint3x4.hpp"
+#include "./ext/matrix_uint3x4_sized.hpp"
+#include "./ext/matrix_uint4x2.hpp"
+#include "./ext/matrix_uint4x2_sized.hpp"
+#include "./ext/matrix_uint4x3.hpp"
+#include "./ext/matrix_uint4x3_sized.hpp"
+#include "./ext/matrix_uint4x4.hpp"
+#include "./ext/matrix_uint4x4_sized.hpp"
+
+#include "./ext/matrix_projection.hpp"
+#include "./ext/matrix_relational.hpp"
+#include "./ext/matrix_transform.hpp"
+
+#include "./ext/quaternion_common.hpp"
+#include "./ext/quaternion_double.hpp"
+#include "./ext/quaternion_double_precision.hpp"
+#include "./ext/quaternion_float.hpp"
+#include "./ext/quaternion_float_precision.hpp"
+#include "./ext/quaternion_exponential.hpp"
+#include "./ext/quaternion_geometric.hpp"
+#include "./ext/quaternion_relational.hpp"
+#include "./ext/quaternion_transform.hpp"
+#include "./ext/quaternion_trigonometric.hpp"
+
+#include "./ext/scalar_common.hpp"
+#include "./ext/scalar_constants.hpp"
+#include "./ext/scalar_integer.hpp"
+#include "./ext/scalar_packing.hpp"
+#include "./ext/scalar_reciprocal.hpp"
+#include "./ext/scalar_relational.hpp"
+#include "./ext/scalar_ulp.hpp"
+
+#include "./ext/scalar_int_sized.hpp"
+#include "./ext/scalar_uint_sized.hpp"
+
+#include "./ext/vector_common.hpp"
+#include "./ext/vector_integer.hpp"
+#include "./ext/vector_packing.hpp"
+#include "./ext/vector_reciprocal.hpp"
+#include "./ext/vector_relational.hpp"
+#include "./ext/vector_ulp.hpp"
+
+#include "./ext/vector_bool1.hpp"
+#include "./ext/vector_bool1_precision.hpp"
+#include "./ext/vector_bool2.hpp"
+#include "./ext/vector_bool2_precision.hpp"
+#include "./ext/vector_bool3.hpp"
+#include "./ext/vector_bool3_precision.hpp"
+#include "./ext/vector_bool4.hpp"
+#include "./ext/vector_bool4_precision.hpp"
+
+#include "./ext/vector_double1.hpp"
+#include "./ext/vector_double1_precision.hpp"
+#include "./ext/vector_double2.hpp"
+#include "./ext/vector_double2_precision.hpp"
+#include "./ext/vector_double3.hpp"
+#include "./ext/vector_double3_precision.hpp"
+#include "./ext/vector_double4.hpp"
+#include "./ext/vector_double4_precision.hpp"
+
+#include "./ext/vector_float1.hpp"
+#include "./ext/vector_float1_precision.hpp"
+#include "./ext/vector_float2.hpp"
+#include "./ext/vector_float2_precision.hpp"
+#include "./ext/vector_float3.hpp"
+#include "./ext/vector_float3_precision.hpp"
+#include "./ext/vector_float4.hpp"
+#include "./ext/vector_float4_precision.hpp"
+
+#include "./ext/vector_int1.hpp"
+#include "./ext/vector_int1_sized.hpp"
+#include "./ext/vector_int2.hpp"
+#include "./ext/vector_int2_sized.hpp"
+#include "./ext/vector_int3.hpp"
+#include "./ext/vector_int3_sized.hpp"
+#include "./ext/vector_int4.hpp"
+#include "./ext/vector_int4_sized.hpp"
+
+#include "./ext/vector_uint1.hpp"
+#include "./ext/vector_uint1_sized.hpp"
+#include "./ext/vector_uint2.hpp"
+#include "./ext/vector_uint2_sized.hpp"
+#include "./ext/vector_uint3.hpp"
+#include "./ext/vector_uint3_sized.hpp"
+#include "./ext/vector_uint4.hpp"
+#include "./ext/vector_uint4_sized.hpp"
+
+#include "./gtc/bitfield.hpp"
+#include "./gtc/color_space.hpp"
+#include "./gtc/constants.hpp"
+#include "./gtc/epsilon.hpp"
+#include "./gtc/integer.hpp"
+#include "./gtc/matrix_access.hpp"
+#include "./gtc/matrix_integer.hpp"
+#include "./gtc/matrix_inverse.hpp"
+#include "./gtc/matrix_transform.hpp"
+#include "./gtc/noise.hpp"
+#include "./gtc/packing.hpp"
+#include "./gtc/quaternion.hpp"
+#include "./gtc/random.hpp"
+#include "./gtc/reciprocal.hpp"
+#include "./gtc/round.hpp"
+#include "./gtc/type_precision.hpp"
+#include "./gtc/type_ptr.hpp"
+#include "./gtc/ulp.hpp"
+#include "./gtc/vec1.hpp"
+#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+# include "./gtc/type_aligned.hpp"
+#endif
+
+#ifdef GLM_ENABLE_EXPERIMENTAL
+#include "./gtx/associated_min_max.hpp"
+#include "./gtx/bit.hpp"
+#include "./gtx/closest_point.hpp"
+#include "./gtx/color_encoding.hpp"
+#include "./gtx/color_space.hpp"
+#include "./gtx/color_space_YCoCg.hpp"
+#include "./gtx/compatibility.hpp"
+#include "./gtx/component_wise.hpp"
+#include "./gtx/dual_quaternion.hpp"
+#include "./gtx/euler_angles.hpp"
+#include "./gtx/extend.hpp"
+#include "./gtx/extended_min_max.hpp"
+#include "./gtx/fast_exponential.hpp"
+#include "./gtx/fast_square_root.hpp"
+#include "./gtx/fast_trigonometry.hpp"
+#include "./gtx/functions.hpp"
+#include "./gtx/gradient_paint.hpp"
+#include "./gtx/handed_coordinate_space.hpp"
+#include "./gtx/integer.hpp"
+#include "./gtx/intersect.hpp"
+#include "./gtx/log_base.hpp"
+#include "./gtx/matrix_cross_product.hpp"
+#include "./gtx/matrix_interpolation.hpp"
+#include "./gtx/matrix_major_storage.hpp"
+#include "./gtx/matrix_operation.hpp"
+#include "./gtx/matrix_query.hpp"
+#include "./gtx/mixed_product.hpp"
+#include "./gtx/norm.hpp"
+#include "./gtx/normal.hpp"
+#include "./gtx/normalize_dot.hpp"
+#include "./gtx/number_precision.hpp"
+#include "./gtx/optimum_pow.hpp"
+#include "./gtx/orthonormalize.hpp"
+#include "./gtx/perpendicular.hpp"
+#include "./gtx/polar_coordinates.hpp"
+#include "./gtx/projection.hpp"
+#include "./gtx/quaternion.hpp"
+#include "./gtx/raw_data.hpp"
+#include "./gtx/rotate_vector.hpp"
+#include "./gtx/spline.hpp"
+#include "./gtx/std_based_type.hpp"
+#if !((GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP))
+# include "./gtx/string_cast.hpp"
+#endif
+#include "./gtx/transform.hpp"
+#include "./gtx/transform2.hpp"
+#include "./gtx/vec_swizzle.hpp"
+#include "./gtx/vector_angle.hpp"
+#include "./gtx/vector_query.hpp"
+#include "./gtx/wrap.hpp"
+
+#if GLM_HAS_TEMPLATE_ALIASES
+# include "./gtx/scalar_multiplication.hpp"
+#endif
+
+#if GLM_HAS_RANGE_FOR
+# include "./gtx/range.hpp"
+#endif
+#endif//GLM_ENABLE_EXPERIMENTAL
diff --git a/3rdparty/glm/source/glm/ext/matrix_clip_space.hpp b/3rdparty/glm/source/glm/ext/matrix_clip_space.hpp
new file mode 100644
index 0000000..c3874f2
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_clip_space.hpp
@@ -0,0 +1,522 @@
+/// @ref ext_matrix_clip_space
+/// @file glm/ext/matrix_clip_space.hpp
+///
+/// @defgroup ext_matrix_clip_space GLM_EXT_matrix_clip_space
+/// @ingroup ext
+///
+/// Defines functions that generate clip space transformation matrices.
+///
+/// The matrices generated by this extension use standard OpenGL fixed-function
+/// conventions. For example, the lookAt function generates a transform from world
+/// space into the specific eye space that the projective matrix functions
+/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility
+/// specification defines the particular layout of this eye space.
+///
+/// Include <glm/ext/matrix_clip_space.hpp> to use the features of this extension.
+///
+/// @see ext_matrix_transform
+/// @see ext_matrix_projection
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_constants.hpp"
+#include "../geometric.hpp"
+#include "../trigonometric.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_clip_space extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_clip_space
+ /// @{
+
+ /// Creates a matrix for projecting two-dimensional coordinates onto the screen.
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top, T const& zNear, T const& zFar)
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluOrtho2D.xml">gluOrtho2D man page</a>
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho(
+ T left, T right, T bottom, T top);
+
+ /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_ZO(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a matrix for an orthographic parallel viewing volume using right-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_NO(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_ZO(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_NO(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoZO(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoNO(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a matrix for an orthographic parallel viewing volume, using the default handedness and default near and far clip planes definition.
+ /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+ ///
+ /// @tparam T A floating-point scalar type
+ ///
+ /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glOrtho.xml">glOrtho man page</a>
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho(
+ T left, T right, T bottom, T top, T zNear, T zFar);
+
+ /// Creates a left handed frustum matrix.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_ZO(
+ T left, T right, T bottom, T top, T near, T far);
+
+ /// Creates a left handed frustum matrix.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_NO(
+ T left, T right, T bottom, T top, T near, T far);
+
+ /// Creates a right handed frustum matrix.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_ZO(
+ T left, T right, T bottom, T top, T near, T far);
+
+ /// Creates a right handed frustum matrix.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_NO(
+ T left, T right, T bottom, T top, T near, T far);
+
+	/// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumZO(
+ T left, T right, T bottom, T top, T near, T far);
+
+	/// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumNO(
+ T left, T right, T bottom, T top, T near, T far);
+
+ /// Creates a left handed frustum matrix.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH(
+ T left, T right, T bottom, T top, T near, T far);
+
+ /// Creates a right handed frustum matrix.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH(
+ T left, T right, T bottom, T top, T near, T far);
+
+	/// Creates a frustum matrix using the default handedness and default near and far clip planes definition.
+ /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glFrustum.xml">glFrustum man page</a>
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> frustum(
+ T left, T right, T bottom, T top, T near, T far);
+
+
+	/// Creates a matrix for a right handed, symmetric perspective-view frustum.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_ZO(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a right handed, symmetric perspective-view frustum.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_NO(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a left handed, symmetric perspective-view frustum.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_ZO(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a left handed, symmetric perspective-view frustum.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_NO(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveZO(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveNO(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a right handed, symmetric perspective-view frustum.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a left handed, symmetric perspective-view frustum.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH(
+ T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a symmetric perspective-view frustum based on the default handedness and default near and far clip planes definition.
+ /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+ ///
+ /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluPerspective.xml">gluPerspective man page</a>
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspective(
+ T fovy, T aspect, T near, T far);
+
+ /// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_ZO(
+ T fov, T width, T height, T near, T far);
+
+ /// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_NO(
+ T fov, T width, T height, T near, T far);
+
+ /// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_ZO(
+ T fov, T width, T height, T near, T far);
+
+ /// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_NO(
+ T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovZO(
+ T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovNO(
+ T fov, T width, T height, T near, T far);
+
+ /// Builds a right handed perspective projection matrix based on a field of view.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH(
+ T fov, T width, T height, T near, T far);
+
+ /// Builds a left handed perspective projection matrix based on a field of view.
+ /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH(
+ T fov, T width, T height, T near, T far);
+
+ /// Builds a perspective projection matrix based on a field of view and the default handedness and default near and far clip planes definition.
+ /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+ ///
+ /// @param fov Expressed in radians.
+ /// @param width Width of the viewport
+ /// @param height Height of the viewport
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFov(
+ T fov, T width, T height, T near, T far);
+
+ /// Creates a matrix for a left handed, symmetric perspective-view frustum with far plane at infinite.
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveLH(
+ T fovy, T aspect, T near);
+
+ /// Creates a matrix for a right handed, symmetric perspective-view frustum with far plane at infinite.
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveRH(
+ T fovy, T aspect, T near);
+
+ /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinite with default handedness.
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspective(
+ T fovy, T aspect, T near);
+
+ /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinite for graphics hardware that doesn't support depth clamping.
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
+ T fovy, T aspect, T near);
+
+ /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinite for graphics hardware that doesn't support depth clamping.
+ ///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+ /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+ /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+ /// @param ep Epsilon
+ ///
+ /// @tparam T A floating-point scalar type
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
+ T fovy, T aspect, T near, T ep);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_clip_space.inl"
diff --git a/3rdparty/glm/source/glm/ext/matrix_clip_space.inl b/3rdparty/glm/source/glm/ext/matrix_clip_space.inl
new file mode 100644
index 0000000..7e4df33
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_clip_space.inl
@@ -0,0 +1,555 @@
+namespace glm
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top)
+ {
+ mat<4, 4, T, defaultp> Result(static_cast<T>(1));
+ Result[0][0] = static_cast<T>(2) / (right - left);
+ Result[1][1] = static_cast<T>(2) / (top - bottom);
+ Result[2][2] = - static_cast<T>(1);
+ Result[3][0] = - (right + left) / (right - left);
+ Result[3][1] = - (top + bottom) / (top - bottom);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+ mat<4, 4, T, defaultp> Result(1);
+ Result[0][0] = static_cast<T>(2) / (right - left);
+ Result[1][1] = static_cast<T>(2) / (top - bottom);
+ Result[2][2] = static_cast<T>(1) / (zFar - zNear);
+ Result[3][0] = - (right + left) / (right - left);
+ Result[3][1] = - (top + bottom) / (top - bottom);
+ Result[3][2] = - zNear / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+ mat<4, 4, T, defaultp> Result(1);
+ Result[0][0] = static_cast<T>(2) / (right - left);
+ Result[1][1] = static_cast<T>(2) / (top - bottom);
+ Result[2][2] = static_cast<T>(2) / (zFar - zNear);
+ Result[3][0] = - (right + left) / (right - left);
+ Result[3][1] = - (top + bottom) / (top - bottom);
+ Result[3][2] = - (zFar + zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+ mat<4, 4, T, defaultp> Result(1);
+ Result[0][0] = static_cast<T>(2) / (right - left);
+ Result[1][1] = static_cast<T>(2) / (top - bottom);
+ Result[2][2] = - static_cast<T>(1) / (zFar - zNear);
+ Result[3][0] = - (right + left) / (right - left);
+ Result[3][1] = - (top + bottom) / (top - bottom);
+ Result[3][2] = - zNear / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+ mat<4, 4, T, defaultp> Result(1);
+ Result[0][0] = static_cast<T>(2) / (right - left);
+ Result[1][1] = static_cast<T>(2) / (top - bottom);
+ Result[2][2] = - static_cast<T>(2) / (zFar - zNear);
+ Result[3][0] = - (right + left) / (right - left);
+ Result[3][1] = - (top + bottom) / (top - bottom);
+ Result[3][2] = - (zFar + zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoZO(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
+# else
+ return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoNO(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return orthoLH_NO(left, right, bottom, top, zNear, zFar);
+# else
+ return orthoRH_NO(left, right, bottom, top, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
+# else
+ return orthoLH_NO(left, right, bottom, top, zNear, zFar);
+# endif
+
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
+# else
+ return orthoRH_NO(left, right, bottom, top, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+ return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+ return orthoLH_NO(left, right, bottom, top, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+ return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+ return orthoRH_NO(left, right, bottom, top, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+ mat<4, 4, T, defaultp> Result(0);
+ Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+ Result[2][0] = (right + left) / (right - left);
+ Result[2][1] = (top + bottom) / (top - bottom);
+ Result[2][2] = farVal / (farVal - nearVal);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = -(farVal * nearVal) / (farVal - nearVal);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_NO(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+ mat<4, 4, T, defaultp> Result(0);
+ Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+ Result[2][0] = (right + left) / (right - left);
+ Result[2][1] = (top + bottom) / (top - bottom);
+ Result[2][2] = (farVal + nearVal) / (farVal - nearVal);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = - (static_cast<T>(2) * farVal * nearVal) / (farVal - nearVal);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+ mat<4, 4, T, defaultp> Result(0);
+ Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+ Result[2][0] = (right + left) / (right - left);
+ Result[2][1] = (top + bottom) / (top - bottom);
+ Result[2][2] = farVal / (nearVal - farVal);
+ Result[2][3] = static_cast<T>(-1);
+ Result[3][2] = -(farVal * nearVal) / (farVal - nearVal);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_NO(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+ mat<4, 4, T, defaultp> Result(0);
+ Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+ Result[2][0] = (right + left) / (right - left);
+ Result[2][1] = (top + bottom) / (top - bottom);
+ Result[2][2] = - (farVal + nearVal) / (farVal - nearVal);
+ Result[2][3] = static_cast<T>(-1);
+ Result[3][2] = - (static_cast<T>(2) * farVal * nearVal) / (farVal - nearVal);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumZO(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
+# else
+ return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumNO(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
+# else
+ return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
+# else
+ return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
+# else
+ return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustum(T left, T right, T bottom, T top, T nearVal, T farVal)
+ {
+# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+ return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+ return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+ return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+ return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_ZO(T fovy, T aspect, T zNear, T zFar)
+ {
+ assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+ T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+ Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+ Result[2][2] = zFar / (zNear - zFar);
+ Result[2][3] = - static_cast<T>(1);
+ Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_NO(T fovy, T aspect, T zNear, T zFar)
+ {
+ assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+ T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+ Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+ Result[2][2] = - (zFar + zNear) / (zFar - zNear);
+ Result[2][3] = - static_cast<T>(1);
+ Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_ZO(T fovy, T aspect, T zNear, T zFar)
+ {
+ assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+ T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+ Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+ Result[2][2] = zFar / (zFar - zNear);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_NO(T fovy, T aspect, T zNear, T zFar)
+ {
+ assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+ T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+ Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+ Result[2][2] = (zFar + zNear) / (zFar - zNear);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveZO(T fovy, T aspect, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
+# else
+ return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveNO(T fovy, T aspect, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return perspectiveLH_NO(fovy, aspect, zNear, zFar);
+# else
+ return perspectiveRH_NO(fovy, aspect, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH(T fovy, T aspect, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
+# else
+ return perspectiveLH_NO(fovy, aspect, zNear, zFar);
+# endif
+
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH(T fovy, T aspect, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
+# else
+ return perspectiveRH_NO(fovy, aspect, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspective(T fovy, T aspect, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+ return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+ return perspectiveLH_NO(fovy, aspect, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+ return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+ return perspectiveRH_NO(fovy, aspect, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_ZO(T fov, T width, T height, T zNear, T zFar)
+ {
+ assert(width > static_cast<T>(0));
+ assert(height > static_cast<T>(0));
+ assert(fov > static_cast<T>(0));
+
+ T const rad = fov;
+ T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+ T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = w;
+ Result[1][1] = h;
+ Result[2][2] = zFar / (zNear - zFar);
+ Result[2][3] = - static_cast<T>(1);
+ Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_NO(T fov, T width, T height, T zNear, T zFar)
+ {
+ assert(width > static_cast<T>(0));
+ assert(height > static_cast<T>(0));
+ assert(fov > static_cast<T>(0));
+
+ T const rad = fov;
+ T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+ T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = w;
+ Result[1][1] = h;
+ Result[2][2] = - (zFar + zNear) / (zFar - zNear);
+ Result[2][3] = - static_cast<T>(1);
+ Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_ZO(T fov, T width, T height, T zNear, T zFar)
+ {
+ assert(width > static_cast<T>(0));
+ assert(height > static_cast<T>(0));
+ assert(fov > static_cast<T>(0));
+
+ T const rad = fov;
+ T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+ T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = w;
+ Result[1][1] = h;
+ Result[2][2] = zFar / (zFar - zNear);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_NO(T fov, T width, T height, T zNear, T zFar)
+ {
+ assert(width > static_cast<T>(0));
+ assert(height > static_cast<T>(0));
+ assert(fov > static_cast<T>(0));
+
+ T const rad = fov;
+ T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+ T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = w;
+ Result[1][1] = h;
+ Result[2][2] = (zFar + zNear) / (zFar - zNear);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovZO(T fov, T width, T height, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return perspectiveFovLH_ZO(fov, width, height, zNear, zFar);
+# else
+ return perspectiveFovRH_ZO(fov, width, height, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovNO(T fov, T width, T height, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return perspectiveFovLH_NO(fov, width, height, zNear, zFar);
+# else
+ return perspectiveFovRH_NO(fov, width, height, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH(T fov, T width, T height, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return perspectiveFovLH_ZO(fov, width, height, zNear, zFar);
+# else
+ return perspectiveFovLH_NO(fov, width, height, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH(T fov, T width, T height, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return perspectiveFovRH_ZO(fov, width, height, zNear, zFar);
+# else
+ return perspectiveFovRH_NO(fov, width, height, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFov(T fov, T width, T height, T zNear, T zFar)
+ {
+# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+ return perspectiveFovLH_ZO(fov, width, height, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+ return perspectiveFovLH_NO(fov, width, height, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+ return perspectiveFovRH_ZO(fov, width, height, zNear, zFar);
+# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+ return perspectiveFovRH_NO(fov, width, height, zNear, zFar);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH(T fovy, T aspect, T zNear)
+ {
+ T const range = tan(fovy / static_cast<T>(2)) * zNear;
+ T const left = -range * aspect;
+ T const right = range * aspect;
+ T const bottom = -range;
+ T const top = range;
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+ Result[2][2] = - static_cast<T>(1);
+ Result[2][3] = - static_cast<T>(1);
+ Result[3][2] = - static_cast<T>(2) * zNear;
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH(T fovy, T aspect, T zNear)
+ {
+ T const range = tan(fovy / static_cast<T>(2)) * zNear;
+ T const left = -range * aspect;
+ T const right = range * aspect;
+ T const bottom = -range;
+ T const top = range;
+
+ mat<4, 4, T, defaultp> Result(T(0));
+ Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+ Result[2][2] = static_cast<T>(1);
+ Result[2][3] = static_cast<T>(1);
+ Result[3][2] = - static_cast<T>(2) * zNear;
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspective(T fovy, T aspect, T zNear)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+ return infinitePerspectiveLH(fovy, aspect, zNear);
+# else
+ return infinitePerspectiveRH(fovy, aspect, zNear);
+# endif
+ }
+
+ // Infinite projection matrix: http://www.terathon.com/gdc07_lengyel.pdf
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear, T ep)
+ {
+ T const range = tan(fovy / static_cast<T>(2)) * zNear;
+ T const left = -range * aspect;
+ T const right = range * aspect;
+ T const bottom = -range;
+ T const top = range;
+
+ mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+ Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+ Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+ Result[2][2] = ep - static_cast<T>(1);
+ Result[2][3] = static_cast<T>(-1);
+ Result[3][2] = (ep - static_cast<T>(2)) * zNear;
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear)
+ {
+ return tweakedInfinitePerspective(fovy, aspect, zNear, epsilon<T>());
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_common.hpp b/3rdparty/glm/source/glm/ext/matrix_common.hpp
new file mode 100644
index 0000000..05c3799
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_common.hpp
@@ -0,0 +1,36 @@
+/// @ref ext_matrix_common
+/// @file glm/ext/matrix_common.hpp
+///
+/// @defgroup ext_matrix_common GLM_EXT_matrix_common
+/// @ingroup ext
+///
+/// Defines functions for common matrix operations.
+///
+/// Include <glm/ext/matrix_common.hpp> to use the features of this extension.
+///
+/// @see ext_matrix_common
+
+#pragma once
+
+#include "../detail/qualifier.hpp"
+#include "../detail/_fixes.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_EXT_matrix_common extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_common
+ /// @{
+
+ template<length_t C, length_t R, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, mat<C, R, U, Q> const& a);
+
+ template<length_t C, length_t R, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, U a);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_common.inl"
diff --git a/3rdparty/glm/source/glm/ext/matrix_common.inl b/3rdparty/glm/source/glm/ext/matrix_common.inl
new file mode 100644
index 0000000..9d50848
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_common.inl
@@ -0,0 +1,16 @@
+#include "../matrix.hpp"
+
+namespace glm
+{
+ template<length_t C, length_t R, typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, U a)
+ {
+ return mat<C, R, U, Q>(x) * (static_cast<U>(1) - a) + mat<C, R, U, Q>(y) * a;
+ }
+
+ template<length_t C, length_t R, typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, mat<C, R, U, Q> const& a)
+ {
+ return matrixCompMult(mat<C, R, U, Q>(x), static_cast<U>(1) - a) + matrixCompMult(mat<C, R, U, Q>(y), a);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double2x2.hpp b/3rdparty/glm/source/glm/ext/matrix_double2x2.hpp
new file mode 100644
index 0000000..94dca54
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double2x2.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x2.hpp
+
+#pragma once
+#include "../detail/type_mat2x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 2, double, defaultp> dmat2x2;
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 2, double, defaultp> dmat2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double2x2_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double2x2_precision.hpp
new file mode 100644
index 0000000..9e2c174
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double2x2_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, double, lowp> lowp_dmat2;
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, double, mediump> mediump_dmat2;
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, double, highp> highp_dmat2;
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, double, lowp> lowp_dmat2x2;
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, double, mediump> mediump_dmat2x2;
+
+ /// 2 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, double, highp> highp_dmat2x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double2x3.hpp b/3rdparty/glm/source/glm/ext/matrix_double2x3.hpp
new file mode 100644
index 0000000..bfef87a
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double2x3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x3.hpp
+
+#pragma once
+#include "../detail/type_mat2x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 2 columns of 3 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 3, double, defaultp> dmat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double2x3_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double2x3_precision.hpp
new file mode 100644
index 0000000..098fb60
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double2x3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 2 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 3, double, lowp> lowp_dmat2x3;
+
+ /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 3, double, mediump> mediump_dmat2x3;
+
+ /// 2 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 3, double, highp> highp_dmat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double2x4.hpp b/3rdparty/glm/source/glm/ext/matrix_double2x4.hpp
new file mode 100644
index 0000000..499284b
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double2x4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x4.hpp
+
+#pragma once
+#include "../detail/type_mat2x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 2 columns of 4 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 4, double, defaultp> dmat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double2x4_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double2x4_precision.hpp
new file mode 100644
index 0000000..9b61ebc
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double2x4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 2 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 4, double, lowp> lowp_dmat2x4;
+
+ /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 4, double, mediump> mediump_dmat2x4;
+
+ /// 2 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 4, double, highp> highp_dmat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double3x2.hpp b/3rdparty/glm/source/glm/ext/matrix_double3x2.hpp
new file mode 100644
index 0000000..dd23f36
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double3x2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x2.hpp
+
+#pragma once
+#include "../detail/type_mat3x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 3 columns of 2 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 2, double, defaultp> dmat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double3x2_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double3x2_precision.hpp
new file mode 100644
index 0000000..068d9e9
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double3x2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 3 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 2, double, lowp> lowp_dmat3x2;
+
+ /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 2, double, mediump> mediump_dmat3x2;
+
+ /// 3 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 2, double, highp> highp_dmat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double3x3.hpp b/3rdparty/glm/source/glm/ext/matrix_double3x3.hpp
new file mode 100644
index 0000000..53572b7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double3x3.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x3.hpp
+
+#pragma once
+#include "../detail/type_mat3x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 3, double, defaultp> dmat3x3;
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 3, double, defaultp> dmat3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double3x3_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double3x3_precision.hpp
new file mode 100644
index 0000000..8691e78
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double3x3_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, double, lowp> lowp_dmat3;
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, double, mediump> mediump_dmat3;
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, double, highp> highp_dmat3;
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, double, lowp> lowp_dmat3x3;
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, double, mediump> mediump_dmat3x3;
+
+ /// 3 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, double, highp> highp_dmat3x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double3x4.hpp b/3rdparty/glm/source/glm/ext/matrix_double3x4.hpp
new file mode 100644
index 0000000..c572d63
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double3x4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x4.hpp
+
+#pragma once
+#include "../detail/type_mat3x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 3 columns of 4 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 4, double, defaultp> dmat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double3x4_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double3x4_precision.hpp
new file mode 100644
index 0000000..f040217
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double3x4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 3 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 4, double, lowp> lowp_dmat3x4;
+
+ /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 4, double, mediump> mediump_dmat3x4;
+
+ /// 3 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 4, double, highp> highp_dmat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double4x2.hpp b/3rdparty/glm/source/glm/ext/matrix_double4x2.hpp
new file mode 100644
index 0000000..9b229f4
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double4x2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x2.hpp
+
+#pragma once
+#include "../detail/type_mat4x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 4 columns of 2 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 2, double, defaultp> dmat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double4x2_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double4x2_precision.hpp
new file mode 100644
index 0000000..6ad18ba
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double4x2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 4 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 2, double, lowp> lowp_dmat4x2;
+
+ /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 2, double, mediump> mediump_dmat4x2;
+
+ /// 4 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 2, double, highp> highp_dmat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double4x3.hpp b/3rdparty/glm/source/glm/ext/matrix_double4x3.hpp
new file mode 100644
index 0000000..dca4cf9
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double4x3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x3.hpp
+
+#pragma once
+#include "../detail/type_mat4x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 4 columns of 3 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 3, double, defaultp> dmat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double4x3_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double4x3_precision.hpp
new file mode 100644
index 0000000..f7371de
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double4x3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 4 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 3, double, lowp> lowp_dmat4x3;
+
+ /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 3, double, mediump> mediump_dmat4x3;
+
+ /// 4 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 3, double, highp> highp_dmat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double4x4.hpp b/3rdparty/glm/source/glm/ext/matrix_double4x4.hpp
new file mode 100644
index 0000000..81e1bf6
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double4x4.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x4.hpp
+
+#pragma once
+#include "../detail/type_mat4x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 4, double, defaultp> dmat4x4;
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 4, double, defaultp> dmat4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_double4x4_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_double4x4_precision.hpp
new file mode 100644
index 0000000..4c36a84
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_double4x4_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, double, lowp> lowp_dmat4;
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, double, mediump> mediump_dmat4;
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, double, highp> highp_dmat4;
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, double, lowp> lowp_dmat4x4;
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, double, mediump> mediump_dmat4x4;
+
+ /// 4 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, double, highp> highp_dmat4x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float2x2.hpp b/3rdparty/glm/source/glm/ext/matrix_float2x2.hpp
new file mode 100644
index 0000000..53df921
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float2x2.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_float2x2.hpp
+
+#pragma once
+#include "../detail/type_mat2x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 2, float, defaultp> mat2x2;
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 2, float, defaultp> mat2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float2x2_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float2x2_precision.hpp
new file mode 100644
index 0000000..898b6db
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float2x2_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_float2x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, float, lowp> lowp_mat2;
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, float, mediump> mediump_mat2;
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, float, highp> highp_mat2;
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, float, lowp> lowp_mat2x2;
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, float, mediump> mediump_mat2x2;
+
+ /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 2, float, highp> highp_mat2x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float2x3.hpp b/3rdparty/glm/source/glm/ext/matrix_float2x3.hpp
new file mode 100644
index 0000000..6f68822
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float2x3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_float2x3.hpp
+
+#pragma once
+#include "../detail/type_mat2x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 2 columns of 3 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 3, float, defaultp> mat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float2x3_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float2x3_precision.hpp
new file mode 100644
index 0000000..50c1032
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float2x3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_float2x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 2 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 3, float, lowp> lowp_mat2x3;
+
+ /// 2 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 3, float, mediump> mediump_mat2x3;
+
+ /// 2 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 3, float, highp> highp_mat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float2x4.hpp b/3rdparty/glm/source/glm/ext/matrix_float2x4.hpp
new file mode 100644
index 0000000..30f30de
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float2x4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_float2x4.hpp
+
+#pragma once
+#include "../detail/type_mat2x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 2 columns of 4 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<2, 4, float, defaultp> mat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float2x4_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float2x4_precision.hpp
new file mode 100644
index 0000000..079d638
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float2x4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_float2x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 2 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 4, float, lowp> lowp_mat2x4;
+
+ /// 2 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 4, float, mediump> mediump_mat2x4;
+
+ /// 2 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<2, 4, float, highp> highp_mat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float3x2.hpp b/3rdparty/glm/source/glm/ext/matrix_float3x2.hpp
new file mode 100644
index 0000000..d39dd2f
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float3x2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_float3x2.hpp
+
+#pragma once
+#include "../detail/type_mat3x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 3 columns of 2 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 2, float, defaultp> mat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float3x2_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float3x2_precision.hpp
new file mode 100644
index 0000000..8572c2a
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float3x2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_float3x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 3 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 2, float, lowp> lowp_mat3x2;
+
+ /// 3 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 2, float, mediump> mediump_mat3x2;
+
+ /// 3 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 2, float, highp> highp_mat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float3x3.hpp b/3rdparty/glm/source/glm/ext/matrix_float3x3.hpp
new file mode 100644
index 0000000..177d809
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float3x3.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_float3x3.hpp
+
+#pragma once
+#include "../detail/type_mat3x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 3, float, defaultp> mat3x3;
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 3, float, defaultp> mat3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float3x3_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float3x3_precision.hpp
new file mode 100644
index 0000000..8a900c1
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float3x3_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_float3x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, float, lowp> lowp_mat3;
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, float, mediump> mediump_mat3;
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, float, highp> highp_mat3;
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, float, lowp> lowp_mat3x3;
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, float, mediump> mediump_mat3x3;
+
+ /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 3, float, highp> highp_mat3x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float3x4.hpp b/3rdparty/glm/source/glm/ext/matrix_float3x4.hpp
new file mode 100644
index 0000000..64b8459
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float3x4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_float3x4.hpp
+
+#pragma once
+#include "../detail/type_mat3x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 3 columns of 4 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<3, 4, float, defaultp> mat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float3x4_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float3x4_precision.hpp
new file mode 100644
index 0000000..bc36bf1
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float3x4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_float3x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 3 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 4, float, lowp> lowp_mat3x4;
+
+ /// 3 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 4, float, mediump> mediump_mat3x4;
+
+ /// 3 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<3, 4, float, highp> highp_mat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float4x2.hpp b/3rdparty/glm/source/glm/ext/matrix_float4x2.hpp
new file mode 100644
index 0000000..1ed5227
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float4x2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_float4x2.hpp
+
+#pragma once
+#include "../detail/type_mat4x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 4 columns of 2 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 2, float, defaultp> mat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float4x2_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float4x2_precision.hpp
new file mode 100644
index 0000000..88fd069
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float4x2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+ /// @file glm/ext/matrix_float4x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 4 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 2, float, lowp> lowp_mat4x2;
+
+ /// 4 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 2, float, mediump> mediump_mat4x2;
+
+ /// 4 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 2, float, highp> highp_mat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float4x3.hpp b/3rdparty/glm/source/glm/ext/matrix_float4x3.hpp
new file mode 100644
index 0000000..5dbe765
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float4x3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_float4x3.hpp
+
+#pragma once
+#include "../detail/type_mat4x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 4 columns of 3 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 3, float, defaultp> mat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float4x3_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float4x3_precision.hpp
new file mode 100644
index 0000000..846ed4f
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float4x3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_float4x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 4 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 3, float, lowp> lowp_mat4x3;
+
+ /// 4 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 3, float, mediump> mediump_mat4x3;
+
+ /// 4 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 3, float, highp> highp_mat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float4x4.hpp b/3rdparty/glm/source/glm/ext/matrix_float4x4.hpp
new file mode 100644
index 0000000..5ba111d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float4x4.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_float4x4.hpp
+
+#pragma once
+#include "../detail/type_mat4x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix
+ /// @{
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 4, float, defaultp> mat4x4;
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ typedef mat<4, 4, float, defaultp> mat4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_float4x4_precision.hpp b/3rdparty/glm/source/glm/ext/matrix_float4x4_precision.hpp
new file mode 100644
index 0000000..597149b
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_float4x4_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_float4x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_matrix_precision
+ /// @{
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, float, lowp> lowp_mat4;
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, float, mediump> mediump_mat4;
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, float, highp> highp_mat4;
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, float, lowp> lowp_mat4x4;
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, float, mediump> mediump_mat4x4;
+
+ /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.6 Matrices</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef mat<4, 4, float, highp> highp_mat4x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int2x2.hpp b/3rdparty/glm/source/glm/ext/matrix_int2x2.hpp
new file mode 100644
index 0000000..c6aa068
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int2x2.hpp
@@ -0,0 +1,38 @@
+/// @ref ext_matrix_int2x2
+/// @file glm/ext/matrix_int2x2.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int2x2 GLM_EXT_matrix_int2x2
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int2x2.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int2x2 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int2x2
+ /// @{
+
+ /// Signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2
+ typedef mat<2, 2, int, defaultp> imat2x2;
+
+ /// Signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2
+ typedef mat<2, 2, int, defaultp> imat2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int2x2_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int2x2_sized.hpp
new file mode 100644
index 0000000..70c0c21
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int2x2_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_matrix_int2x2_sized
+/// @file glm/ext/matrix_int2x2_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int2x2_sized GLM_EXT_matrix_int2x2_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int2x2_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x2.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int2x2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int2x2_sized
+ /// @{
+
+ /// 8 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int8, defaultp> i8mat2x2;
+
+ /// 16 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int16, defaultp> i16mat2x2;
+
+ /// 32 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int32, defaultp> i32mat2x2;
+
+ /// 64 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int64, defaultp> i64mat2x2;
+
+
+ /// 8 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int8, defaultp> i8mat2;
+
+ /// 16 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int16, defaultp> i16mat2;
+
+ /// 32 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int32, defaultp> i32mat2;
+
+ /// 64 bit signed integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_int2x2_sized
+ typedef mat<2, 2, int64, defaultp> i64mat2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int2x3.hpp b/3rdparty/glm/source/glm/ext/matrix_int2x3.hpp
new file mode 100644
index 0000000..aee415c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int2x3.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_int2x3
+/// @file glm/ext/matrix_int2x3.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int2x3 GLM_EXT_matrix_int2x3
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int2x3.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x3.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int2x3 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int2x3
+ /// @{
+
+ /// Signed integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_int2x3
+ typedef mat<2, 3, int, defaultp> imat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int2x3_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int2x3_sized.hpp
new file mode 100644
index 0000000..b5526fe
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int2x3_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_int2x3_sized
+/// @file glm/ext/matrix_int2x3_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int2x3_sized GLM_EXT_matrix_int2x3_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int2x3_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x3.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int2x3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int2x3_sized
+ /// @{
+
+ /// 8 bit signed integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_int2x3_sized
+ typedef mat<2, 3, int8, defaultp> i8mat2x3;
+
+ /// 16 bit signed integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_int2x3_sized
+ typedef mat<2, 3, int16, defaultp> i16mat2x3;
+
+ /// 32 bit signed integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_int2x3_sized
+ typedef mat<2, 3, int32, defaultp> i32mat2x3;
+
+ /// 64 bit signed integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_int2x3_sized
+ typedef mat<2, 3, int64, defaultp> i64mat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int2x4.hpp b/3rdparty/glm/source/glm/ext/matrix_int2x4.hpp
new file mode 100644
index 0000000..4f36331
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int2x4.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_int2x4
+/// @file glm/ext/matrix_int2x4.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int2x4 GLM_EXT_matrix_int2x4
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int2x4.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int2x4 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int2x4
+ /// @{
+
+ /// Signed integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_int2x4
+ typedef mat<2, 4, int, defaultp> imat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int2x4_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int2x4_sized.hpp
new file mode 100644
index 0000000..a66a5e7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int2x4_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_int2x4_sized
+/// @file glm/ext/matrix_int2x4_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int2x4_sized GLM_EXT_matrix_int2x4_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int2x4_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x4.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int2x4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int2x4_sized
+ /// @{
+
+ /// 8 bit signed integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_int2x4_sized
+ typedef mat<2, 4, int8, defaultp> i8mat2x4;
+
+ /// 16 bit signed integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_int2x4_sized
+ typedef mat<2, 4, int16, defaultp> i16mat2x4;
+
+ /// 32 bit signed integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_int2x4_sized
+ typedef mat<2, 4, int32, defaultp> i32mat2x4;
+
+ /// 64 bit signed integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_int2x4_sized
+ typedef mat<2, 4, int64, defaultp> i64mat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int3x2.hpp b/3rdparty/glm/source/glm/ext/matrix_int3x2.hpp
new file mode 100644
index 0000000..3bd563b
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int3x2.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_int3x2
+/// @file glm/ext/matrix_int3x2.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int3x2 GLM_EXT_matrix_int3x2
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int3x2.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int3x2 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int3x2
+ /// @{
+
+ /// Signed integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_int3x2
+ typedef mat<3, 2, int, defaultp> imat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int3x2_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int3x2_sized.hpp
new file mode 100644
index 0000000..7e34c52
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int3x2_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_int3x2_sized
+/// @file glm/ext/matrix_int3x2_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int3x2_sized GLM_EXT_matrix_int3x2_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int3x2_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x2.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int3x2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int3x2_sized
+ /// @{
+
+ /// 8 bit signed integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_int3x2_sized
+ typedef mat<3, 2, int8, defaultp> i8mat3x2;
+
+ /// 16 bit signed integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_int3x2_sized
+ typedef mat<3, 2, int16, defaultp> i16mat3x2;
+
+ /// 32 bit signed integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_int3x2_sized
+ typedef mat<3, 2, int32, defaultp> i32mat3x2;
+
+ /// 64 bit signed integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_int3x2_sized
+ typedef mat<3, 2, int64, defaultp> i64mat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int3x3.hpp b/3rdparty/glm/source/glm/ext/matrix_int3x3.hpp
new file mode 100644
index 0000000..287488d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int3x3.hpp
@@ -0,0 +1,38 @@
+/// @ref ext_matrix_int3x3
+/// @file glm/ext/matrix_int3x3.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int3x3 GLM_EXT_matrix_int3x3
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int3x3.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x3.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int3x3 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int3x3
+ /// @{
+
+ /// Signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3
+ typedef mat<3, 3, int, defaultp> imat3x3;
+
+ /// Signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3
+ typedef mat<3, 3, int, defaultp> imat3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int3x3_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int3x3_sized.hpp
new file mode 100644
index 0000000..577e305
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int3x3_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_matrix_int3x3_sized
+/// @file glm/ext/matrix_int3x3_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int3x3_sized GLM_EXT_matrix_int3x3_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int3x3_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x3.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int3x3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int3x3_sized
+ /// @{
+
+ /// 8 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int8, defaultp> i8mat3x3;
+
+ /// 16 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int16, defaultp> i16mat3x3;
+
+ /// 32 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int32, defaultp> i32mat3x3;
+
+ /// 64 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int64, defaultp> i64mat3x3;
+
+
+ /// 8 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int8, defaultp> i8mat3;
+
+ /// 16 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int16, defaultp> i16mat3;
+
+ /// 32 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int32, defaultp> i32mat3;
+
+ /// 64 bit signed integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_int3x3_sized
+ typedef mat<3, 3, int64, defaultp> i64mat3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int3x4.hpp b/3rdparty/glm/source/glm/ext/matrix_int3x4.hpp
new file mode 100644
index 0000000..08e534d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int3x4.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_int3x4
+/// @file glm/ext/matrix_int3x4.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int3x4 GLM_EXT_matrix_int3x4
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int3x4.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int3x4 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int3x4
+ /// @{
+
+ /// Signed integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_int3x4
+ typedef mat<3, 4, int, defaultp> imat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int3x4_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int3x4_sized.hpp
new file mode 100644
index 0000000..692c48c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int3x4_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_int3x4_sized
+/// @file glm/ext/matrix_int3x4_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int3x4_sized GLM_EXT_matrix_int3x4_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int3x4_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x4.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int3x4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int3x4_sized
+ /// @{
+
+ /// 8 bit signed integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_int3x4_sized
+ typedef mat<3, 4, int8, defaultp> i8mat3x4;
+
+ /// 16 bit signed integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_int3x4_sized
+ typedef mat<3, 4, int16, defaultp> i16mat3x4;
+
+ /// 32 bit signed integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_int3x4_sized
+ typedef mat<3, 4, int32, defaultp> i32mat3x4;
+
+ /// 64 bit signed integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_int3x4_sized
+ typedef mat<3, 4, int64, defaultp> i64mat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int4x2.hpp b/3rdparty/glm/source/glm/ext/matrix_int4x2.hpp
new file mode 100644
index 0000000..f756ef2
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int4x2.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_int4x2
+/// @file glm/ext/matrix_int4x2.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int4x2 GLM_EXT_matrix_int4x2
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int4x2.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int4x2 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int4x2
+ /// @{
+
+ /// Signed integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_int4x2
+ typedef mat<4, 2, int, defaultp> imat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int4x2_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int4x2_sized.hpp
new file mode 100644
index 0000000..63a99d6
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int4x2_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_int4x2_sized
+/// @file glm/ext/matrix_int4x2_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int4x2_sized GLM_EXT_matrix_int4x2_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int4x2_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x2.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int4x2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int4x2_sized
+ /// @{
+
+ /// 8 bit signed integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_int4x2_sized
+ typedef mat<4, 2, int8, defaultp> i8mat4x2;
+
+ /// 16 bit signed integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_int4x2_sized
+ typedef mat<4, 2, int16, defaultp> i16mat4x2;
+
+ /// 32 bit signed integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_int4x2_sized
+ typedef mat<4, 2, int32, defaultp> i32mat4x2;
+
+ /// 64 bit signed integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_int4x2_sized
+ typedef mat<4, 2, int64, defaultp> i64mat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int4x3.hpp b/3rdparty/glm/source/glm/ext/matrix_int4x3.hpp
new file mode 100644
index 0000000..d5d97a7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int4x3.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_int4x3
+/// @file glm/ext/matrix_int4x3.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int4x3 GLM_EXT_matrix_int4x3
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int4x3.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x3.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int4x3 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int4x3
+ /// @{
+
+ /// Signed integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_int4x3
+ typedef mat<4, 3, int, defaultp> imat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int4x3_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int4x3_sized.hpp
new file mode 100644
index 0000000..55078fa
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int4x3_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_int4x3_sized
+/// @file glm/ext/matrix_int4x3_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int4x3_sized GLM_EXT_matrix_int4x3_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int4x3_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x3.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int4x3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int4x3_sized
+ /// @{
+
+ /// 8 bit signed integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_int4x3_sized
+ typedef mat<4, 3, int8, defaultp> i8mat4x3;
+
+ /// 16 bit signed integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_int4x3_sized
+ typedef mat<4, 3, int16, defaultp> i16mat4x3;
+
+ /// 32 bit signed integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_int4x3_sized
+ typedef mat<4, 3, int32, defaultp> i32mat4x3;
+
+ /// 64 bit signed integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_int4x3_sized
+ typedef mat<4, 3, int64, defaultp> i64mat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int4x4.hpp b/3rdparty/glm/source/glm/ext/matrix_int4x4.hpp
new file mode 100644
index 0000000..e17cff1
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int4x4.hpp
@@ -0,0 +1,38 @@
+/// @ref ext_matrix_int4x4
+/// @file glm/ext/matrix_int4x4.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int4x4 GLM_EXT_matrix_int4x4
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int4x4.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int4x4 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int4x4
+ /// @{
+
+ /// Signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4
+ typedef mat<4, 4, int, defaultp> imat4x4;
+
+ /// Signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4
+ typedef mat<4, 4, int, defaultp> imat4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_int4x4_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_int4x4_sized.hpp
new file mode 100644
index 0000000..4a11203
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_int4x4_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_matrix_int4x4_sized
+/// @file glm/ext/matrix_int4x4_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_int4x4_sized GLM_EXT_matrix_int4x4_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_int4x4_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x4.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_int4x4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_int4x4_sized
+ /// @{
+
+ /// 8 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int8, defaultp> i8mat4x4;
+
+ /// 16 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int16, defaultp> i16mat4x4;
+
+ /// 32 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int32, defaultp> i32mat4x4;
+
+ /// 64 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int64, defaultp> i64mat4x4;
+
+
+ /// 8 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int8, defaultp> i8mat4;
+
+ /// 16 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int16, defaultp> i16mat4;
+
+ /// 32 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int32, defaultp> i32mat4;
+
+ /// 64 bit signed integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_int4x4_sized
+ typedef mat<4, 4, int64, defaultp> i64mat4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_integer.hpp b/3rdparty/glm/source/glm/ext/matrix_integer.hpp
new file mode 100644
index 0000000..7d7dfc5
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_integer.hpp
@@ -0,0 +1,91 @@
+/// @ref ext_matrix_integer
+/// @file glm/ext/matrix_integer.hpp
+///
+/// @defgroup ext_matrix_integer GLM_EXT_matrix_integer
+/// @ingroup ext
+///
+/// Defines matrix functions (matrixCompMult, outerProduct, transpose and
+/// determinant) for matrices with integer components.
+///
+/// These overloads mirror the GLSL matrix built-ins declared by the core
+/// library, extended here to signed-integer matrix types; they follow the
+/// semantics of section 8.6 (Matrix Functions) of the GLSL 4.20.8
+/// specification referenced in the per-function documentation below.
+///
+/// Include <glm/ext/matrix_integer.hpp> to use the features of this extension.
+///
+/// @see ext_matrix_projection
+/// @see ext_matrix_clip_space
+
+#pragma once
+
+// Dependencies
+#include "../gtc/constants.hpp"
+#include "../geometric.hpp"
+#include "../trigonometric.hpp"
+#include "../matrix.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_integer extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_integer
+ /// @{
+
+ /// Multiply matrix x by matrix y component-wise, i.e.,
+ /// result[i][j] is the scalar product of x[i][j] and y[i][j].
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualify the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualify the number of rows
+ /// @tparam T Floating-point or signed integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/matrixCompMult.xml">GLSL matrixCompMult man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> matrixCompMult(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
+
+ /// Treats the first parameter c as a column vector
+ /// and the second parameter r as a row vector
+ /// and does a linear algebraic matrix multiply c * r.
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualify the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualify the number of rows
+ /// @tparam T Floating-point or signed integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/outerProduct.xml">GLSL outerProduct man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL typename detail::outerProduct_trait<C, R, T, Q>::type outerProduct(vec<C, T, Q> const& c, vec<R, T, Q> const& r);
+
+ /// Returns the transposed matrix of x
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualify the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualify the number of rows
+ /// @tparam T Floating-point or signed integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/transpose.xml">GLSL transpose man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<C, R, T, Q>::transpose_type transpose(mat<C, R, T, Q> const& x);
+
+ /// Return the determinant of a squared matrix.
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualify the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualify the number of rows
+ /// @tparam T Floating-point or signed integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/determinant.xml">GLSL determinant man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL T determinant(mat<C, R, T, Q> const& m);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_integer.inl"
diff --git a/3rdparty/glm/source/glm/ext/matrix_integer.inl b/3rdparty/glm/source/glm/ext/matrix_integer.inl
new file mode 100644
index 0000000..8b377ce
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_integer.inl
@@ -0,0 +1,38 @@
+namespace glm{
+namespace detail
+{
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_matrixCompMult_type<C, R, T, Q, false, Aligned> {
+ GLM_FUNC_QUALIFIER static mat<C, R, T, Q> call(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y)
+ {
+ return detail::compute_matrixCompMult<C, R, T, Q, detail::is_aligned<Q>::value>::call(x, y);
+ }
+ };
+
+ template<length_t DA, length_t DB, typename T, qualifier Q>
+ struct compute_outerProduct_type<DA, DB, T, Q, false> {
+ GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait<DA, DB, T, Q>::type call(vec<DA, T, Q> const& c, vec<DB, T, Q> const& r)
+ {
+ return detail::compute_outerProduct<DA, DB, T, Q>::call(c, r);
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_transpose_type<C, R, T, Q, false, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static mat<R, C, T, Q> call(mat<C, R, T, Q> const& m)
+ {
+ return detail::compute_transpose<C, R, T, Q, detail::is_aligned<Q>::value>::call(m);
+ }
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+ struct compute_determinant_type<C, R, T, Q, false, Aligned>{
+
+ GLM_FUNC_QUALIFIER static T call(mat<C, R, T, Q> const& m)
+ {
+ return detail::compute_determinant<C, R, T, Q, detail::is_aligned<Q>::value>::call(m);
+ }
+ };
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_projection.hpp b/3rdparty/glm/source/glm/ext/matrix_projection.hpp
new file mode 100644
index 0000000..51fd01b
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_projection.hpp
@@ -0,0 +1,149 @@
+/// @ref ext_matrix_projection
+/// @file glm/ext/matrix_projection.hpp
+///
+/// @defgroup ext_matrix_projection GLM_EXT_matrix_projection
+/// @ingroup ext
+///
+/// Functions that generate common projection transformation matrices.
+///
+/// The matrices generated by this extension use standard OpenGL fixed-function
+/// conventions. For example, the lookAt function generates a transform from world
+/// space into the specific eye space that the projective matrix functions
+/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility
+/// specification defines the particular layout of this eye space.
+///
+/// Include <glm/ext/matrix_projection.hpp> to use the features of this extension.
+///
+/// @see ext_matrix_transform
+/// @see ext_matrix_clip_space
+
+#pragma once
+
+// Dependencies
+#include "../gtc/constants.hpp"
+#include "../geometric.hpp"
+#include "../trigonometric.hpp"
+#include "../matrix.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_projection extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_projection
+ /// @{
+
+ /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @param obj Specify the object coordinates.
+ /// @param model Specifies the current modelview matrix
+ /// @param proj Specifies the current projection matrix
+ /// @param viewport Specifies the current viewport
+ /// @return Return the computed window coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluProject.xml">gluProject man page</a>
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> projectZO(
+ vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
+
+ /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param obj Specify the object coordinates.
+ /// @param model Specifies the current modelview matrix
+ /// @param proj Specifies the current projection matrix
+ /// @param viewport Specifies the current viewport
+ /// @return Return the computed window coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluProject.xml">gluProject man page</a>
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> projectNO(
+ vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
+
+ /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates using default near and far clip planes definition.
+ /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+ ///
+ /// @param obj Specify the object coordinates.
+ /// @param model Specifies the current modelview matrix
+ /// @param proj Specifies the current projection matrix
+ /// @param viewport Specifies the current viewport
+ /// @return Return the computed window coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluProject.xml">gluProject man page</a>
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> project(
+ vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
+
+ /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+ ///
+ /// @param win Specify the window coordinates to be mapped.
+ /// @param model Specifies the modelview matrix
+ /// @param proj Specifies the projection matrix
+ /// @param viewport Specifies the viewport
+ /// @return Returns the computed object coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluUnProject.xml">gluUnProject man page</a>
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> unProjectZO(
+ vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
+
+ /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates.
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+ ///
+ /// @param win Specify the window coordinates to be mapped.
+ /// @param model Specifies the modelview matrix
+ /// @param proj Specifies the projection matrix
+ /// @param viewport Specifies the viewport
+ /// @return Returns the computed object coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluUnProject.xml">gluUnProject man page</a>
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> unProjectNO(
+ vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
+
+ /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates using default near and far clip planes definition.
+ /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+ ///
+ /// @param win Specify the window coordinates to be mapped.
+ /// @param model Specifies the modelview matrix
+ /// @param proj Specifies the projection matrix
+ /// @param viewport Specifies the viewport
+ /// @return Returns the computed object coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluUnProject.xml">gluUnProject man page</a>
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> unProject(
+ vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
+
+ /// Define a picking region
+ ///
+ /// @param center Specify the center of a picking region in window coordinates.
+ /// @param delta Specify the width and height, respectively, of the picking region in window coordinates.
+ /// @param viewport Rendering viewport
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double.
+ /// @tparam U Currently supported: Floating-point types and integer types.
+ ///
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluPickMatrix.xml">gluPickMatrix man page</a>
+ template<typename T, qualifier Q, typename U>
+ GLM_FUNC_DECL mat<4, 4, T, Q> pickMatrix(
+ vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_projection.inl"
diff --git a/3rdparty/glm/source/glm/ext/matrix_projection.inl b/3rdparty/glm/source/glm/ext/matrix_projection.inl
new file mode 100644
index 0000000..2f2c196
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_projection.inl
@@ -0,0 +1,106 @@
+namespace glm
+{
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> projectZO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
+ {
+ vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast<T>(1));
+ tmp = model * tmp;
+ tmp = proj * tmp;
+
+ tmp /= tmp.w;
+ tmp.x = tmp.x * static_cast<T>(0.5) + static_cast<T>(0.5);
+ tmp.y = tmp.y * static_cast<T>(0.5) + static_cast<T>(0.5);
+
+ tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]);
+ tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]);
+
+ return vec<3, T, Q>(tmp);
+ }
+
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> projectNO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
+ {
+ vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast<T>(1));
+ tmp = model * tmp;
+ tmp = proj * tmp;
+
+ tmp /= tmp.w;
+ tmp = tmp * static_cast<T>(0.5) + static_cast<T>(0.5);
+ tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]);
+ tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]);
+
+ return vec<3, T, Q>(tmp);
+ }
+
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> project(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return projectZO(obj, model, proj, viewport);
+# else
+ return projectNO(obj, model, proj, viewport);
+# endif
+ }
+
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
+ {
+ mat<4, 4, T, Q> Inverse = inverse(proj * model);
+
+ vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
+ tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
+ tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
+ tmp.x = tmp.x * static_cast<T>(2) - static_cast<T>(1);
+ tmp.y = tmp.y * static_cast<T>(2) - static_cast<T>(1);
+
+ vec<4, T, Q> obj = Inverse * tmp;
+ obj /= obj.w;
+
+ return vec<3, T, Q>(obj);
+ }
+
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
+ {
+ mat<4, 4, T, Q> Inverse = inverse(proj * model);
+
+ vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
+ tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
+ tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
+ tmp = tmp * static_cast<T>(2) - static_cast<T>(1);
+
+ vec<4, T, Q> obj = Inverse * tmp;
+ obj /= obj.w;
+
+ return vec<3, T, Q>(obj);
+ }
+
+ template<typename T, typename U, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> unProject(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
+ {
+# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+ return unProjectZO(win, model, proj, viewport);
+# else
+ return unProjectNO(win, model, proj, viewport);
+# endif
+ }
+
+ template<typename T, qualifier Q, typename U>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> pickMatrix(vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport)
+ {
+ assert(delta.x > static_cast<T>(0) && delta.y > static_cast<T>(0));
+ mat<4, 4, T, Q> Result(static_cast<T>(1));
+
+ if(!(delta.x > static_cast<T>(0) && delta.y > static_cast<T>(0)))
+ return Result; // Error
+
+ vec<3, T, Q> Temp(
+ (static_cast<T>(viewport[2]) - static_cast<T>(2) * (center.x - static_cast<T>(viewport[0]))) / delta.x,
+ (static_cast<T>(viewport[3]) - static_cast<T>(2) * (center.y - static_cast<T>(viewport[1]))) / delta.y,
+ static_cast<T>(0));
+
+ // Translate and scale the picked region to the entire window
+ Result = translate(Result, Temp);
+ return scale(Result, vec<3, T, Q>(static_cast<T>(viewport[2]) / delta.x, static_cast<T>(viewport[3]) / delta.y, static_cast<T>(1)));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_relational.hpp b/3rdparty/glm/source/glm/ext/matrix_relational.hpp
new file mode 100644
index 0000000..20023ad
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_relational.hpp
@@ -0,0 +1,132 @@
+/// @ref ext_matrix_relational
+/// @file glm/ext/matrix_relational.hpp
+///
+/// @defgroup ext_matrix_relational GLM_EXT_matrix_relational
+/// @ingroup ext
+///
+/// Exposes comparison functions for matrix types that take user-defined epsilon values.
+///
+/// Include <glm/ext/matrix_relational.hpp> to use the features of this extension.
+///
+/// @see ext_vector_relational
+/// @see ext_scalar_relational
+/// @see ext_quaternion_relational
+
+#pragma once
+
+// Dependencies
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_relational extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_relational
+ /// @{
+
+ /// Perform a component-wise equal-to comparison of two matrices.
+ /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
+
+ /// Perform a component-wise not-equal-to comparison of two matrices.
+ /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, T epsilon);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, T, Q> const& epsilon);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, T epsilon);
+
+ /// Returns the component-wise comparison of |x - y| >= epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, T, Q> const& epsilon);
+
+ /// Returns the component-wise comparison between two matrices in terms of ULPs.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, int ULPs);
+
+ /// Returns the component-wise comparison between two matrices in terms of ULPs.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, int, Q> const& ULPs);
+
+ /// Returns the component-wise comparison between two matrices in terms of ULPs.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, int ULPs);
+
+ /// Returns the component-wise comparison between two matrices in terms of ULPs.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
+ /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, int, Q> const& ULPs);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_relational.inl"
diff --git a/3rdparty/glm/source/glm/ext/matrix_relational.inl b/3rdparty/glm/source/glm/ext/matrix_relational.inl
new file mode 100644
index 0000000..9cd42b7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_relational.inl
@@ -0,0 +1,88 @@
+/// @ref ext_matrix_relational
+/// @file glm/ext/matrix_relational.inl
+
+// Dependency:
+#include "../ext/vector_relational.hpp"
+#include "../common.hpp"
+
+namespace glm
+{
+ // Column-wise exact equality: the i-th component of the result is true
+ // when every component of column a[i] equals the matching component of b[i].
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b)
+ {
+ vec<C, bool, Q> Result(true);
+ for(length_t i = 0; i < C; ++i)
+ Result[i] = all(equal(a[i], b[i]));
+ return Result;
+ }
+
+ // Scalar-epsilon equality: broadcast the tolerance to a per-column vector
+ // and defer to the vector-epsilon overload below.
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, T Epsilon)
+ {
+ return equal(a, b, vec<C, T, Q>(Epsilon));
+ }
+
+ // Per-column epsilon equality: column i is "equal" when all of its
+ // components compare equal within Epsilon[i] (delegates to the vector
+ // equal(x, y, epsilon) from ext/vector_relational).
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, vec<C, T, Q> const& Epsilon)
+ {
+ vec<C, bool, Q> Result(true);
+ for(length_t i = 0; i < C; ++i)
+ Result[i] = all(equal(a[i], b[i], Epsilon[i]));
+ return Result;
+ }
+
+ // Column-wise exact inequality: true for column i when any component differs.
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b)
+ {
+ vec<C, bool, Q> Result(true);
+ for(length_t i = 0; i < C; ++i)
+ Result[i] = any(notEqual(a[i], b[i]));
+ return Result;
+ }
+
+ // Scalar-epsilon inequality: broadcast and defer to the vector-epsilon overload.
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, T Epsilon)
+ {
+ return notEqual(a, b, vec<C, T, Q>(Epsilon));
+ }
+
+ // Per-column epsilon inequality: column i is "not equal" when any of its
+ // components fail the epsilon comparison against Epsilon[i].
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, vec<C, T, Q> const& Epsilon)
+ {
+ vec<C, bool, Q> Result(true);
+ for(length_t i = 0; i < C; ++i)
+ Result[i] = any(notEqual(a[i], b[i], Epsilon[i]));
+ return Result;
+ }
+
+ // ULP equality with a single max-ULP count: broadcast to a per-column vector.
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, int MaxULPs)
+ {
+ return equal(a, b, vec<C, int, Q>(MaxULPs));
+ }
+
+ // Per-column ULP equality: column i is "equal" when all of its components
+ // pass the ULP-based vector comparison with budget MaxULPs[i].
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, vec<C, int, Q> const& MaxULPs)
+ {
+ vec<C, bool, Q> Result(true);
+ for(length_t i = 0; i < C; ++i)
+ Result[i] = all(equal(a[i], b[i], MaxULPs[i]));
+ return Result;
+ }
+
+ // ULP inequality with a single max-ULP count: broadcast and delegate.
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, int MaxULPs)
+ {
+ return notEqual(a, b, vec<C, int, Q>(MaxULPs));
+ }
+
+ // Per-column ULP inequality: true for column i when any component pair
+ // fails the ULP comparison with budget MaxULPs[i].
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& a, mat<C, R, T, Q> const& b, vec<C, int, Q> const& MaxULPs)
+ {
+ vec<C, bool, Q> Result(true);
+ for(length_t i = 0; i < C; ++i)
+ Result[i] = any(notEqual(a[i], b[i], MaxULPs[i]));
+ return Result;
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_transform.hpp b/3rdparty/glm/source/glm/ext/matrix_transform.hpp
new file mode 100644
index 0000000..cbd187e
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_transform.hpp
@@ -0,0 +1,144 @@
+/// @ref ext_matrix_transform
+/// @file glm/ext/matrix_transform.hpp
+///
+/// @defgroup ext_matrix_transform GLM_EXT_matrix_transform
+/// @ingroup ext
+///
+/// Defines functions that generate common transformation matrices.
+///
+/// The matrices generated by this extension use standard OpenGL fixed-function
+/// conventions. For example, the lookAt function generates a transform from world
+/// space into the specific eye space that the projective matrix functions
+/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility
+/// specifications defines the particular layout of this eye space.
+///
+/// Include <glm/ext/matrix_transform.hpp> to use the features of this extension.
+///
+/// @see ext_matrix_projection
+/// @see ext_matrix_clip_space
+
+#pragma once
+
+// Dependencies
+#include "../gtc/constants.hpp"
+#include "../geometric.hpp"
+#include "../trigonometric.hpp"
+#include "../matrix.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_transform extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_transform
+ /// @{
+
+ /// Builds an identity matrix.
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType identity();
+
+ /// Builds a translation 4 * 4 matrix created from a vector of 3 components.
+ ///
+ /// @param m Input matrix multiplied by this translation matrix.
+ /// @param v Coordinates of a translation vector.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @code
+ /// #include <glm/glm.hpp>
+ /// #include <glm/gtc/matrix_transform.hpp>
+ /// ...
+ /// glm::mat4 m = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f));
+ /// // m[0][0] == 1.0f, m[0][1] == 0.0f, m[0][2] == 0.0f, m[0][3] == 0.0f
+ /// // m[1][0] == 0.0f, m[1][1] == 1.0f, m[1][2] == 0.0f, m[1][3] == 0.0f
+ /// // m[2][0] == 0.0f, m[2][1] == 0.0f, m[2][2] == 1.0f, m[2][3] == 0.0f
+ /// // m[3][0] == 1.0f, m[3][1] == 1.0f, m[3][2] == 1.0f, m[3][3] == 1.0f
+ /// @endcode
+ ///
+ /// @see - translate(mat<4, 4, T, Q> const& m, T x, T y, T z)
+ /// @see - translate(vec<3, T, Q> const& v)
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glTranslate.xml">glTranslate man page</a>
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> translate(
+ mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v);
+
+ /// Builds a rotation 4 * 4 matrix created from an axis vector and an angle.
+ ///
+ /// @param m Input matrix multiplied by this rotation matrix.
+ /// @param angle Rotation angle expressed in radians.
+ /// @param axis Rotation axis, recommended to be normalized.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z)
+ /// @see - rotate(T angle, vec<3, T, Q> const& v)
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glRotate.xml">glRotate man page</a>
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> rotate(
+ mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& axis);
+
+ /// Builds a scale 4 * 4 matrix created from 3 scalars.
+ ///
+ /// @param m Input matrix multiplied by this scale matrix.
+ /// @param v Ratio of scaling for each axis.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @see - scale(mat<4, 4, T, Q> const& m, T x, T y, T z)
+ /// @see - scale(vec<3, T, Q> const& v)
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glScale.xml">glScale man page</a>
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> scale(
+ mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v);
+
+ /// Build a right handed look at view matrix.
+ ///
+ /// @param eye Position of the camera
+ /// @param center Position where the camera is looking at
+ /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1)
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal)
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> lookAtRH(
+ vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up);
+
+ /// Build a left handed look at view matrix.
+ ///
+ /// @param eye Position of the camera
+ /// @param center Position where the camera is looking at
+ /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1)
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal)
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> lookAtLH(
+ vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up);
+
+ /// Build a look at view matrix based on the default handedness.
+ ///
+ /// @param eye Position of the camera
+ /// @param center Position where the camera is looking at
+ /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1)
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal)
+ /// @see <a href="https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml">gluLookAt man page</a>
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> lookAt(
+ vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_transform.inl"
diff --git a/3rdparty/glm/source/glm/ext/matrix_transform.inl b/3rdparty/glm/source/glm/ext/matrix_transform.inl
new file mode 100644
index 0000000..01cefab
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_transform.inl
@@ -0,0 +1,153 @@
+namespace glm
+{
+ // Builds the identity value for genType through the detail-level trait
+ // dispatcher (init_gentype / genTypeTrait are defined elsewhere in detail).
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType identity()
+ {
+ return detail::init_gentype<genType, detail::genTypeTrait<genType>::GENTYPE>::identity();
+ }
+
+ // Post-multiplies m by a translation by v. Only the fourth column changes:
+ // it becomes m * (v.x, v.y, v.z, 1) expressed column-by-column.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
+ {
+ mat<4, 4, T, Q> Result(m);
+ Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3];
+ return Result;
+ }
+
+ // Post-multiplies m by an axis-angle rotation (angle in radians; the axis
+ // is re-normalized here). The 3x3 axis-angle rotation matrix is built from
+ // c, s and (1 - c) terms, then folded into m column-wise without forming a
+ // full 4x4 product; m's translation column is carried through unchanged.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v)
+ {
+ T const a = angle;
+ T const c = cos(a);
+ T const s = sin(a);
+
+ vec<3, T, Q> axis(normalize(v));
+ vec<3, T, Q> temp((T(1) - c) * axis);
+
+ mat<4, 4, T, Q> Rotate;
+ Rotate[0][0] = c + temp[0] * axis[0];
+ Rotate[0][1] = temp[0] * axis[1] + s * axis[2];
+ Rotate[0][2] = temp[0] * axis[2] - s * axis[1];
+
+ Rotate[1][0] = temp[1] * axis[0] - s * axis[2];
+ Rotate[1][1] = c + temp[1] * axis[1];
+ Rotate[1][2] = temp[1] * axis[2] + s * axis[0];
+
+ Rotate[2][0] = temp[2] * axis[0] + s * axis[1];
+ Rotate[2][1] = temp[2] * axis[1] - s * axis[0];
+ Rotate[2][2] = c + temp[2] * axis[2];
+
+ mat<4, 4, T, Q> Result;
+ Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
+ Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
+ Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
+ Result[3] = m[3];
+ return Result;
+ }
+
+ // Variant of rotate that builds the full 4x4 axis-angle rotation matrix
+ // explicitly and returns m * Result -- presumably kept as a readable
+ // reference implementation alongside the optimized rotate above.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate_slow(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v)
+ {
+ T const a = angle;
+ T const c = cos(a);
+ T const s = sin(a);
+ mat<4, 4, T, Q> Result;
+
+ vec<3, T, Q> axis = normalize(v);
+
+ Result[0][0] = c + (static_cast<T>(1) - c) * axis.x * axis.x;
+ Result[0][1] = (static_cast<T>(1) - c) * axis.x * axis.y + s * axis.z;
+ Result[0][2] = (static_cast<T>(1) - c) * axis.x * axis.z - s * axis.y;
+ Result[0][3] = static_cast<T>(0);
+
+ Result[1][0] = (static_cast<T>(1) - c) * axis.y * axis.x - s * axis.z;
+ Result[1][1] = c + (static_cast<T>(1) - c) * axis.y * axis.y;
+ Result[1][2] = (static_cast<T>(1) - c) * axis.y * axis.z + s * axis.x;
+ Result[1][3] = static_cast<T>(0);
+
+ Result[2][0] = (static_cast<T>(1) - c) * axis.z * axis.x + s * axis.y;
+ Result[2][1] = (static_cast<T>(1) - c) * axis.z * axis.y - s * axis.x;
+ Result[2][2] = c + (static_cast<T>(1) - c) * axis.z * axis.z;
+ Result[2][3] = static_cast<T>(0);
+
+ Result[3] = vec<4, T, Q>(0, 0, 0, 1);
+ return m * Result;
+ }
+
+ // Post-multiplies m by a non-uniform scale: scales the first three columns
+ // by v's components and keeps the translation column as-is.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
+ {
+ mat<4, 4, T, Q> Result;
+ Result[0] = m[0] * v[0];
+ Result[1] = m[1] * v[1];
+ Result[2] = m[2] * v[2];
+ Result[3] = m[3];
+ return Result;
+ }
+
+ // Variant of scale that builds an explicit scale matrix and performs a full
+ // matrix product -- presumably kept as a readable reference implementation.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale_slow(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
+ {
+ mat<4, 4, T, Q> Result(T(1));
+ Result[0][0] = v.x;
+ Result[1][1] = v.y;
+ Result[2][2] = v.z;
+ return m * Result;
+ }
+
+ // Right-handed look-at view matrix: f points from eye toward center,
+ // s is the side axis (f x up), u the recomputed up (s x f). The camera
+ // looks down -f, and the last column translates eye to the origin.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtRH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up)
+ {
+ vec<3, T, Q> const f(normalize(center - eye));
+ vec<3, T, Q> const s(normalize(cross(f, up)));
+ vec<3, T, Q> const u(cross(s, f));
+
+ mat<4, 4, T, Q> Result(1);
+ Result[0][0] = s.x;
+ Result[1][0] = s.y;
+ Result[2][0] = s.z;
+ Result[0][1] = u.x;
+ Result[1][1] = u.y;
+ Result[2][1] = u.z;
+ Result[0][2] =-f.x;
+ Result[1][2] =-f.y;
+ Result[2][2] =-f.z;
+ Result[3][0] =-dot(s, eye);
+ Result[3][1] =-dot(u, eye);
+ Result[3][2] = dot(f, eye);
+ return Result;
+ }
+
+ // Left-handed look-at view matrix: cross-product order is flipped relative
+ // to lookAtRH (s = up x f, u = f x s) and the camera looks down +f.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtLH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up)
+ {
+ vec<3, T, Q> const f(normalize(center - eye));
+ vec<3, T, Q> const s(normalize(cross(up, f)));
+ vec<3, T, Q> const u(cross(f, s));
+
+ mat<4, 4, T, Q> Result(1);
+ Result[0][0] = s.x;
+ Result[1][0] = s.y;
+ Result[2][0] = s.z;
+ Result[0][1] = u.x;
+ Result[1][1] = u.y;
+ Result[2][1] = u.z;
+ Result[0][2] = f.x;
+ Result[1][2] = f.y;
+ Result[2][2] = f.z;
+ Result[3][0] = -dot(s, eye);
+ Result[3][1] = -dot(u, eye);
+ Result[3][2] = -dot(f, eye);
+ return Result;
+ }
+
+ // Handedness-dispatching look-at: selects the LH or RH variant at compile
+ // time from the configured clip-control handedness bit.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAt(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up)
+ {
+# if (GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT)
+ return lookAtLH(eye, center, up);
+# else
+ return lookAtRH(eye, center, up);
+# endif
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint2x2.hpp b/3rdparty/glm/source/glm/ext/matrix_uint2x2.hpp
new file mode 100644
index 0000000..034771a
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint2x2.hpp
@@ -0,0 +1,38 @@
+/// @ref ext_matrix_uint2x2
+/// @file glm/ext/matrix_uint2x2.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint2x2 GLM_EXT_matrix_uint2x2
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint2x2.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint2x2 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint2x2
+ /// @{
+
+ /// Unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2
+ typedef mat<2, 2, uint, defaultp> umat2x2;
+
+ /// Unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2
+ typedef mat<2, 2, uint, defaultp> umat2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint2x2_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint2x2_sized.hpp
new file mode 100644
index 0000000..4555324
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint2x2_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_matrix_uint2x2_sized
+/// @file glm/ext/matrix_uint2x2_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint2x2_sized GLM_EXT_matrix_uint2x2_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint2x2_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x2.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint2x2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint2x2_sized
+ /// @{
+
+ /// 8 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint8, defaultp> u8mat2x2;
+
+ /// 16 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint16, defaultp> u16mat2x2;
+
+ /// 32 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint32, defaultp> u32mat2x2;
+
+ /// 64 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint64, defaultp> u64mat2x2;
+
+
+ /// 8 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint8, defaultp> u8mat2;
+
+ /// 16 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint16, defaultp> u16mat2;
+
+ /// 32 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint32, defaultp> u32mat2;
+
+ /// 64 bit unsigned integer 2x2 matrix.
+ ///
+ /// @see ext_matrix_uint2x2_sized
+ typedef mat<2, 2, uint64, defaultp> u64mat2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint2x3.hpp b/3rdparty/glm/source/glm/ext/matrix_uint2x3.hpp
new file mode 100644
index 0000000..7de62f6
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint2x3.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_uint2x3
+/// @file glm/ext/matrix_uint2x3.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint2x3 GLM_EXT_matrix_uint2x3
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint2x3.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x3.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint2x3 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint2x3
+ /// @{
+
+ /// Unsigned integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_uint2x3
+ typedef mat<2, 3, uint, defaultp> umat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint2x3_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint2x3_sized.hpp
new file mode 100644
index 0000000..db7939c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint2x3_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_uint2x3_sized
+/// @file glm/ext/matrix_uint2x3_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint2x3_sized GLM_EXT_matrix_uint2x3_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint2x3_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x3.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint2x3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint2x3_sized
+ /// @{
+
+ /// 8 bit unsigned integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_uint2x3_sized
+ typedef mat<2, 3, uint8, defaultp> u8mat2x3;
+
+ /// 16 bit unsigned integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_uint2x3_sized
+ typedef mat<2, 3, uint16, defaultp> u16mat2x3;
+
+ /// 32 bit unsigned integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_uint2x3_sized
+ typedef mat<2, 3, uint32, defaultp> u32mat2x3;
+
+ /// 64 bit unsigned integer 2x3 matrix.
+ ///
+ /// @see ext_matrix_uint2x3_sized
+ typedef mat<2, 3, uint64, defaultp> u64mat2x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint2x4.hpp b/3rdparty/glm/source/glm/ext/matrix_uint2x4.hpp
new file mode 100644
index 0000000..0f99350
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint2x4.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_uint2x4
+/// @file glm/ext/matrix_uint2x4.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint2x4 GLM_EXT_matrix_uint2x4
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint2x4.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint2x4 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint2x4
+ /// @{
+
+ /// Unsigned integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_uint2x4
+ typedef mat<2, 4, uint, defaultp> umat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint2x4_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint2x4_sized.hpp
new file mode 100644
index 0000000..5cb8e54
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint2x4_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_uint2x4_sized
+/// @file glm/ext/matrix_uint2x4_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint2x4_sized GLM_EXT_matrix_uint2x4_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint2x4_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x4.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint2x4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint2x4_sized
+ /// @{
+
+ /// 8 bit unsigned integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_uint2x4_sized
+ typedef mat<2, 4, uint8, defaultp> u8mat2x4;
+
+ /// 16 bit unsigned integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_uint2x4_sized
+ typedef mat<2, 4, uint16, defaultp> u16mat2x4;
+
+ /// 32 bit unsigned integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_uint2x4_sized
+ typedef mat<2, 4, uint32, defaultp> u32mat2x4;
+
+ /// 64 bit unsigned integer 2x4 matrix.
+ ///
+ /// @see ext_matrix_uint2x4_sized
+ typedef mat<2, 4, uint64, defaultp> u64mat2x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint3x2.hpp b/3rdparty/glm/source/glm/ext/matrix_uint3x2.hpp
new file mode 100644
index 0000000..47f4873
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint3x2.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_uint3x2
+/// @file glm/ext/matrix_uint3x2.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint3x2 GLM_EXT_matrix_uint3x2
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint3x2.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint3x2 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint3x2
+ /// @{
+
+ /// Unsigned integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_uint3x2
+ typedef mat<3, 2, uint, defaultp> umat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint3x2_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint3x2_sized.hpp
new file mode 100644
index 0000000..c81af8f
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint3x2_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_uint3x2_sized
+/// @file glm/ext/matrix_uint3x2_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint3x2_sized GLM_EXT_matrix_uint3x2_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint3x2_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x2.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint3x2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint3x2_sized
+ /// @{
+
+ /// 8 bit unsigned integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_uint3x2_sized
+ typedef mat<3, 2, uint8, defaultp> u8mat3x2;
+
+ /// 16 bit unsigned integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_uint3x2_sized
+ typedef mat<3, 2, uint16, defaultp> u16mat3x2;
+
+ /// 32 bit unsigned integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_uint3x2_sized
+ typedef mat<3, 2, uint32, defaultp> u32mat3x2;
+
+ /// 64 bit unsigned integer 3x2 matrix.
+ ///
+ /// @see ext_matrix_uint3x2_sized
+ typedef mat<3, 2, uint64, defaultp> u64mat3x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint3x3.hpp b/3rdparty/glm/source/glm/ext/matrix_uint3x3.hpp
new file mode 100644
index 0000000..1004c0d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint3x3.hpp
@@ -0,0 +1,38 @@
+/// @ref ext_matrix_uint3x3
+/// @file glm/ext/matrix_uint3x3.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint3x3 GLM_EXT_matrix_uint3x3
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint3x3.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x3.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint3x3 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint3x3
+ /// @{
+
+ /// Unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3
+ typedef mat<3, 3, uint, defaultp> umat3x3;
+
+ /// Unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3
+ typedef mat<3, 3, uint, defaultp> umat3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint3x3_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint3x3_sized.hpp
new file mode 100644
index 0000000..41a8be7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint3x3_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_matrix_uint3x3_sized
+/// @file glm/ext/matrix_uint3x3_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint3x3_sized GLM_EXT_matrix_uint3x3_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint3x3_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x3.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint3x3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint3x3_sized
+ /// @{
+
+ /// 8 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint8, defaultp> u8mat3x3;
+
+ /// 16 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint16, defaultp> u16mat3x3;
+
+ /// 32 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint32, defaultp> u32mat3x3;
+
+ /// 64 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint64, defaultp> u64mat3x3;
+
+
+ /// 8 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint8, defaultp> u8mat3;
+
+ /// 16 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint16, defaultp> u16mat3;
+
+ /// 32 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint32, defaultp> u32mat3;
+
+ /// 64 bit unsigned integer 3x3 matrix.
+ ///
+ /// @see ext_matrix_uint3x3_sized
+ typedef mat<3, 3, uint64, defaultp> u64mat3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint3x4.hpp b/3rdparty/glm/source/glm/ext/matrix_uint3x4.hpp
new file mode 100644
index 0000000..c6dd78c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint3x4.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_uint3x4
+/// @file glm/ext/matrix_uint3x4.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint3x4 GLM_EXT_matrix_uint3x4
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint3x4.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint3x4 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint3x4
+ /// @{
+
+	/// Unsigned integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_uint3x4
+ typedef mat<3, 4, uint, defaultp> umat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint3x4_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint3x4_sized.hpp
new file mode 100644
index 0000000..2ce28ad
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint3x4_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_uint3x4_sized
+/// @file glm/ext/matrix_uint3x4_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint3x4_sized GLM_EXT_matrix_uint3x4_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint3x4_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x4.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint3x4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint3x4_sized
+ /// @{
+
+ /// 8 bit unsigned integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_uint3x4_sized
+ typedef mat<3, 4, uint8, defaultp> u8mat3x4;
+
+ /// 16 bit unsigned integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_uint3x4_sized
+ typedef mat<3, 4, uint16, defaultp> u16mat3x4;
+
+ /// 32 bit unsigned integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_uint3x4_sized
+ typedef mat<3, 4, uint32, defaultp> u32mat3x4;
+
+ /// 64 bit unsigned integer 3x4 matrix.
+ ///
+ /// @see ext_matrix_uint3x4_sized
+ typedef mat<3, 4, uint64, defaultp> u64mat3x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint4x2.hpp b/3rdparty/glm/source/glm/ext/matrix_uint4x2.hpp
new file mode 100644
index 0000000..0446f57
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint4x2.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_uint4x2
+/// @file glm/ext/matrix_uint4x2.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint4x2 GLM_EXT_matrix_uint4x2
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint4x2.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint4x2 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint4x2
+ /// @{
+
+ /// Unsigned integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_uint4x2
+ typedef mat<4, 2, uint, defaultp> umat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint4x2_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint4x2_sized.hpp
new file mode 100644
index 0000000..57a66bf
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint4x2_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_uint4x2_sized
+/// @file glm/ext/matrix_uint4x2_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint4x2_sized GLM_EXT_matrix_uint4x2_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint4x2_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x2.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint4x2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint4x2_sized
+ /// @{
+
+ /// 8 bit unsigned integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_uint4x2_sized
+ typedef mat<4, 2, uint8, defaultp> u8mat4x2;
+
+ /// 16 bit unsigned integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_uint4x2_sized
+ typedef mat<4, 2, uint16, defaultp> u16mat4x2;
+
+ /// 32 bit unsigned integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_uint4x2_sized
+ typedef mat<4, 2, uint32, defaultp> u32mat4x2;
+
+ /// 64 bit unsigned integer 4x2 matrix.
+ ///
+ /// @see ext_matrix_uint4x2_sized
+ typedef mat<4, 2, uint64, defaultp> u64mat4x2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint4x3.hpp b/3rdparty/glm/source/glm/ext/matrix_uint4x3.hpp
new file mode 100644
index 0000000..54c24e4
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint4x3.hpp
@@ -0,0 +1,33 @@
+/// @ref ext_matrix_uint4x3
+/// @file glm/ext/matrix_uint4x3.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint4x3 GLM_EXT_matrix_uint4x3
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint4x3.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x3.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint4x3 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint4x3
+ /// @{
+
+ /// Unsigned integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_uint4x3
+ typedef mat<4, 3, uint, defaultp> umat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint4x3_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint4x3_sized.hpp
new file mode 100644
index 0000000..2e61124
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint4x3_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_matrix_uint4x3_sized
+/// @file glm/ext/matrix_uint4x3_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint4x3_sized GLM_EXT_matrix_uint4x3_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint4x3_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x3.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint4x3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint4x3_sized
+ /// @{
+
+ /// 8 bit unsigned integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_uint4x3_sized
+ typedef mat<4, 3, uint8, defaultp> u8mat4x3;
+
+ /// 16 bit unsigned integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_uint4x3_sized
+ typedef mat<4, 3, uint16, defaultp> u16mat4x3;
+
+ /// 32 bit unsigned integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_uint4x3_sized
+ typedef mat<4, 3, uint32, defaultp> u32mat4x3;
+
+ /// 64 bit unsigned integer 4x3 matrix.
+ ///
+ /// @see ext_matrix_uint4x3_sized
+ typedef mat<4, 3, uint64, defaultp> u64mat4x3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint4x4.hpp b/3rdparty/glm/source/glm/ext/matrix_uint4x4.hpp
new file mode 100644
index 0000000..5cc8455
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint4x4.hpp
@@ -0,0 +1,38 @@
+/// @ref ext_matrix_uint4x4
+/// @file glm/ext/matrix_uint4x4.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint4x4 GLM_EXT_matrix_uint4x4
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint4x4.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint4x4 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint4x4
+ /// @{
+
+ /// Unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4
+ typedef mat<4, 4, uint, defaultp> umat4x4;
+
+ /// Unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4
+ typedef mat<4, 4, uint, defaultp> umat4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/matrix_uint4x4_sized.hpp b/3rdparty/glm/source/glm/ext/matrix_uint4x4_sized.hpp
new file mode 100644
index 0000000..bb10bd2
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/matrix_uint4x4_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_matrix_uint4x4_sized
+/// @file glm/ext/matrix_uint4x4_sized.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_matrix_uint4x4_sized GLM_EXT_matrix_uint4x4_sized
+/// @ingroup ext
+///
+/// Include <glm/ext/matrix_uint4x4_sized.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat4x4.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_matrix_uint4x4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_matrix_uint4x4_sized
+ /// @{
+
+ /// 8 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint8, defaultp> u8mat4x4;
+
+ /// 16 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint16, defaultp> u16mat4x4;
+
+ /// 32 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint32, defaultp> u32mat4x4;
+
+ /// 64 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint64, defaultp> u64mat4x4;
+
+
+ /// 8 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint8, defaultp> u8mat4;
+
+ /// 16 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint16, defaultp> u16mat4;
+
+ /// 32 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint32, defaultp> u32mat4;
+
+ /// 64 bit unsigned integer 4x4 matrix.
+ ///
+ /// @see ext_matrix_uint4x4_sized
+ typedef mat<4, 4, uint64, defaultp> u64mat4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/quaternion_common.hpp b/3rdparty/glm/source/glm/ext/quaternion_common.hpp
new file mode 100644
index 0000000..f519d55
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_common.hpp
@@ -0,0 +1,135 @@
+/// @ref ext_quaternion_common
+/// @file glm/ext/quaternion_common.hpp
+///
+/// @defgroup ext_quaternion_common GLM_EXT_quaternion_common
+/// @ingroup ext
+///
+/// Provides common functions for quaternion types
+///
+/// Include <glm/ext/quaternion_common.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_common
+/// @see ext_vector_common
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+/// @see ext_quaternion_exponential
+/// @see ext_quaternion_geometric
+/// @see ext_quaternion_relational
+/// @see ext_quaternion_trigonometric
+/// @see ext_quaternion_transform
+
+#pragma once
+
+// Dependency:
+#include "../ext/scalar_constants.hpp"
+#include "../ext/quaternion_geometric.hpp"
+#include "../common.hpp"
+#include "../trigonometric.hpp"
+#include "../exponential.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_common extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_common
+ /// @{
+
+ /// Spherical linear interpolation of two quaternions.
+ /// The interpolation is oriented and the rotation is performed at constant speed.
+ /// For short path spherical linear interpolation, use the slerp function.
+ ///
+ /// @param x A quaternion
+ /// @param y A quaternion
+ /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1].
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ ///
+ /// @see - slerp(qua<T, Q> const& x, qua<T, Q> const& y, T const& a)
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> mix(qua<T, Q> const& x, qua<T, Q> const& y, T a);
+
+ /// Linear interpolation of two quaternions.
+ /// The interpolation is oriented.
+ ///
+ /// @param x A quaternion
+ /// @param y A quaternion
+ /// @param a Interpolation factor. The interpolation is defined in the range [0, 1].
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> lerp(qua<T, Q> const& x, qua<T, Q> const& y, T a);
+
+ /// Spherical linear interpolation of two quaternions.
+ /// The interpolation always take the short path and the rotation is performed at constant speed.
+ ///
+ /// @param x A quaternion
+ /// @param y A quaternion
+ /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1].
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> slerp(qua<T, Q> const& x, qua<T, Q> const& y, T a);
+
+ /// Spherical linear interpolation of two quaternions with multiple spins over rotation axis.
+ /// The interpolation always take the short path when the spin count is positive and long path
+ /// when count is negative. Rotation is performed at constant speed.
+ ///
+ /// @param x A quaternion
+ /// @param y A quaternion
+ /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1].
+ /// @param k Additional spin count. If Value is negative interpolation will be on "long" path.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam S An integer scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, typename S, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> slerp(qua<T, Q> const& x, qua<T, Q> const& y, T a, S k);
+
+ /// Returns the q conjugate.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> conjugate(qua<T, Q> const& q);
+
+ /// Returns the q inverse.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> inverse(qua<T, Q> const& q);
+
+ /// Returns true if x holds a NaN (not a number)
+ /// representation in the underlying implementation's set of
+ /// floating point representations. Returns false otherwise,
+ /// including for implementations with no NaN
+ /// representations.
+ ///
+ /// /!\ When using compiler fast math, this function may fail.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, bool, Q> isnan(qua<T, Q> const& x);
+
+ /// Returns true if x holds a positive infinity or negative
+ /// infinity representation in the underlying implementation's
+ /// set of floating point representations. Returns false
+ /// otherwise, including for implementations with no infinity
+ /// representations.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, bool, Q> isinf(qua<T, Q> const& x);
+
+ /// @}
+} //namespace glm
+
+#include "quaternion_common.inl"
diff --git a/3rdparty/glm/source/glm/ext/quaternion_common.inl b/3rdparty/glm/source/glm/ext/quaternion_common.inl
new file mode 100644
index 0000000..0e4a3bb
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_common.inl
@@ -0,0 +1,144 @@
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> mix(qua<T, Q> const& x, qua<T, Q> const& y, T a)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'mix' only accept floating-point inputs");
+
+ T const cosTheta = dot(x, y);
+
+ // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator
+ if(cosTheta > static_cast<T>(1) - epsilon<T>())
+ {
+ // Linear interpolation
+ return qua<T, Q>(
+ mix(x.w, y.w, a),
+ mix(x.x, y.x, a),
+ mix(x.y, y.y, a),
+ mix(x.z, y.z, a));
+ }
+ else
+ {
+ // Essential Mathematics, page 467
+ T angle = acos(cosTheta);
+ return (sin((static_cast<T>(1) - a) * angle) * x + sin(a * angle) * y) / sin(angle);
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> lerp(qua<T, Q> const& x, qua<T, Q> const& y, T a)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'lerp' only accept floating-point inputs");
+
+ // Lerp is only defined in [0, 1]
+ assert(a >= static_cast<T>(0));
+ assert(a <= static_cast<T>(1));
+
+ return x * (static_cast<T>(1) - a) + (y * a);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> slerp(qua<T, Q> const& x, qua<T, Q> const& y, T a)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'slerp' only accept floating-point inputs");
+
+ qua<T, Q> z = y;
+
+ T cosTheta = dot(x, y);
+
+ // If cosTheta < 0, the interpolation will take the long way around the sphere.
+ // To fix this, one quat must be negated.
+ if(cosTheta < static_cast<T>(0))
+ {
+ z = -y;
+ cosTheta = -cosTheta;
+ }
+
+ // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator
+ if(cosTheta > static_cast<T>(1) - epsilon<T>())
+ {
+ // Linear interpolation
+ return qua<T, Q>(
+ mix(x.w, z.w, a),
+ mix(x.x, z.x, a),
+ mix(x.y, z.y, a),
+ mix(x.z, z.z, a));
+ }
+ else
+ {
+ // Essential Mathematics, page 467
+ T angle = acos(cosTheta);
+ return (sin((static_cast<T>(1) - a) * angle) * x + sin(a * angle) * z) / sin(angle);
+ }
+ }
+
+ template<typename T, typename S, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> slerp(qua<T, Q> const& x, qua<T, Q> const& y, T a, S k)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'slerp' only accept floating-point inputs");
+ GLM_STATIC_ASSERT(std::numeric_limits<S>::is_integer, "'slerp' only accept integer for spin count");
+
+ qua<T, Q> z = y;
+
+ T cosTheta = dot(x, y);
+
+ // If cosTheta < 0, the interpolation will take the long way around the sphere.
+ // To fix this, one quat must be negated.
+ if (cosTheta < static_cast<T>(0))
+ {
+ z = -y;
+ cosTheta = -cosTheta;
+ }
+
+ // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator
+ if (cosTheta > static_cast<T>(1) - epsilon<T>())
+ {
+ // Linear interpolation
+ return qua<T, Q>(
+ mix(x.w, z.w, a),
+ mix(x.x, z.x, a),
+ mix(x.y, z.y, a),
+ mix(x.z, z.z, a));
+ }
+ else
+ {
+ // Graphics Gems III, page 96
+ T angle = acos(cosTheta);
+ T phi = angle + k * glm::pi<T>();
+ return (sin(angle - a * phi)* x + sin(a * phi) * z) / sin(angle);
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> conjugate(qua<T, Q> const& q)
+ {
+ return qua<T, Q>(q.w, -q.x, -q.y, -q.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> inverse(qua<T, Q> const& q)
+ {
+ return conjugate(q) / dot(q, q);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> isnan(qua<T, Q> const& q)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isnan' only accept floating-point inputs");
+
+ return vec<4, bool, Q>(isnan(q.x), isnan(q.y), isnan(q.z), isnan(q.w));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> isinf(qua<T, Q> const& q)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isinf' only accept floating-point inputs");
+
+ return vec<4, bool, Q>(isinf(q.x), isinf(q.y), isinf(q.z), isinf(q.w));
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "quaternion_common_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_common_simd.inl b/3rdparty/glm/source/glm/ext/quaternion_common_simd.inl
new file mode 100644
index 0000000..ddfc8a4
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_common_simd.inl
@@ -0,0 +1,18 @@
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+namespace glm{
+namespace detail
+{
+ template<qualifier Q>
+ struct compute_dot<qua<float, Q>, float, true>
+ {
+ static GLM_FUNC_QUALIFIER float call(qua<float, Q> const& x, qua<float, Q> const& y)
+ {
+ return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data));
+ }
+ };
+}//namespace detail
+}//namespace glm
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_double.hpp b/3rdparty/glm/source/glm/ext/quaternion_double.hpp
new file mode 100644
index 0000000..63b24de
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_double.hpp
@@ -0,0 +1,39 @@
+/// @ref ext_quaternion_double
+/// @file glm/ext/quaternion_double.hpp
+///
+/// @defgroup ext_quaternion_double GLM_EXT_quaternion_double
+/// @ingroup ext
+///
+/// Exposes double-precision floating point quaternion type.
+///
+/// Include <glm/ext/quaternion_double.hpp> to use the features of this extension.
+///
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double_precision
+/// @see ext_quaternion_common
+/// @see ext_quaternion_exponential
+/// @see ext_quaternion_geometric
+/// @see ext_quaternion_relational
+/// @see ext_quaternion_transform
+/// @see ext_quaternion_trigonometric
+
+#pragma once
+
+// Dependency:
+#include "../detail/type_quat.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_double extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_double
+ /// @{
+
+ /// Quaternion of double-precision floating-point numbers.
+ typedef qua<double, defaultp> dquat;
+
+ /// @}
+} //namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_double_precision.hpp b/3rdparty/glm/source/glm/ext/quaternion_double_precision.hpp
new file mode 100644
index 0000000..8aa24a1
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_double_precision.hpp
@@ -0,0 +1,42 @@
+/// @ref ext_quaternion_double_precision
+/// @file glm/ext/quaternion_double_precision.hpp
+///
+/// @defgroup ext_quaternion_double_precision GLM_EXT_quaternion_double_precision
+/// @ingroup ext
+///
+/// Exposes double-precision floating point quaternion type with various precision in term of ULPs.
+///
+/// Include <glm/ext/quaternion_double_precision.hpp> to use the features of this extension.
+
+#pragma once
+
+// Dependency:
+#include "../detail/type_quat.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_double_precision extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_double_precision
+ /// @{
+
+ /// Quaternion of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see ext_quaternion_double_precision
+ typedef qua<double, lowp> lowp_dquat;
+
+ /// Quaternion of medium double-qualifier floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see ext_quaternion_double_precision
+ typedef qua<double, mediump> mediump_dquat;
+
+ /// Quaternion of high double-qualifier floating-point numbers using high precision arithmetic in term of ULPs.
+ ///
+ /// @see ext_quaternion_double_precision
+ typedef qua<double, highp> highp_dquat;
+
+ /// @}
+} //namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_exponential.hpp b/3rdparty/glm/source/glm/ext/quaternion_exponential.hpp
new file mode 100644
index 0000000..affe297
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_exponential.hpp
@@ -0,0 +1,63 @@
+/// @ref ext_quaternion_exponential
+/// @file glm/ext/quaternion_exponential.hpp
+///
+/// @defgroup ext_quaternion_exponential GLM_EXT_quaternion_exponential
+/// @ingroup ext
+///
+/// Provides exponential functions for quaternion types
+///
+/// Include <glm/ext/quaternion_exponential.hpp> to use the features of this extension.
+///
+/// @see core_exponential
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+
+#pragma once
+
+// Dependency:
+#include "../common.hpp"
+#include "../trigonometric.hpp"
+#include "../geometric.hpp"
+#include "../ext/scalar_constants.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_exponential extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup ext_quaternion_exponential
+ /// @{
+
+	/// Returns an exponential of a quaternion.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> exp(qua<T, Q> const& q);
+
+ /// Returns a logarithm of a quaternion
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> log(qua<T, Q> const& q);
+
+ /// Returns a quaternion raised to a power.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> pow(qua<T, Q> const& q, T y);
+
+ /// Returns the square root of a quaternion
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> sqrt(qua<T, Q> const& q);
+
+ /// @}
+} //namespace glm
+
+#include "quaternion_exponential.inl"
diff --git a/3rdparty/glm/source/glm/ext/quaternion_exponential.inl b/3rdparty/glm/source/glm/ext/quaternion_exponential.inl
new file mode 100644
index 0000000..dd24b6c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_exponential.inl
@@ -0,0 +1,89 @@
+#include "scalar_constants.hpp"
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> exp(qua<T, Q> const& q)
+ {
+ vec<3, T, Q> u(q.x, q.y, q.z);
+ T const Angle = glm::length(u);
+ if (Angle < epsilon<T>())
+ return qua<T, Q>();
+
+ vec<3, T, Q> const v(u / Angle);
+ return qua<T, Q>(cos(Angle), sin(Angle) * v);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> log(qua<T, Q> const& q)
+ {
+ vec<3, T, Q> u(q.x, q.y, q.z);
+ T Vec3Len = length(u);
+
+ if (Vec3Len < epsilon<T>())
+ {
+ if(q.w > static_cast<T>(0))
+ return qua<T, Q>(log(q.w), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0));
+ else if(q.w < static_cast<T>(0))
+ return qua<T, Q>(log(-q.w), pi<T>(), static_cast<T>(0), static_cast<T>(0));
+ else
+ return qua<T, Q>(std::numeric_limits<T>::infinity(), std::numeric_limits<T>::infinity(), std::numeric_limits<T>::infinity(), std::numeric_limits<T>::infinity());
+ }
+ else
+ {
+ T t = atan(Vec3Len, T(q.w)) / Vec3Len;
+ T QuatLen2 = Vec3Len * Vec3Len + q.w * q.w;
+ return qua<T, Q>(static_cast<T>(0.5) * log(QuatLen2), t * q.x, t * q.y, t * q.z);
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> pow(qua<T, Q> const& x, T y)
+ {
+ //Raising to the power of 0 should yield 1
+ //Needed to prevent a division by 0 error later on
+ if(y > -epsilon<T>() && y < epsilon<T>())
+ return qua<T, Q>(1,0,0,0);
+
+ //To deal with non-unit quaternions
+ T magnitude = sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w *x.w);
+
+ T Angle;
+ if(abs(x.w / magnitude) > cos_one_over_two<T>())
+ {
+ //Scalar component is close to 1; using it to recover angle would lose precision
+ //Instead, we use the non-scalar components since sin() is accurate around 0
+
+ //Prevent a division by 0 error later on
+ T VectorMagnitude = x.x * x.x + x.y * x.y + x.z * x.z;
+ //Despite the compiler might say, we actually want to compare
+ //VectorMagnitude to 0. here; we could use denorm_int() compiling a
+ //project with unsafe maths optimizations might make the comparison
+ //always false, even when VectorMagnitude is 0.
+ if (VectorMagnitude < std::numeric_limits<T>::min()) {
+ //Equivalent to raising a real number to a power
+ return qua<T, Q>(pow(x.w, y), 0, 0, 0);
+ }
+
+ Angle = asin(sqrt(VectorMagnitude) / magnitude);
+ }
+ else
+ {
+ //Scalar component is small, shouldn't cause loss of precision
+ Angle = acos(x.w / magnitude);
+ }
+
+ T NewAngle = Angle * y;
+ T Div = sin(NewAngle) / sin(Angle);
+ T Mag = pow(magnitude, y - static_cast<T>(1));
+ return qua<T, Q>(cos(NewAngle) * magnitude * Mag, x.x * Div * Mag, x.y * Div * Mag, x.z * Div * Mag);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> sqrt(qua<T, Q> const& x)
+ {
+ return pow(x, static_cast<T>(0.5));
+ }
+}//namespace glm
+
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_float.hpp b/3rdparty/glm/source/glm/ext/quaternion_float.hpp
new file mode 100644
index 0000000..ca42a60
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_float.hpp
@@ -0,0 +1,39 @@
+/// @ref ext_quaternion_float
+/// @file glm/ext/quaternion_float.hpp
+///
+/// @defgroup ext_quaternion_float GLM_EXT_quaternion_float
+/// @ingroup ext
+///
+/// Exposes single-precision floating point quaternion type.
+///
+/// Include <glm/ext/quaternion_float.hpp> to use the features of this extension.
+///
+/// @see ext_quaternion_double
+/// @see ext_quaternion_float_precision
+/// @see ext_quaternion_common
+/// @see ext_quaternion_exponential
+/// @see ext_quaternion_geometric
+/// @see ext_quaternion_relational
+/// @see ext_quaternion_transform
+/// @see ext_quaternion_trigonometric
+
+#pragma once
+
+// Dependency:
+#include "../detail/type_quat.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_float extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_float
+ /// @{
+
+ /// Quaternion of single-precision floating-point numbers.
+ typedef qua<float, defaultp> quat;
+
+ /// @}
+} //namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_float_precision.hpp b/3rdparty/glm/source/glm/ext/quaternion_float_precision.hpp
new file mode 100644
index 0000000..f9e4f5c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_float_precision.hpp
@@ -0,0 +1,36 @@
+/// @ref ext_quaternion_float_precision
+/// @file glm/ext/quaternion_float_precision.hpp
+///
+/// @defgroup ext_quaternion_float_precision GLM_EXT_quaternion_float_precision
+/// @ingroup ext
+///
+/// Exposes single-precision floating point quaternion type with various precision in term of ULPs.
+///
+/// Include <glm/ext/quaternion_float_precision.hpp> to use the features of this extension.
+
+#pragma once
+
+// Dependency:
+#include "../detail/type_quat.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_float_precision extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_float_precision
+ /// @{
+
+	/// Quaternion of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef qua<float, lowp> lowp_quat;
+
+	/// Quaternion of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef qua<float, mediump> mediump_quat;
+
+ /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef qua<float, highp> highp_quat;
+
+ /// @}
+} //namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_geometric.hpp b/3rdparty/glm/source/glm/ext/quaternion_geometric.hpp
new file mode 100644
index 0000000..6d98bbe
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_geometric.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_quaternion_geometric
+/// @file glm/ext/quaternion_geometric.hpp
+///
+/// @defgroup ext_quaternion_geometric GLM_EXT_quaternion_geometric
+/// @ingroup ext
+///
+/// Provides geometric functions for quaternion types
+///
+/// Include <glm/ext/quaternion_geometric.hpp> to use the features of this extension.
+///
+/// @see core_geometric
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+
+#pragma once
+
+// Dependency:
+#include "../geometric.hpp"
+#include "../exponential.hpp"
+#include "../ext/vector_relational.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_geometric extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_geometric
+ /// @{
+
+	/// Returns the norm of a quaternion
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_quaternion_geometric
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T length(qua<T, Q> const& q);
+
+ /// Returns the normalized quaternion.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_quaternion_geometric
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> normalize(qua<T, Q> const& q);
+
+ /// Returns dot product of q1 and q2, i.e., q1[0] * q2[0] + q1[1] * q2[1] + ...
+ ///
+ /// @tparam T Floating-point scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_quaternion_geometric
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T dot(qua<T, Q> const& x, qua<T, Q> const& y);
+
+ /// Compute a cross product.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_quaternion_geometric
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> cross(qua<T, Q> const& q1, qua<T, Q> const& q2);
+
+ /// @}
+} //namespace glm
+
+#include "quaternion_geometric.inl"
diff --git a/3rdparty/glm/source/glm/ext/quaternion_geometric.inl b/3rdparty/glm/source/glm/ext/quaternion_geometric.inl
new file mode 100644
index 0000000..e155ac5
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_geometric.inl
@@ -0,0 +1,36 @@
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T dot(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'dot' accepts only floating-point inputs");
+ return detail::compute_dot<qua<T, Q>, T, detail::is_aligned<Q>::value>::call(x, y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T length(qua<T, Q> const& q)
+ {
+ return glm::sqrt(dot(q, q));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> normalize(qua<T, Q> const& q)
+ {
+ T len = length(q);
+ if(len <= static_cast<T>(0)) // Problem
+ return qua<T, Q>(static_cast<T>(1), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0));
+ T oneOverLen = static_cast<T>(1) / len;
+ return qua<T, Q>(q.w * oneOverLen, q.x * oneOverLen, q.y * oneOverLen, q.z * oneOverLen);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> cross(qua<T, Q> const& q1, qua<T, Q> const& q2)
+ {
+ return qua<T, Q>(
+ q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z,
+ q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y,
+ q1.w * q2.y + q1.y * q2.w + q1.z * q2.x - q1.x * q2.z,
+ q1.w * q2.z + q1.z * q2.w + q1.x * q2.y - q1.y * q2.x);
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_relational.hpp b/3rdparty/glm/source/glm/ext/quaternion_relational.hpp
new file mode 100644
index 0000000..7aa121d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_relational.hpp
@@ -0,0 +1,62 @@
+/// @ref ext_quaternion_relational
+/// @file glm/ext/quaternion_relational.hpp
+///
+/// @defgroup ext_quaternion_relational GLM_EXT_quaternion_relational
+/// @ingroup ext
+///
+/// Exposes comparison functions for quaternion types that take a user defined epsilon values.
+///
+/// Include <glm/ext/quaternion_relational.hpp> to use the features of this extension.
+///
+/// @see core_vector_relational
+/// @see ext_vector_relational
+/// @see ext_matrix_relational
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+
+#pragma once
+
+// Dependency:
+#include "../vector_relational.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_relational extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_relational
+ /// @{
+
+ /// Returns the component-wise comparison of result x == y.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, bool, Q> equal(qua<T, Q> const& x, qua<T, Q> const& y);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, bool, Q> equal(qua<T, Q> const& x, qua<T, Q> const& y, T epsilon);
+
+ /// Returns the component-wise comparison of result x != y.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua<T, Q> const& x, qua<T, Q> const& y);
+
+ /// Returns the component-wise comparison of |x - y| >= epsilon.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua<T, Q> const& x, qua<T, Q> const& y, T epsilon);
+
+ /// @}
+} //namespace glm
+
+#include "quaternion_relational.inl"
diff --git a/3rdparty/glm/source/glm/ext/quaternion_relational.inl b/3rdparty/glm/source/glm/ext/quaternion_relational.inl
new file mode 100644
index 0000000..b1713e9
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_relational.inl
@@ -0,0 +1,35 @@
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ vec<4, bool, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = x[i] == y[i];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua<T, Q> const& x, qua<T, Q> const& y, T epsilon)
+ {
+ vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
+ return lessThan(abs(v), vec<4, T, Q>(epsilon));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ vec<4, bool, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = x[i] != y[i];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua<T, Q> const& x, qua<T, Q> const& y, T epsilon)
+ {
+ vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
+ return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon));
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_transform.hpp b/3rdparty/glm/source/glm/ext/quaternion_transform.hpp
new file mode 100644
index 0000000..a9cc5c2
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_transform.hpp
@@ -0,0 +1,47 @@
+/// @ref ext_quaternion_transform
+/// @file glm/ext/quaternion_transform.hpp
+///
+/// @defgroup ext_quaternion_transform GLM_EXT_quaternion_transform
+/// @ingroup ext
+///
+/// Provides transformation functions for quaternion types
+///
+/// Include <glm/ext/quaternion_transform.hpp> to use the features of this extension.
+///
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+/// @see ext_quaternion_exponential
+/// @see ext_quaternion_geometric
+/// @see ext_quaternion_relational
+/// @see ext_quaternion_trigonometric
+
+#pragma once
+
+// Dependency:
+#include "../common.hpp"
+#include "../trigonometric.hpp"
+#include "../geometric.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_transform extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_transform
+ /// @{
+
+ /// Rotates a quaternion from a vector of 3 components axis and an angle.
+ ///
+ /// @param q Source orientation
+ /// @param angle Angle expressed in radians.
+ /// @param axis Axis of the rotation
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> rotate(qua<T, Q> const& q, T const& angle, vec<3, T, Q> const& axis);
+ /// @}
+} //namespace glm
+
+#include "quaternion_transform.inl"
diff --git a/3rdparty/glm/source/glm/ext/quaternion_transform.inl b/3rdparty/glm/source/glm/ext/quaternion_transform.inl
new file mode 100644
index 0000000..b87ecb6
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_transform.inl
@@ -0,0 +1,24 @@
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> rotate(qua<T, Q> const& q, T const& angle, vec<3, T, Q> const& v)
+ {
+ vec<3, T, Q> Tmp = v;
+
+ // Axis of rotation must be normalised
+ T len = glm::length(Tmp);
+ if(abs(len - static_cast<T>(1)) > static_cast<T>(0.001))
+ {
+ T oneOverLen = static_cast<T>(1) / len;
+ Tmp.x *= oneOverLen;
+ Tmp.y *= oneOverLen;
+ Tmp.z *= oneOverLen;
+ }
+
+ T const AngleRad(angle);
+ T const Sin = sin(AngleRad * static_cast<T>(0.5));
+
+ return q * qua<T, Q>(cos(AngleRad * static_cast<T>(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin);
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/quaternion_trigonometric.hpp b/3rdparty/glm/source/glm/ext/quaternion_trigonometric.hpp
new file mode 100644
index 0000000..76cea27
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_trigonometric.hpp
@@ -0,0 +1,63 @@
+/// @ref ext_quaternion_trigonometric
+/// @file glm/ext/quaternion_trigonometric.hpp
+///
+/// @defgroup ext_quaternion_trigonometric GLM_EXT_quaternion_trigonometric
+/// @ingroup ext
+///
+/// Provides trigonometric functions for quaternion types
+///
+/// Include <glm/ext/quaternion_trigonometric.hpp> to use the features of this extension.
+///
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+/// @see ext_quaternion_exponential
+/// @see ext_quaternion_geometric
+/// @see ext_quaternion_relational
+/// @see ext_quaternion_transform
+
+#pragma once
+
+// Dependency:
+#include "../trigonometric.hpp"
+#include "../exponential.hpp"
+#include "scalar_constants.hpp"
+#include "vector_relational.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_quaternion_trigonometric extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_quaternion_trigonometric
+ /// @{
+
+ /// Returns the quaternion rotation angle.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T angle(qua<T, Q> const& x);
+
+	/// Returns the rotation axis of the quaternion.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> axis(qua<T, Q> const& x);
+
+ /// Build a quaternion from an angle and a normalized axis.
+ ///
+ /// @param angle Angle expressed in radians.
+ /// @param axis Axis of the quaternion, must be normalized.
+ ///
+ /// @tparam T A floating-point scalar type
+ /// @tparam Q A value from qualifier enum
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> angleAxis(T const& angle, vec<3, T, Q> const& axis);
+
+ /// @}
+} //namespace glm
+
+#include "quaternion_trigonometric.inl"
diff --git a/3rdparty/glm/source/glm/ext/quaternion_trigonometric.inl b/3rdparty/glm/source/glm/ext/quaternion_trigonometric.inl
new file mode 100644
index 0000000..896449a
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/quaternion_trigonometric.inl
@@ -0,0 +1,37 @@
+#include "scalar_constants.hpp"
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T angle(qua<T, Q> const& x)
+ {
+ if (abs(x.w) > cos_one_over_two<T>())
+ {
+ T const a = asin(sqrt(x.x * x.x + x.y * x.y + x.z * x.z)) * static_cast<T>(2);
+ if(x.w < static_cast<T>(0))
+ return pi<T>() * static_cast<T>(2) - a;
+ return a;
+ }
+
+ return acos(x.w) * static_cast<T>(2);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> axis(qua<T, Q> const& x)
+ {
+ T const tmp1 = static_cast<T>(1) - x.w * x.w;
+ if(tmp1 <= static_cast<T>(0))
+ return vec<3, T, Q>(0, 0, 1);
+ T const tmp2 = static_cast<T>(1) / sqrt(tmp1);
+ return vec<3, T, Q>(x.x * tmp2, x.y * tmp2, x.z * tmp2);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> angleAxis(T const& angle, vec<3, T, Q> const& v)
+ {
+ T const a(angle);
+ T const s = glm::sin(a * static_cast<T>(0.5));
+
+ return qua<T, Q>(glm::cos(a * static_cast<T>(0.5)), v * s);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_common.hpp b/3rdparty/glm/source/glm/ext/scalar_common.hpp
new file mode 100644
index 0000000..df04b6b
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_common.hpp
@@ -0,0 +1,181 @@
+/// @ref ext_scalar_common
+/// @file glm/ext/scalar_common.hpp
+///
+/// @defgroup ext_scalar_common GLM_EXT_scalar_common
+/// @ingroup ext
+///
+/// Exposes min and max functions for 3 to 4 scalar parameters.
+///
+/// Include <glm/ext/scalar_common.hpp> to use the features of this extension.
+///
+/// @see core_func_common
+/// @see ext_vector_common
+
+#pragma once
+
+// Dependency:
+#include "../common.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_common extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_scalar_common
+ /// @{
+
+ /// Returns the minimum component-wise values of 3 inputs
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T min(T a, T b, T c);
+
+ /// Returns the minimum component-wise values of 4 inputs
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T min(T a, T b, T c, T d);
+
+ /// Returns the maximum component-wise values of 3 inputs
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T max(T a, T b, T c);
+
+ /// Returns the maximum component-wise values of 4 inputs
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T max(T a, T b, T c, T d);
+
+ /// Returns the minimum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T fmin(T a, T b);
+
+	/// Returns the minimum component-wise values of 3 inputs. If any argument is NaN, the minimum of the remaining non-NaN arguments is returned.
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T fmin(T a, T b, T c);
+
+	/// Returns the minimum component-wise values of 4 inputs. If any argument is NaN, the minimum of the remaining non-NaN arguments is returned.
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T fmin(T a, T b, T c, T d);
+
+ /// Returns the maximum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T fmax(T a, T b);
+
+	/// Returns the maximum component-wise values of 3 inputs. If any argument is NaN, the maximum of the remaining non-NaN arguments is returned.
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T fmax(T a, T b, T C);
+
+	/// Returns the maximum component-wise values of 4 inputs. If any argument is NaN, the maximum of the remaining non-NaN arguments is returned.
+ ///
+ /// @tparam T A floating-point scalar type.
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ /// @see ext_scalar_common
+ template<typename T>
+ GLM_FUNC_DECL T fmax(T a, T b, T C, T D);
+
+ /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam genType Floating-point scalar types.
+ ///
+ /// @see ext_scalar_common
+ template<typename genType>
+ GLM_FUNC_DECL genType fclamp(genType x, genType minVal, genType maxVal);
+
+ /// Simulate GL_CLAMP OpenGL wrap mode
+ ///
+ /// @tparam genType Floating-point scalar types.
+ ///
+ /// @see ext_scalar_common extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType clamp(genType const& Texcoord);
+
+ /// Simulate GL_REPEAT OpenGL wrap mode
+ ///
+ /// @tparam genType Floating-point scalar types.
+ ///
+ /// @see ext_scalar_common extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType repeat(genType const& Texcoord);
+
+ /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode
+ ///
+ /// @tparam genType Floating-point scalar types.
+ ///
+ /// @see ext_scalar_common extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType mirrorClamp(genType const& Texcoord);
+
+ /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode
+ ///
+ /// @tparam genType Floating-point scalar types.
+ ///
+ /// @see ext_scalar_common extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType mirrorRepeat(genType const& Texcoord);
+
+ /// Returns a value equal to the nearest integer to x.
+ /// The fraction 0.5 will round in a direction chosen by the
+ /// implementation, presumably the direction that is fastest.
+ ///
+ /// @param x The values of the argument must be greater or equal to zero.
+ /// @tparam genType floating point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/round.xml">GLSL round man page</a>
+ /// @see ext_scalar_common extension.
+ template<typename genType>
+ GLM_FUNC_DECL int iround(genType const& x);
+
+ /// Returns a value equal to the nearest integer to x.
+ /// The fraction 0.5 will round in a direction chosen by the
+ /// implementation, presumably the direction that is fastest.
+ ///
+ /// @param x The values of the argument must be greater or equal to zero.
+ /// @tparam genType floating point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/round.xml">GLSL round man page</a>
+ /// @see ext_scalar_common extension.
+ template<typename genType>
+ GLM_FUNC_DECL uint uround(genType const& x);
+
+ /// @}
+}//namespace glm
+
+#include "scalar_common.inl"
diff --git a/3rdparty/glm/source/glm/ext/scalar_common.inl b/3rdparty/glm/source/glm/ext/scalar_common.inl
new file mode 100644
index 0000000..2807a37
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_common.inl
@@ -0,0 +1,170 @@
+namespace glm
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER T min(T a, T b, T c)
+ {
+ return glm::min(glm::min(a, b), c);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T min(T a, T b, T c, T d)
+ {
+ return glm::min(glm::min(a, b), glm::min(c, d));
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T max(T a, T b, T c)
+ {
+ return glm::max(glm::max(a, b), c);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T max(T a, T b, T c, T d)
+ {
+ return glm::max(glm::max(a, b), glm::max(c, d));
+ }
+
+# if GLM_HAS_CXX11_STL
+ using std::fmin;
+# else
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fmin(T a, T b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point input");
+
+ if (isnan(a))
+ return b;
+ return min(a, b);
+ }
+# endif
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fmin(T a, T b, T c)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point input");
+
+ if (isnan(a))
+ return fmin(b, c);
+ if (isnan(b))
+ return fmin(a, c);
+ if (isnan(c))
+ return min(a, b);
+ return min(a, b, c);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fmin(T a, T b, T c, T d)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point input");
+
+ if (isnan(a))
+ return fmin(b, c, d);
+ if (isnan(b))
+ return min(a, fmin(c, d));
+ if (isnan(c))
+ return fmin(min(a, b), d);
+ if (isnan(d))
+ return min(a, b, c);
+ return min(a, b, c, d);
+ }
+
+
+# if GLM_HAS_CXX11_STL
+ using std::fmax;
+# else
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fmax(T a, T b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point input");
+
+ if (isnan(a))
+ return b;
+ return max(a, b);
+ }
+# endif
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fmax(T a, T b, T c)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point input");
+
+ if (isnan(a))
+ return fmax(b, c);
+ if (isnan(b))
+ return fmax(a, c);
+ if (isnan(c))
+ return max(a, b);
+ return max(a, b, c);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fmax(T a, T b, T c, T d)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point input");
+
+ if (isnan(a))
+ return fmax(b, c, d);
+ if (isnan(b))
+ return max(a, fmax(c, d));
+ if (isnan(c))
+ return fmax(max(a, b), d);
+ if (isnan(d))
+ return max(a, b, c);
+ return max(a, b, c, d);
+ }
+
+ // fclamp
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fclamp(genType x, genType minVal, genType maxVal)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'fclamp' only accept floating-point or integer inputs");
+ return fmin(fmax(x, minVal), maxVal);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType clamp(genType const& Texcoord)
+ {
+ return glm::clamp(Texcoord, static_cast<genType>(0), static_cast<genType>(1));
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType repeat(genType const& Texcoord)
+ {
+ return glm::fract(Texcoord);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType mirrorClamp(genType const& Texcoord)
+ {
+ return glm::fract(glm::abs(Texcoord));
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType mirrorRepeat(genType const& Texcoord)
+ {
+ genType const Abs = glm::abs(Texcoord);
+ genType const Clamp = glm::mod(glm::floor(Abs), static_cast<genType>(2));
+ genType const Floor = glm::floor(Abs);
+ genType const Rest = Abs - Floor;
+ genType const Mirror = Clamp + Rest;
+ return mix(Rest, static_cast<genType>(1) - Rest, Mirror >= static_cast<genType>(1));
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER int iround(genType const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'iround' only accept floating-point inputs");
+ assert(static_cast<genType>(0.0) <= x);
+
+ return static_cast<int>(x + static_cast<genType>(0.5));
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER uint uround(genType const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'uround' only accept floating-point inputs");
+ assert(static_cast<genType>(0.0) <= x);
+
+ return static_cast<uint>(x + static_cast<genType>(0.5));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_constants.hpp b/3rdparty/glm/source/glm/ext/scalar_constants.hpp
new file mode 100644
index 0000000..74e210d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_constants.hpp
@@ -0,0 +1,40 @@
+/// @ref ext_scalar_constants
+/// @file glm/ext/scalar_constants.hpp
+///
+/// @defgroup ext_scalar_constants GLM_EXT_scalar_constants
+/// @ingroup ext
+///
+/// Provides a list of constants and precomputed useful values.
+///
+/// Include <glm/ext/scalar_constants.hpp> to use the features of this extension.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_constants extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_scalar_constants
+ /// @{
+
+ /// Return the epsilon constant for floating point types.
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType epsilon();
+
+ /// Return the pi constant for floating point types.
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType pi();
+
+ /// Return the value of cos(1 / 2) for floating point types.
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType cos_one_over_two();
+
+ /// @}
+} //namespace glm
+
+#include "scalar_constants.inl"
diff --git a/3rdparty/glm/source/glm/ext/scalar_constants.inl b/3rdparty/glm/source/glm/ext/scalar_constants.inl
new file mode 100644
index 0000000..b475adf
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_constants.inl
@@ -0,0 +1,24 @@
+#include <limits>
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType epsilon()
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'epsilon' only accepts floating-point inputs");
+ return std::numeric_limits<genType>::epsilon();
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType pi()
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'pi' only accepts floating-point inputs");
+ return static_cast<genType>(3.14159265358979323846264338327950288);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType cos_one_over_two()
+ {
+ return genType(0.877582561890372716130286068203503191);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_int_sized.hpp b/3rdparty/glm/source/glm/ext/scalar_int_sized.hpp
new file mode 100644
index 0000000..8e9c511
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_int_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_scalar_int_sized
+/// @file glm/ext/scalar_int_sized.hpp
+///
+/// @defgroup ext_scalar_int_sized GLM_EXT_scalar_int_sized
+/// @ingroup ext
+///
+/// Exposes sized signed integer scalar types.
+///
+/// Include <glm/ext/scalar_int_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_uint_sized
+
+#pragma once
+
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_int_sized extension included")
+#endif
+
+namespace glm{
+namespace detail
+{
+# if GLM_HAS_EXTENDED_INTEGER_TYPE
+ typedef std::int8_t int8;
+ typedef std::int16_t int16;
+ typedef std::int32_t int32;
+# else
+ typedef signed char int8;
+ typedef signed short int16;
+ typedef signed int int32;
+#endif//
+
+ template<>
+ struct is_int<int8>
+ {
+ enum test {value = ~0};
+ };
+
+ template<>
+ struct is_int<int16>
+ {
+ enum test {value = ~0};
+ };
+
+ template<>
+ struct is_int<int64>
+ {
+ enum test {value = ~0};
+ };
+}//namespace detail
+
+
+ /// @addtogroup ext_scalar_int_sized
+ /// @{
+
+ /// 8 bit signed integer type.
+ typedef detail::int8 int8;
+
+ /// 16 bit signed integer type.
+ typedef detail::int16 int16;
+
+ /// 32 bit signed integer type.
+ typedef detail::int32 int32;
+
+ /// 64 bit signed integer type.
+ typedef detail::int64 int64;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_integer.hpp b/3rdparty/glm/source/glm/ext/scalar_integer.hpp
new file mode 100644
index 0000000..a2ca8a2
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_integer.hpp
@@ -0,0 +1,92 @@
+/// @ref ext_scalar_integer
+/// @file glm/ext/scalar_integer.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_scalar_integer GLM_EXT_scalar_integer
+/// @ingroup ext
+///
+/// Include <glm/ext/scalar_integer.hpp> to use the features of this extension.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "../detail/type_float.hpp"
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_integer extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_scalar_integer
+ /// @{
+
+ /// Return true if the value is a power of two number.
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL bool isPowerOfTwo(genIUType v);
+
+ /// Return the power of two number which value is just higher the input value,
+ /// round up to a power of two.
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType nextPowerOfTwo(genIUType v);
+
+ /// Return the power of two number which value is just lower the input value,
+ /// round down to a power of two.
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType prevPowerOfTwo(genIUType v);
+
+ /// Return true if the 'Value' is a multiple of 'Multiple'.
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL bool isMultiple(genIUType v, genIUType Multiple);
+
+ /// Higher multiple number of Source.
+ ///
+ /// @tparam genIUType Integer scalar or vector types.
+ ///
+ /// @param v Source value to which is applied the function
+ /// @param Multiple Must be a null or positive value
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType nextMultiple(genIUType v, genIUType Multiple);
+
+ /// Lower multiple number of Source.
+ ///
+ /// @tparam genIUType Integer scalar or vector types.
+ ///
+ /// @param v Source value to which is applied the function
+ /// @param Multiple Must be a null or positive value
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType prevMultiple(genIUType v, genIUType Multiple);
+
+ /// Returns the bit number of the Nth significant bit set to
+ /// 1 in the binary representation of value.
+ /// If value bitcount is less than the Nth significant bit, -1 will be returned.
+ ///
+ /// @tparam genIUType Signed or unsigned integer scalar types.
+ ///
+ /// @see ext_scalar_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL int findNSB(genIUType x, int significantBitCount);
+
+ /// @}
+} //namespace glm
+
+#include "scalar_integer.inl"
diff --git a/3rdparty/glm/source/glm/ext/scalar_integer.inl b/3rdparty/glm/source/glm/ext/scalar_integer.inl
new file mode 100644
index 0000000..efba960
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_integer.inl
@@ -0,0 +1,243 @@
+#include "../integer.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q, bool compute = false>
+ struct compute_ceilShift
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T)
+ {
+ return v;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_ceilShift<L, T, Q, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T Shift)
+ {
+ return v | (v >> Shift);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q, bool isSigned = true>
+ struct compute_ceilPowerOfTwo
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(!std::numeric_limits<T>::is_iec559, "'ceilPowerOfTwo' only accept integer scalar or vector inputs");
+
+ vec<L, T, Q> const Sign(sign(x));
+
+ vec<L, T, Q> v(abs(x));
+
+ v = v - static_cast<T>(1);
+ v = v | (v >> static_cast<T>(1));
+ v = v | (v >> static_cast<T>(2));
+ v = v | (v >> static_cast<T>(4));
+ v = compute_ceilShift<L, T, Q, sizeof(T) >= 2>::call(v, 8);
+ v = compute_ceilShift<L, T, Q, sizeof(T) >= 4>::call(v, 16);
+ v = compute_ceilShift<L, T, Q, sizeof(T) >= 8>::call(v, 32);
+ return (v + static_cast<T>(1)) * Sign;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_ceilPowerOfTwo<L, T, Q, false>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(!std::numeric_limits<T>::is_iec559, "'ceilPowerOfTwo' only accept integer scalar or vector inputs");
+
+ vec<L, T, Q> v(x);
+
+ v = v - static_cast<T>(1);
+ v = v | (v >> static_cast<T>(1));
+ v = v | (v >> static_cast<T>(2));
+ v = v | (v >> static_cast<T>(4));
+ v = compute_ceilShift<L, T, Q, sizeof(T) >= 2>::call(v, 8);
+ v = compute_ceilShift<L, T, Q, sizeof(T) >= 4>::call(v, 16);
+ v = compute_ceilShift<L, T, Q, sizeof(T) >= 8>::call(v, 32);
+ return v + static_cast<T>(1);
+ }
+ };
+
+ template<bool is_float, bool is_signed>
+ struct compute_ceilMultiple{};
+
+ template<>
+ struct compute_ceilMultiple<true, true>
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if(Source > genType(0))
+ return Source + (Multiple - std::fmod(Source, Multiple));
+ else
+ return Source + std::fmod(-Source, Multiple);
+ }
+ };
+
+ template<>
+ struct compute_ceilMultiple<false, false>
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ genType Tmp = Source - genType(1);
+ return Tmp + (Multiple - (Tmp % Multiple));
+ }
+ };
+
+ template<>
+ struct compute_ceilMultiple<false, true>
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ assert(Multiple > genType(0));
+ if(Source > genType(0))
+ {
+ genType Tmp = Source - genType(1);
+ return Tmp + (Multiple - (Tmp % Multiple));
+ }
+ else
+ return Source + (-Source % Multiple);
+ }
+ };
+
+ template<bool is_float, bool is_signed>
+ struct compute_floorMultiple{};
+
+ template<>
+ struct compute_floorMultiple<true, true>
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if(Source >= genType(0))
+ return Source - std::fmod(Source, Multiple);
+ else
+ return Source - std::fmod(Source, Multiple) - Multiple;
+ }
+ };
+
+ template<>
+ struct compute_floorMultiple<false, false>
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if(Source >= genType(0))
+ return Source - Source % Multiple;
+ else
+ {
+ genType Tmp = Source + genType(1);
+ return Tmp - Tmp % Multiple - Multiple;
+ }
+ }
+ };
+
+ template<>
+ struct compute_floorMultiple<false, true>
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if(Source >= genType(0))
+ return Source - Source % Multiple;
+ else
+ {
+ genType Tmp = Source + genType(1);
+ return Tmp - Tmp % Multiple - Multiple;
+ }
+ }
+ };
+}//namespace detail
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER bool isPowerOfTwo(genIUType Value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'isPowerOfTwo' only accept integer inputs");
+
+ genIUType const Result = glm::abs(Value);
+ return !(Result & (Result - 1));
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType nextPowerOfTwo(genIUType value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'nextPowerOfTwo' only accept integer inputs");
+
+ return detail::compute_ceilPowerOfTwo<1, genIUType, defaultp, std::numeric_limits<genIUType>::is_signed>::call(vec<1, genIUType, defaultp>(value)).x;
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType prevPowerOfTwo(genIUType value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'prevPowerOfTwo' only accept integer inputs");
+
+ return isPowerOfTwo(value) ? value : static_cast<genIUType>(static_cast<genIUType>(1) << static_cast<genIUType>(findMSB(value)));
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER bool isMultiple(genIUType Value, genIUType Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'isMultiple' only accept integer inputs");
+
+ return isMultiple(vec<1, genIUType>(Value), vec<1, genIUType>(Multiple)).x;
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType nextMultiple(genIUType Source, genIUType Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'nextMultiple' only accept integer inputs");
+
+ return detail::compute_ceilMultiple<std::numeric_limits<genIUType>::is_iec559, std::numeric_limits<genIUType>::is_signed>::call(Source, Multiple);
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType prevMultiple(genIUType Source, genIUType Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'prevMultiple' only accept integer inputs");
+
+ return detail::compute_floorMultiple<std::numeric_limits<genIUType>::is_iec559, std::numeric_limits<genIUType>::is_signed>::call(Source, Multiple);
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER int findNSB(genIUType x, int significantBitCount)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findNSB' only accept integer inputs");
+
+ if(bitCount(x) < significantBitCount)
+ return -1;
+
+ genIUType const One = static_cast<genIUType>(1);
+ int bitPos = 0;
+
+ genIUType key = x;
+ int nBitCount = significantBitCount;
+ int Step = sizeof(x) * 8 / 2;
+ while (key > One)
+ {
+ genIUType Mask = static_cast<genIUType>((One << Step) - One);
+ genIUType currentKey = key & Mask;
+ int currentBitCount = bitCount(currentKey);
+ if (nBitCount > currentBitCount)
+ {
+ nBitCount -= currentBitCount;
+ bitPos += Step;
+ key >>= static_cast<genIUType>(Step);
+ }
+ else
+ {
+ key = key & Mask;
+ }
+
+ Step >>= 1;
+ }
+
+ return static_cast<int>(bitPos);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_packing.hpp b/3rdparty/glm/source/glm/ext/scalar_packing.hpp
new file mode 100644
index 0000000..18b85b7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_packing.hpp
@@ -0,0 +1,32 @@
+/// @ref ext_scalar_packing
+/// @file glm/ext/scalar_packing.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_scalar_packing GLM_EXT_scalar_packing
+/// @ingroup ext
+///
+/// Include <glm/ext/scalar_packing.hpp> to use the features of this extension.
+///
+/// This extension provides a set of function to convert scalar values to packed
+/// formats.
+
+#pragma once
+
+// Dependency:
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_packing extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_scalar_packing
+ /// @{
+
+
+ /// @}
+}// namespace glm
+
+#include "scalar_packing.inl"
diff --git a/3rdparty/imguicolortextedit/source b/3rdparty/glm/source/glm/ext/scalar_packing.inl
index e69de29..e69de29 100644
--- a/3rdparty/imguicolortextedit/source
+++ b/3rdparty/glm/source/glm/ext/scalar_packing.inl
diff --git a/3rdparty/glm/source/glm/ext/scalar_reciprocal.hpp b/3rdparty/glm/source/glm/ext/scalar_reciprocal.hpp
new file mode 100644
index 0000000..1c7b81d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_reciprocal.hpp
@@ -0,0 +1,135 @@
+/// @ref ext_scalar_reciprocal
+/// @file glm/ext/scalar_reciprocal.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_scalar_reciprocal GLM_EXT_scalar_reciprocal
+/// @ingroup ext
+///
+/// Include <glm/ext/scalar_reciprocal.hpp> to use the features of this extension.
+///
+/// Define secant, cosecant and cotangent functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_reciprocal extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_scalar_reciprocal
+ /// @{
+
+ /// Secant function.
+ /// hypotenuse / adjacent or 1 / cos(x)
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType sec(genType angle);
+
+ /// Cosecant function.
+ /// hypotenuse / opposite or 1 / sin(x)
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType csc(genType angle);
+
+ /// Cotangent function.
+ /// adjacent / opposite or 1 / tan(x)
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType cot(genType angle);
+
+ /// Inverse secant function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType asec(genType x);
+
+ /// Inverse cosecant function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acsc(genType x);
+
+ /// Inverse cotangent function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acot(genType x);
+
+ /// Secant hyperbolic function.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType sech(genType angle);
+
+ /// Cosecant hyperbolic function.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType csch(genType angle);
+
+ /// Cotangent hyperbolic function.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType coth(genType angle);
+
+ /// Inverse secant hyperbolic function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType asech(genType x);
+
+ /// Inverse cosecant hyperbolic function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acsch(genType x);
+
+ /// Inverse cotangent hyperbolic function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_scalar_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acoth(genType x);
+
+ /// @}
+}//namespace glm
+
+#include "scalar_reciprocal.inl"
diff --git a/3rdparty/glm/source/glm/ext/scalar_reciprocal.inl b/3rdparty/glm/source/glm/ext/scalar_reciprocal.inl
new file mode 100644
index 0000000..570a406
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_reciprocal.inl
@@ -0,0 +1,107 @@
+/// @ref ext_scalar_reciprocal
+
+#include "../trigonometric.hpp"
+#include <limits>
+
+namespace glm
+{
+ // sec
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType sec(genType angle)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'sec' only accept floating-point values");
+ return genType(1) / glm::cos(angle);
+ }
+
+ // csc
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType csc(genType angle)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'csc' only accept floating-point values");
+ return genType(1) / glm::sin(angle);
+ }
+
+ // cot
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType cot(genType angle)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'cot' only accept floating-point values");
+
+ genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0);
+ return glm::tan(pi_over_2 - angle);
+ }
+
+ // asec
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType asec(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'asec' only accept floating-point values");
+ return acos(genType(1) / x);
+ }
+
+ // acsc
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType acsc(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acsc' only accept floating-point values");
+ return asin(genType(1) / x);
+ }
+
+ // acot
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType acot(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acot' only accept floating-point values");
+
+ genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0);
+ return pi_over_2 - atan(x);
+ }
+
+ // sech
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType sech(genType angle)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'sech' only accept floating-point values");
+ return genType(1) / glm::cosh(angle);
+ }
+
+ // csch
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType csch(genType angle)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'csch' only accept floating-point values");
+ return genType(1) / glm::sinh(angle);
+ }
+
+ // coth
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType coth(genType angle)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'coth' only accept floating-point values");
+ return glm::cosh(angle) / glm::sinh(angle);
+ }
+
+ // asech
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType asech(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'asech' only accept floating-point values");
+ return acosh(genType(1) / x);
+ }
+
+ // acsch
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType acsch(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acsch' only accept floating-point values");
+ return asinh(genType(1) / x);
+ }
+
+ // acoth
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType acoth(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acoth' only accept floating-point values");
+ return atanh(genType(1) / x);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_relational.hpp b/3rdparty/glm/source/glm/ext/scalar_relational.hpp
new file mode 100644
index 0000000..3076a5e
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_relational.hpp
@@ -0,0 +1,65 @@
+/// @ref ext_scalar_relational
+/// @file glm/ext/scalar_relational.hpp
+///
+/// @defgroup ext_scalar_relational GLM_EXT_scalar_relational
+/// @ingroup ext
+///
+/// Exposes comparison functions for scalar types that take a user defined epsilon values.
+///
+/// Include <glm/ext/scalar_relational.hpp> to use the features of this extension.
+///
+/// @see core_vector_relational
+/// @see ext_vector_relational
+/// @see ext_matrix_relational
+
+#pragma once
+
+// Dependencies
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_relational extension included")
+#endif
+
+namespace glm
+{
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam genType Floating-point or integer scalar types
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon);
+
+ /// Returns the component-wise comparison of |x - y| >= epsilon.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam genType Floating-point or integer scalar types
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon);
+
+ /// Returns the component-wise comparison between two scalars in term of ULPs.
+ /// True if this expression is satisfied.
+ ///
+ /// @param x First operand.
+ /// @param y Second operand.
+ /// @param ULPs Maximum difference in ULPs between the two operators to consider them equal.
+ ///
+ /// @tparam genType Floating-point or integer scalar types
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int ULPs);
+
+ /// Returns the component-wise comparison between two scalars in term of ULPs.
+ /// True if this expression is not satisfied.
+ ///
+ /// @param x First operand.
+ /// @param y Second operand.
+ /// @param ULPs Maximum difference in ULPs between the two operators to consider them not equal.
+ ///
+ /// @tparam genType Floating-point or integer scalar types
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs);
+
+ /// @}
+}//namespace glm
+
+#include "scalar_relational.inl"
diff --git a/3rdparty/glm/source/glm/ext/scalar_relational.inl b/3rdparty/glm/source/glm/ext/scalar_relational.inl
new file mode 100644
index 0000000..c85583e
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_relational.inl
@@ -0,0 +1,40 @@
+#include "../common.hpp"
+#include "../ext/scalar_int_sized.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/type_float.hpp"
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon)
+ {
+ return abs(x - y) <= epsilon;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon)
+ {
+ return abs(x - y) > epsilon;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int MaxULPs)
+ {
+ detail::float_t<genType> const a(x);
+ detail::float_t<genType> const b(y);
+
+ // Different signs means they do not match.
+ if(a.negative() != b.negative())
+ return false;
+
+ // Find the difference in ULPs.
+ typename detail::float_t<genType>::int_type const DiffULPs = abs(a.i - b.i);
+ return DiffULPs <= MaxULPs;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs)
+ {
+ return !equal(x, y, ULPs);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_uint_sized.hpp b/3rdparty/glm/source/glm/ext/scalar_uint_sized.hpp
new file mode 100644
index 0000000..fd5267f
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_uint_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_scalar_uint_sized
+/// @file glm/ext/scalar_uint_sized.hpp
+///
+/// @defgroup ext_scalar_uint_sized GLM_EXT_scalar_uint_sized
+/// @ingroup ext
+///
+/// Exposes sized unsigned integer scalar types.
+///
+/// Include <glm/ext/scalar_uint_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_int_sized
+
+#pragma once
+
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_uint_sized extension included")
+#endif
+
+namespace glm{
+namespace detail
+{
+# if GLM_HAS_EXTENDED_INTEGER_TYPE
+ typedef std::uint8_t uint8;
+ typedef std::uint16_t uint16;
+ typedef std::uint32_t uint32;
+# else
+ typedef unsigned char uint8;
+ typedef unsigned short uint16;
+ typedef unsigned int uint32;
+#endif
+
+ template<>
+ struct is_int<uint8>
+ {
+ enum test {value = ~0};
+ };
+
+ template<>
+ struct is_int<uint16>
+ {
+ enum test {value = ~0};
+ };
+
+ template<>
+ struct is_int<uint64>
+ {
+ enum test {value = ~0};
+ };
+}//namespace detail
+
+
+ /// @addtogroup ext_scalar_uint_sized
+ /// @{
+
+ /// 8 bit unsigned integer type.
+ typedef detail::uint8 uint8;
+
+ /// 16 bit unsigned integer type.
+ typedef detail::uint16 uint16;
+
+ /// 32 bit unsigned integer type.
+ typedef detail::uint32 uint32;
+
+ /// 64 bit unsigned integer type.
+ typedef detail::uint64 uint64;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/scalar_ulp.hpp b/3rdparty/glm/source/glm/ext/scalar_ulp.hpp
new file mode 100644
index 0000000..941ada3
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_ulp.hpp
@@ -0,0 +1,74 @@
+/// @ref ext_scalar_ulp
+/// @file glm/ext/scalar_ulp.hpp
+///
+/// @defgroup ext_scalar_ulp GLM_EXT_scalar_ulp
+/// @ingroup ext
+///
+/// Allow the measurement of the accuracy of a function against a reference
+/// implementation. This extension works on floating-point data and provide results
+/// in ULP.
+///
+/// Include <glm/ext/scalar_ulp.hpp> to use the features of this extension.
+///
+/// @see ext_vector_ulp
+/// @see ext_scalar_relational
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_int_sized.hpp"
+#include "../common.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_scalar_ulp extension included")
+#endif
+
+namespace glm
+{
+ /// Return the next ULP value(s) after the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see ext_scalar_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType nextFloat(genType x);
+
+ /// Return the previous ULP value(s) before the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see ext_scalar_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType prevFloat(genType x);
+
+ /// Return the value(s) ULP distance after the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see ext_scalar_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType nextFloat(genType x, int ULPs);
+
+ /// Return the value(s) ULP distance before the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see ext_scalar_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType prevFloat(genType x, int ULPs);
+
+ /// Return the distance in the number of ULP between 2 single-precision floating-point scalars.
+ ///
+ /// @see ext_scalar_ulp
+ GLM_FUNC_DECL int floatDistance(float x, float y);
+
+ /// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
+ ///
+ /// @see ext_scalar_ulp
+ GLM_FUNC_DECL int64 floatDistance(double x, double y);
+
+ /// @}
+}//namespace glm
+
+#include "scalar_ulp.inl"
diff --git a/3rdparty/glm/source/glm/ext/scalar_ulp.inl b/3rdparty/glm/source/glm/ext/scalar_ulp.inl
new file mode 100644
index 0000000..308df15
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/scalar_ulp.inl
@@ -0,0 +1,284 @@
+/// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+///
+/// Developed at SunPro, a Sun Microsystems, Inc. business.
+/// Permission to use, copy, modify, and distribute this
+/// software is freely granted, provided that this notice
+/// is preserved.
+
+#include "../detail/type_float.hpp"
+#include "../ext/scalar_constants.hpp"
+#include <cmath>
+#include <cfloat>
+
+#if(GLM_COMPILER & GLM_COMPILER_VC)
+# pragma warning(push)
+# pragma warning(disable : 4127)
+#endif
+
+typedef union
+{
+ float value;
+ /* FIXME: Assumes 32 bit int. */
+ unsigned int word;
+} ieee_float_shape_type;
+
+typedef union
+{
+ double value;
+ struct
+ {
+ int lsw;
+ int msw;
+ } parts;
+} ieee_double_shape_type;
+
+#define GLM_EXTRACT_WORDS(ix0,ix1,d) \
+ do { \
+ ieee_double_shape_type ew_u; \
+ ew_u.value = (d); \
+ (ix0) = ew_u.parts.msw; \
+ (ix1) = ew_u.parts.lsw; \
+ } while (0)
+
+#define GLM_GET_FLOAT_WORD(i,d) \
+ do { \
+ ieee_float_shape_type gf_u; \
+ gf_u.value = (d); \
+ (i) = gf_u.word; \
+ } while (0)
+
+#define GLM_SET_FLOAT_WORD(d,i) \
+ do { \
+ ieee_float_shape_type sf_u; \
+ sf_u.word = (i); \
+ (d) = sf_u.value; \
+ } while (0)
+
+#define GLM_INSERT_WORDS(d,ix0,ix1) \
+ do { \
+ ieee_double_shape_type iw_u; \
+ iw_u.parts.msw = (ix0); \
+ iw_u.parts.lsw = (ix1); \
+ (d) = iw_u.value; \
+ } while (0)
+
+namespace glm{
+namespace detail
+{
+ GLM_FUNC_QUALIFIER float nextafterf(float x, float y)
+ {
+ volatile float t;
+ int hx, hy, ix, iy;
+
+ GLM_GET_FLOAT_WORD(hx, x);
+ GLM_GET_FLOAT_WORD(hy, y);
+ ix = hx & 0x7fffffff; // |x|
+ iy = hy & 0x7fffffff; // |y|
+
+ if((ix > 0x7f800000) || // x is nan
+ (iy > 0x7f800000)) // y is nan
+ return x + y;
+ if(abs(y - x) <= epsilon<float>())
+ return y; // x=y, return y
+ if(ix == 0)
+ { // x == 0
+ GLM_SET_FLOAT_WORD(x, (hy & 0x80000000) | 1);// return +-minsubnormal
+ t = x * x;
+ if(abs(t - x) <= epsilon<float>())
+ return t;
+ else
+ return x; // raise underflow flag
+ }
+ if(hx >= 0)
+ { // x > 0
+ if(hx > hy) // x > y, x -= ulp
+ hx -= 1;
+ else // x < y, x += ulp
+ hx += 1;
+ }
+ else
+ { // x < 0
+ if(hy >= 0 || hx > hy) // x < y, x -= ulp
+ hx -= 1;
+ else // x > y, x += ulp
+ hx += 1;
+ }
+ hy = hx & 0x7f800000;
+ if(hy >= 0x7f800000)
+ return x + x; // overflow
+ if(hy < 0x00800000) // underflow
+ {
+ t = x * x;
+ if(abs(t - x) > epsilon<float>())
+ { // raise underflow flag
+ GLM_SET_FLOAT_WORD(y, hx);
+ return y;
+ }
+ }
+ GLM_SET_FLOAT_WORD(x, hx);
+ return x;
+ }
+
+ GLM_FUNC_QUALIFIER double nextafter(double x, double y)
+ {
+ volatile double t;
+ int hx, hy, ix, iy;
+ unsigned int lx, ly;
+
+ GLM_EXTRACT_WORDS(hx, lx, x);
+ GLM_EXTRACT_WORDS(hy, ly, y);
+ ix = hx & 0x7fffffff; // |x|
+ iy = hy & 0x7fffffff; // |y|
+
+ if(((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0) || // x is nan
+ ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0)) // y is nan
+ return x + y;
+ if(abs(y - x) <= epsilon<double>())
+ return y; // x=y, return y
+ if((ix | lx) == 0)
+ { // x == 0
+ GLM_INSERT_WORDS(x, hy & 0x80000000, 1); // return +-minsubnormal
+ t = x * x;
+ if(abs(t - x) <= epsilon<double>())
+ return t;
+ else
+ return x; // raise underflow flag
+ }
+ if(hx >= 0) { // x > 0
+ if(hx > hy || ((hx == hy) && (lx > ly))) { // x > y, x -= ulp
+ if(lx == 0) hx -= 1;
+ lx -= 1;
+ }
+ else { // x < y, x += ulp
+ lx += 1;
+ if(lx == 0) hx += 1;
+ }
+ }
+ else { // x < 0
+ if(hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))){// x < y, x -= ulp
+ if(lx == 0) hx -= 1;
+ lx -= 1;
+ }
+ else { // x > y, x += ulp
+ lx += 1;
+ if(lx == 0) hx += 1;
+ }
+ }
+ hy = hx & 0x7ff00000;
+ if(hy >= 0x7ff00000)
+ return x + x; // overflow
+ if(hy < 0x00100000)
+ { // underflow
+ t = x * x;
+ if(abs(t - x) > epsilon<double>())
+ { // raise underflow flag
+ GLM_INSERT_WORDS(y, hx, lx);
+ return y;
+ }
+ }
+ GLM_INSERT_WORDS(x, hx, lx);
+ return x;
+ }
+}//namespace detail
+}//namespace glm
+
+#if(GLM_COMPILER & GLM_COMPILER_VC)
+# pragma warning(pop)
+#endif
+
+namespace glm
+{
+ template<>
+ GLM_FUNC_QUALIFIER float nextFloat(float x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::nextafter(x, std::numeric_limits<float>::max());
+# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+ return detail::nextafterf(x, FLT_MAX);
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+ return __builtin_nextafterf(x, FLT_MAX);
+# else
+ return nextafterf(x, FLT_MAX);
+# endif
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER double nextFloat(double x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::nextafter(x, std::numeric_limits<double>::max());
+# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+ return detail::nextafter(x, std::numeric_limits<double>::max());
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+ return __builtin_nextafter(x, DBL_MAX);
+# else
+ return nextafter(x, DBL_MAX);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T nextFloat(T x, int ULPs)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'next_float' only accept floating-point input");
+ assert(ULPs >= 0);
+
+ T temp = x;
+ for(int i = 0; i < ULPs; ++i)
+ temp = nextFloat(temp);
+ return temp;
+ }
+
+ GLM_FUNC_QUALIFIER float prevFloat(float x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::nextafter(x, std::numeric_limits<float>::min());
+# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+ return detail::nextafterf(x, FLT_MIN);
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+ return __builtin_nextafterf(x, FLT_MIN);
+# else
+ return nextafterf(x, FLT_MIN);
+# endif
+ }
+
+ GLM_FUNC_QUALIFIER double prevFloat(double x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::nextafter(x, std::numeric_limits<double>::min());
+# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+ return _nextafter(x, DBL_MIN);
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+ return __builtin_nextafter(x, DBL_MIN);
+# else
+ return nextafter(x, DBL_MIN);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T prevFloat(T x, int ULPs)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'prev_float' only accept floating-point input");
+ assert(ULPs >= 0);
+
+ T temp = x;
+ for(int i = 0; i < ULPs; ++i)
+ temp = prevFloat(temp);
+ return temp;
+ }
+
+ GLM_FUNC_QUALIFIER int floatDistance(float x, float y)
+ {
+ detail::float_t<float> const a(x);
+ detail::float_t<float> const b(y);
+
+ return abs(a.i - b.i);
+ }
+
+ GLM_FUNC_QUALIFIER int64 floatDistance(double x, double y)
+ {
+ detail::float_t<double> const a(x);
+ detail::float_t<double> const b(y);
+
+ return abs(a.i - b.i);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool1.hpp b/3rdparty/glm/source/glm/ext/vector_bool1.hpp
new file mode 100644
index 0000000..002c320
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool1.hpp
@@ -0,0 +1,30 @@
+/// @ref ext_vector_bool1
+/// @file glm/ext/vector_bool1.hpp
+///
+/// @defgroup ext_vector_bool1 GLM_EXT_vector_bool1
+/// @ingroup ext
+///
+/// Exposes bvec1 vector type.
+///
+/// Include <glm/ext/vector_bool1.hpp> to use the features of this extension.
+///
+/// @see ext_vector_bool1_precision extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_bool1 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_bool1
+ /// @{
+
+ /// 1 components vector of boolean.
+ typedef vec<1, bool, defaultp> bvec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool1_precision.hpp b/3rdparty/glm/source/glm/ext/vector_bool1_precision.hpp
new file mode 100644
index 0000000..e62d3cf
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool1_precision.hpp
@@ -0,0 +1,34 @@
+/// @ref ext_vector_bool1_precision
+/// @file glm/ext/vector_bool1_precision.hpp
+///
+/// @defgroup ext_vector_bool1_precision GLM_EXT_vector_bool1_precision
+/// @ingroup ext
+///
+/// Exposes highp_bvec1, mediump_bvec1 and lowp_bvec1 types.
+///
+/// Include <glm/ext/vector_bool1_precision.hpp> to use the features of this extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_bool1_precision extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_bool1_precision
+ /// @{
+
+ /// 1 component vector of bool values.
+ typedef vec<1, bool, highp> highp_bvec1;
+
+ /// 1 component vector of bool values.
+ typedef vec<1, bool, mediump> mediump_bvec1;
+
+ /// 1 component vector of bool values.
+ typedef vec<1, bool, lowp> lowp_bvec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool2.hpp b/3rdparty/glm/source/glm/ext/vector_bool2.hpp
new file mode 100644
index 0000000..52288b7
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_bool2.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 2 components vector of boolean.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<2, bool, defaultp> bvec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool2_precision.hpp b/3rdparty/glm/source/glm/ext/vector_bool2_precision.hpp
new file mode 100644
index 0000000..4370933
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_bool2_precision.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 2 components vector of high qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, bool, highp> highp_bvec2;
+
+ /// 2 components vector of medium qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, bool, mediump> mediump_bvec2;
+
+ /// 2 components vector of low qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, bool, lowp> lowp_bvec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool3.hpp b/3rdparty/glm/source/glm/ext/vector_bool3.hpp
new file mode 100644
index 0000000..90a0b7e
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_bool3.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 3 components vector of boolean.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<3, bool, defaultp> bvec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool3_precision.hpp b/3rdparty/glm/source/glm/ext/vector_bool3_precision.hpp
new file mode 100644
index 0000000..89cd2d3
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_bool3_precision.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 3 components vector of high qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, bool, highp> highp_bvec3;
+
+ /// 3 components vector of medium qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, bool, mediump> mediump_bvec3;
+
+ /// 3 components vector of low qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, bool, lowp> lowp_bvec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool4.hpp b/3rdparty/glm/source/glm/ext/vector_bool4.hpp
new file mode 100644
index 0000000..18aa71b
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_bool4.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 4 components vector of boolean.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<4, bool, defaultp> bvec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_bool4_precision.hpp b/3rdparty/glm/source/glm/ext/vector_bool4_precision.hpp
new file mode 100644
index 0000000..79786e5
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_bool4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_bool4_precision.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 4 components vector of high qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, bool, highp> highp_bvec4;
+
+ /// 4 components vector of medium qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, bool, mediump> mediump_bvec4;
+
+ /// 4 components vector of low qualifier bool numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, bool, lowp> lowp_bvec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_common.hpp b/3rdparty/glm/source/glm/ext/vector_common.hpp
new file mode 100644
index 0000000..c0a2858
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_common.hpp
@@ -0,0 +1,228 @@
+/// @ref ext_vector_common
+/// @file glm/ext/vector_common.hpp
+///
+/// @defgroup ext_vector_common GLM_EXT_vector_common
+/// @ingroup ext
+///
+/// Exposes min and max functions for 3 to 4 vector parameters.
+///
+/// Include <glm/ext/vector_common.hpp> to use the features of this extension.
+///
+/// @see core_common
+/// @see ext_scalar_common
+
+#pragma once
+
+// Dependency:
+#include "../ext/scalar_common.hpp"
+#include "../common.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_common extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_common
+ /// @{
+
+ /// Return the minimum component-wise values of 3 inputs
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c);
+
+ /// Return the minimum component-wise values of 4 inputs
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d);
+
+ /// Return the maximum component-wise values of 3 inputs
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z);
+
+ /// Return the maximum component-wise values of 4 inputs
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max( vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z, vec<L, T, Q> const& w);
+
+ /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& x, T y);
+
+ /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c);
+
+ /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmin">std::fmin documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d);
+
+ /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, T b);
+
+ /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b);
+
+ /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c);
+
+ /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://en.cppreference.com/w/cpp/numeric/math/fmax">std::fmax documentation</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d);
+
+ /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_common
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fclamp(vec<L, T, Q> const& x, T minVal, T maxVal);
+
+ /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_common
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fclamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal);
+
+ /// Simulate GL_CLAMP OpenGL wrap mode
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_common extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> clamp(vec<L, T, Q> const& Texcoord);
+
+ /// Simulate GL_REPEAT OpenGL wrap mode
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_common extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> repeat(vec<L, T, Q> const& Texcoord);
+
+ /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_common extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mirrorClamp(vec<L, T, Q> const& Texcoord);
+
+ /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_common extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mirrorRepeat(vec<L, T, Q> const& Texcoord);
+
+ /// Returns a value equal to the nearest integer to x.
+ /// The fraction 0.5 will round in a direction chosen by the
+ /// implementation, presumably the direction that is fastest.
+ ///
+ /// @param x The values of the argument must be greater or equal to zero.
+ /// @tparam T floating point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/round.xml">GLSL round man page</a>
+ /// @see ext_vector_common extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> iround(vec<L, T, Q> const& x);
+
+ /// Returns a value equal to the nearest integer to x.
+ /// The fraction 0.5 will round in a direction chosen by the
+ /// implementation, presumably the direction that is fastest.
+ ///
+ /// @param x The values of the argument must be greater or equal to zero.
+ /// @tparam T floating point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/round.xml">GLSL round man page</a>
+ /// @see ext_vector_common extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, uint, Q> uround(vec<L, T, Q> const& x);
+
+ /// @}
+}//namespace glm
+
+#include "vector_common.inl"
diff --git a/3rdparty/glm/source/glm/ext/vector_common.inl b/3rdparty/glm/source/glm/ext/vector_common.inl
new file mode 100644
index 0000000..67817fc
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_common.inl
@@ -0,0 +1,147 @@
+#include "../detail/_vectorize.hpp"
+
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'min' only accept floating-point or integer inputs");
+ return glm::min(glm::min(x, y), z);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z, vec<L, T, Q> const& w)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'min' only accept floating-point or integer inputs");
+ return glm::min(glm::min(x, y), glm::min(z, w));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'max' only accept floating-point or integer inputs");
+ return glm::max(glm::max(x, y), z);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z, vec<L, T, Q> const& w)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'max' only accept floating-point or integer inputs");
+ return glm::max(glm::max(x, y), glm::max(z, w));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, T b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+ return detail::functor2<vec, L, T, Q>::call(fmin, a, vec<L, T, Q>(b));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+ return detail::functor2<vec, L, T, Q>::call(fmin, a, b);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+ return fmin(fmin(a, b), c);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+ return fmin(fmin(a, b), fmin(c, d));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, T b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+ return detail::functor2<vec, L, T, Q>::call(fmax, a, vec<L, T, Q>(b));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+ return detail::functor2<vec, L, T, Q>::call(fmax, a, b);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+ return fmax(fmax(a, b), c);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+ return fmax(fmax(a, b), fmax(c, d));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fclamp(vec<L, T, Q> const& x, T minVal, T maxVal)
+ {
+ return fmin(fmax(x, vec<L, T, Q>(minVal)), vec<L, T, Q>(maxVal));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fclamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal)
+ {
+ return fmin(fmax(x, minVal), maxVal);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> clamp(vec<L, T, Q> const& Texcoord)
+ {
+ return glm::clamp(Texcoord, vec<L, T, Q>(0), vec<L, T, Q>(1));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> repeat(vec<L, T, Q> const& Texcoord)
+ {
+ return glm::fract(Texcoord);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mirrorClamp(vec<L, T, Q> const& Texcoord)
+ {
+ return glm::fract(glm::abs(Texcoord));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mirrorRepeat(vec<L, T, Q> const& Texcoord)
+ {
+ vec<L, T, Q> const Abs = glm::abs(Texcoord);
+ vec<L, T, Q> const Clamp = glm::mod(glm::floor(Abs), vec<L, T, Q>(2));
+ vec<L, T, Q> const Floor = glm::floor(Abs);
+ vec<L, T, Q> const Rest = Abs - Floor;
+ vec<L, T, Q> const Mirror = Clamp + Rest;
+ return mix(Rest, vec<L, T, Q>(1) - Rest, glm::greaterThanEqual(Mirror, vec<L, T, Q>(1)));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> iround(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'iround' only accept floating-point inputs");
+ assert(all(lessThanEqual(vec<L, T, Q>(0), x)));
+
+ return vec<L, int, Q>(x + static_cast<T>(0.5));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, uint, Q> uround(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'uround' only accept floating-point inputs");
+ assert(all(lessThanEqual(vec<L, T, Q>(0), x)));
+
+ return vec<L, uint, Q>(x + static_cast<T>(0.5));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double1.hpp b/3rdparty/glm/source/glm/ext/vector_double1.hpp
new file mode 100644
index 0000000..3882667
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double1.hpp
@@ -0,0 +1,31 @@
+/// @ref ext_vector_double1
+/// @file glm/ext/vector_double1.hpp
+///
+/// @defgroup ext_vector_double1 GLM_EXT_vector_double1
+/// @ingroup ext
+///
+/// Exposes double-precision floating point vector type with one component.
+///
+/// Include <glm/ext/vector_double1.hpp> to use the features of this extension.
+///
+/// @see ext_vector_double1_precision extension.
+/// @see ext_vector_float1 extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_double1 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_double1
+ /// @{
+
+ /// 1 components vector of double-precision floating-point numbers.
+ typedef vec<1, double, defaultp> dvec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double1_precision.hpp b/3rdparty/glm/source/glm/ext/vector_double1_precision.hpp
new file mode 100644
index 0000000..1d47195
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double1_precision.hpp
@@ -0,0 +1,36 @@
+/// @ref ext_vector_double1_precision
+/// @file glm/ext/vector_double1_precision.hpp
+///
+/// @defgroup ext_vector_double1_precision GLM_EXT_vector_double1_precision
+/// @ingroup ext
+///
+/// Exposes highp_dvec1, mediump_dvec1 and lowp_dvec1 types.
+///
+/// Include <glm/ext/vector_double1_precision.hpp> to use the features of this extension.
+///
+/// @see ext_vector_double1
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_double1_precision extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_double1_precision
+ /// @{
+
+ /// 1 component vector of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, double, highp> highp_dvec1;
+
+ /// 1 component vector of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<1, double, mediump> mediump_dvec1;
+
+ /// 1 component vector of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<1, double, lowp> lowp_dvec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double2.hpp b/3rdparty/glm/source/glm/ext/vector_double2.hpp
new file mode 100644
index 0000000..60e3577
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_double2.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 2 components vector of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<2, double, defaultp> dvec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double2_precision.hpp b/3rdparty/glm/source/glm/ext/vector_double2_precision.hpp
new file mode 100644
index 0000000..fa53940
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_double2_precision.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 2 components vector of high double-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, double, highp> highp_dvec2;
+
+ /// 2 components vector of medium double-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, double, mediump> mediump_dvec2;
+
+ /// 2 components vector of low double-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, double, lowp> lowp_dvec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double3.hpp b/3rdparty/glm/source/glm/ext/vector_double3.hpp
new file mode 100644
index 0000000..6dfe4c6
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_double3.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 3 components vector of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<3, double, defaultp> dvec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double3_precision.hpp b/3rdparty/glm/source/glm/ext/vector_double3_precision.hpp
new file mode 100644
index 0000000..a8cfa37
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double3_precision.hpp
@@ -0,0 +1,34 @@
+/// @ref core
+/// @file glm/ext/vector_double3_precision.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 3 components vector of high double-qualifier floating-point numbers.
+ /// There is no guarantee on the actual qualifier.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, double, highp> highp_dvec3;
+
+ /// 3 components vector of medium double-qualifier floating-point numbers.
+ /// There is no guarantee on the actual qualifier.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, double, mediump> mediump_dvec3;
+
+ /// 3 components vector of low double-qualifier floating-point numbers.
+ /// There is no guarantee on the actual qualifier.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, double, lowp> lowp_dvec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double4.hpp b/3rdparty/glm/source/glm/ext/vector_double4.hpp
new file mode 100644
index 0000000..87f225f
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_double4.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 4 components vector of double-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<4, double, defaultp> dvec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_double4_precision.hpp b/3rdparty/glm/source/glm/ext/vector_double4_precision.hpp
new file mode 100644
index 0000000..09cafa1
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_double4_precision.hpp
@@ -0,0 +1,35 @@
+/// @ref core
+/// @file glm/ext/vector_double4_precision.hpp
+
+#pragma once
+#include "../detail/setup.hpp"
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 4 components vector of high double-qualifier floating-point numbers.
+ /// There is no guarantee on the actual qualifier.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, double, highp> highp_dvec4;
+
+ /// 4 components vector of medium double-qualifier floating-point numbers.
+ /// There is no guarantee on the actual qualifier.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, double, mediump> mediump_dvec4;
+
+ /// 4 components vector of low double-qualifier floating-point numbers.
+ /// There is no guarantee on the actual qualifier.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, double, lowp> lowp_dvec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float1.hpp b/3rdparty/glm/source/glm/ext/vector_float1.hpp
new file mode 100644
index 0000000..28acc2c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float1.hpp
@@ -0,0 +1,31 @@
+/// @ref ext_vector_float1
+/// @file glm/ext/vector_float1.hpp
+///
+/// @defgroup ext_vector_float1 GLM_EXT_vector_float1
+/// @ingroup ext
+///
+/// Exposes single-precision floating point vector type with one component.
+///
+/// Include <glm/ext/vector_float1.hpp> to use the features of this extension.
+///
+/// @see ext_vector_float1_precision extension.
+/// @see ext_vector_double1 extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_float1 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_float1
+ /// @{
+
+ /// 1 components vector of single-precision floating-point numbers.
+ typedef vec<1, float, defaultp> vec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float1_precision.hpp b/3rdparty/glm/source/glm/ext/vector_float1_precision.hpp
new file mode 100644
index 0000000..6e8dad8
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float1_precision.hpp
@@ -0,0 +1,36 @@
+/// @ref ext_vector_float1_precision
+/// @file glm/ext/vector_float1_precision.hpp
+///
+/// @defgroup ext_vector_float1_precision GLM_EXT_vector_float1_precision
+/// @ingroup ext
+///
+/// Exposes highp_vec1, mediump_vec1 and lowp_vec1 types.
+///
+/// Include <glm/ext/vector_float1_precision.hpp> to use the features of this extension.
+///
+/// @see ext_vector_float1 extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_float1_precision extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_float1_precision
+ /// @{
+
+ /// 1 component vector of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, float, highp> highp_vec1;
+
+ /// 1 component vector of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<1, float, mediump> mediump_vec1;
+
+ /// 1 component vector of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<1, float, lowp> lowp_vec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float2.hpp b/3rdparty/glm/source/glm/ext/vector_float2.hpp
new file mode 100644
index 0000000..d31545d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_float2.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 2 components vector of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<2, float, defaultp> vec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float2_precision.hpp b/3rdparty/glm/source/glm/ext/vector_float2_precision.hpp
new file mode 100644
index 0000000..23c0820
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_float2_precision.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 2 components vector of high single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, float, highp> highp_vec2;
+
+ /// 2 components vector of medium single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, float, mediump> mediump_vec2;
+
+ /// 2 components vector of low single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<2, float, lowp> lowp_vec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float3.hpp b/3rdparty/glm/source/glm/ext/vector_float3.hpp
new file mode 100644
index 0000000..cd79a62
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_float3.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 3 components vector of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<3, float, defaultp> vec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float3_precision.hpp b/3rdparty/glm/source/glm/ext/vector_float3_precision.hpp
new file mode 100644
index 0000000..be640b5
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_float3_precision.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 3 components vector of high single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, float, highp> highp_vec3;
+
+ /// 3 components vector of medium single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, float, mediump> mediump_vec3;
+
+ /// 3 components vector of low single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<3, float, lowp> lowp_vec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float4.hpp b/3rdparty/glm/source/glm/ext/vector_float4.hpp
new file mode 100644
index 0000000..d84adcc
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_float4.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 4 components vector of single-precision floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<4, float, defaultp> vec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_float4_precision.hpp b/3rdparty/glm/source/glm/ext/vector_float4_precision.hpp
new file mode 100644
index 0000000..aede838
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_float4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_float4_precision.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector_precision
+ /// @{
+
+ /// 4 components vector of high single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, float, highp> highp_vec4;
+
+ /// 4 components vector of medium single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, float, mediump> mediump_vec4;
+
+ /// 4 components vector of low single-qualifier floating-point numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a>
+ typedef vec<4, float, lowp> lowp_vec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int1.hpp b/3rdparty/glm/source/glm/ext/vector_int1.hpp
new file mode 100644
index 0000000..dc86038
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int1.hpp
@@ -0,0 +1,32 @@
+/// @ref ext_vector_int1
+/// @file glm/ext/vector_int1.hpp
+///
+/// @defgroup ext_vector_int1 GLM_EXT_vector_int1
+/// @ingroup ext
+///
+/// Exposes ivec1 vector type.
+///
+/// Include <glm/ext/vector_int1.hpp> to use the features of this extension.
+///
+/// @see ext_vector_uint1 extension.
+/// @see ext_vector_int1_precision extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_int1 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_int1
+ /// @{
+
+ /// 1 component vector of signed integer numbers.
+ typedef vec<1, int, defaultp> ivec1;
+
+ /// @}
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/vector_int1_sized.hpp b/3rdparty/glm/source/glm/ext/vector_int1_sized.hpp
new file mode 100644
index 0000000..de0d4cf
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int1_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_int1_sized
+/// @file glm/ext/vector_int1_sized.hpp
+///
+/// @defgroup ext_vector_int1_sized GLM_EXT_vector_int1_sized
+/// @ingroup ext
+///
+/// Exposes sized signed integer vector types.
+///
+/// Include <glm/ext/vector_int1_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_int_sized
+/// @see ext_vector_uint1_sized
+
+#pragma once
+
+#include "../ext/vector_int1.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_int1_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_int1_sized
+ /// @{
+
+ /// 8 bit signed integer vector of 1 component type.
+ ///
+ /// @see ext_vector_int1_sized
+ typedef vec<1, int8, defaultp> i8vec1;
+
+ /// 16 bit signed integer vector of 1 component type.
+ ///
+ /// @see ext_vector_int1_sized
+ typedef vec<1, int16, defaultp> i16vec1;
+
+ /// 32 bit signed integer vector of 1 component type.
+ ///
+ /// @see ext_vector_int1_sized
+ typedef vec<1, int32, defaultp> i32vec1;
+
+ /// 64 bit signed integer vector of 1 component type.
+ ///
+ /// @see ext_vector_int1_sized
+ typedef vec<1, int64, defaultp> i64vec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int2.hpp b/3rdparty/glm/source/glm/ext/vector_int2.hpp
new file mode 100644
index 0000000..aef803e
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_int2.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 2 components vector of signed integer numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<2, int, defaultp> ivec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int2_sized.hpp b/3rdparty/glm/source/glm/ext/vector_int2_sized.hpp
new file mode 100644
index 0000000..1fd57ee
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int2_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_int2_sized
+/// @file glm/ext/vector_int2_sized.hpp
+///
+/// @defgroup ext_vector_int2_sized GLM_EXT_vector_int2_sized
+/// @ingroup ext
+///
+/// Exposes sized signed integer vector of 2 components type.
+///
+/// Include <glm/ext/vector_int2_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_int_sized
+/// @see ext_vector_uint2_sized
+
+#pragma once
+
+#include "../ext/vector_int2.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_int2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_int2_sized
+ /// @{
+
+ /// 8 bit signed integer vector of 2 components type.
+ ///
+ /// @see ext_vector_int2_sized
+ typedef vec<2, int8, defaultp> i8vec2;
+
+ /// 16 bit signed integer vector of 2 components type.
+ ///
+ /// @see ext_vector_int2_sized
+ typedef vec<2, int16, defaultp> i16vec2;
+
+ /// 32 bit signed integer vector of 2 components type.
+ ///
+ /// @see ext_vector_int2_sized
+ typedef vec<2, int32, defaultp> i32vec2;
+
+ /// 64 bit signed integer vector of 2 components type.
+ ///
+ /// @see ext_vector_int2_sized
+ typedef vec<2, int64, defaultp> i64vec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int3.hpp b/3rdparty/glm/source/glm/ext/vector_int3.hpp
new file mode 100644
index 0000000..4767e61
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_int3.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 3 components vector of signed integer numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<3, int, defaultp> ivec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int3_sized.hpp b/3rdparty/glm/source/glm/ext/vector_int3_sized.hpp
new file mode 100644
index 0000000..085a3fe
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int3_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_int3_sized
+/// @file glm/ext/vector_int3_sized.hpp
+///
+/// @defgroup ext_vector_int3_sized GLM_EXT_vector_int3_sized
+/// @ingroup ext
+///
+/// Exposes sized signed integer vector of 3 components type.
+///
+/// Include <glm/ext/vector_int3_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_int_sized
+/// @see ext_vector_uint3_sized
+
+#pragma once
+
+#include "../ext/vector_int3.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_int3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_int3_sized
+ /// @{
+
+ /// 8 bit signed integer vector of 3 components type.
+ ///
+ /// @see ext_vector_int3_sized
+ typedef vec<3, int8, defaultp> i8vec3;
+
+ /// 16 bit signed integer vector of 3 components type.
+ ///
+ /// @see ext_vector_int3_sized
+ typedef vec<3, int16, defaultp> i16vec3;
+
+ /// 32 bit signed integer vector of 3 components type.
+ ///
+ /// @see ext_vector_int3_sized
+ typedef vec<3, int32, defaultp> i32vec3;
+
+ /// 64 bit signed integer vector of 3 components type.
+ ///
+ /// @see ext_vector_int3_sized
+ typedef vec<3, int64, defaultp> i64vec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int4.hpp b/3rdparty/glm/source/glm/ext/vector_int4.hpp
new file mode 100644
index 0000000..bb23adf
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_int4.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 4 components vector of signed integer numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<4, int, defaultp> ivec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_int4_sized.hpp b/3rdparty/glm/source/glm/ext/vector_int4_sized.hpp
new file mode 100644
index 0000000..c63d465
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_int4_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_int4_sized
+/// @file glm/ext/vector_int4_sized.hpp
+///
+/// @defgroup ext_vector_int4_sized GLM_EXT_vector_int4_sized
+/// @ingroup ext
+///
+/// Exposes sized signed integer vector of 4 components type.
+///
+/// Include <glm/ext/vector_int4_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_int_sized
+/// @see ext_vector_uint4_sized
+
+#pragma once
+
+#include "../ext/vector_int4.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_int4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_int4_sized
+ /// @{
+
+ /// 8 bit signed integer vector of 4 components type.
+ ///
+ /// @see ext_vector_int4_sized
+ typedef vec<4, int8, defaultp> i8vec4;
+
+ /// 16 bit signed integer vector of 4 components type.
+ ///
+ /// @see ext_vector_int4_sized
+ typedef vec<4, int16, defaultp> i16vec4;
+
+ /// 32 bit signed integer vector of 4 components type.
+ ///
+ /// @see ext_vector_int4_sized
+ typedef vec<4, int32, defaultp> i32vec4;
+
+ /// 64 bit signed integer vector of 4 components type.
+ ///
+ /// @see ext_vector_int4_sized
+ typedef vec<4, int64, defaultp> i64vec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_integer.hpp b/3rdparty/glm/source/glm/ext/vector_integer.hpp
new file mode 100644
index 0000000..1304dd8
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_integer.hpp
@@ -0,0 +1,149 @@
+/// @ref ext_vector_integer
+/// @file glm/ext/vector_integer.hpp
+///
+/// @see core (dependence)
+/// @see ext_vector_integer (dependence)
+///
+/// @defgroup ext_vector_integer GLM_EXT_vector_integer
+/// @ingroup ext
+///
+/// Include <glm/ext/vector_integer.hpp> to use the features of this extension.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_integer extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_integer
+ /// @{
+
+ /// Return true if the value is a power of two number.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> isPowerOfTwo(vec<L, T, Q> const& v);
+
+ /// Return the power of two number which value is just higher the input value,
+ /// round up to a power of two.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> nextPowerOfTwo(vec<L, T, Q> const& v);
+
+ /// Return the power of two number which value is just lower the input value,
+ /// round down to a power of two.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prevPowerOfTwo(vec<L, T, Q> const& v);
+
+ /// Return true if the 'Value' is a multiple of 'Multiple'.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> isMultiple(vec<L, T, Q> const& v, T Multiple);
+
+ /// Return true if the 'Value' is a multiple of 'Multiple'.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> isMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+ /// Higher multiple number of Source.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @param v Source values to which is applied the function
+ /// @param Multiple Must be a null or positive value
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> nextMultiple(vec<L, T, Q> const& v, T Multiple);
+
+ /// Higher multiple number of Source.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @param v Source values to which is applied the function
+ /// @param Multiple Must be a null or positive value
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> nextMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+ /// Lower multiple number of Source.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @param v Source values to which is applied the function
+ /// @param Multiple Must be a null or positive value
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prevMultiple(vec<L, T, Q> const& v, T Multiple);
+
+ /// Lower multiple number of Source.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed or unsigned integer scalar types.
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @param v Source values to which is applied the function
+ /// @param Multiple Must be a null or positive value
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prevMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+ /// Returns the bit number of the Nth significant bit set to
+ /// 1 in the binary representation of value.
+ /// If value bitcount is less than the Nth significant bit, -1 will be returned.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar types.
+ ///
+ /// @see ext_vector_integer
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> findNSB(vec<L, T, Q> const& Source, vec<L, int, Q> SignificantBitCount);
+
+ /// @}
+} //namespace glm
+
+#include "vector_integer.inl"
diff --git a/3rdparty/glm/source/glm/ext/vector_integer.inl b/3rdparty/glm/source/glm/ext/vector_integer.inl
new file mode 100644
index 0000000..cefb132
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_integer.inl
@@ -0,0 +1,85 @@
+#include "scalar_integer.hpp"
+
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> isPowerOfTwo(vec<L, T, Q> const& Value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isPowerOfTwo' only accept integer inputs");
+
+ vec<L, T, Q> const Result(abs(Value));
+ return equal(Result & (Result - vec<L, T, Q>(1)), vec<L, T, Q>(0));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> nextPowerOfTwo(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextPowerOfTwo' only accept integer inputs");
+
+ return detail::compute_ceilPowerOfTwo<L, T, Q, std::numeric_limits<T>::is_signed>::call(v);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> prevPowerOfTwo(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevPowerOfTwo' only accept integer inputs");
+
+ return detail::functor1<vec, L, T, T, Q>::call(prevPowerOfTwo, v);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> isMultiple(vec<L, T, Q> const& Value, T Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isMultiple' only accept integer inputs");
+
+ return equal(Value % Multiple, vec<L, T, Q>(0));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> isMultiple(vec<L, T, Q> const& Value, vec<L, T, Q> const& Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isMultiple' only accept integer inputs");
+
+ return equal(Value % Multiple, vec<L, T, Q>(0));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> nextMultiple(vec<L, T, Q> const& Source, T Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextMultiple' only accept integer inputs");
+
+ return detail::functor2<vec, L, T, Q>::call(nextMultiple, Source, vec<L, T, Q>(Multiple));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> nextMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextMultiple' only accept integer inputs");
+
+ return detail::functor2<vec, L, T, Q>::call(nextMultiple, Source, Multiple);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> prevMultiple(vec<L, T, Q> const& Source, T Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevMultiple' only accept integer inputs");
+
+ return detail::functor2<vec, L, T, Q>::call(prevMultiple, Source, vec<L, T, Q>(Multiple));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> prevMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevMultiple' only accept integer inputs");
+
+ return detail::functor2<vec, L, T, Q>::call(prevMultiple, Source, Multiple);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> findNSB(vec<L, T, Q> const& Source, vec<L, int, Q> SignificantBitCount)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findNSB' only accept integer inputs");
+
+ return detail::functor2_vec_int<L, T, Q>::call(findNSB, Source, SignificantBitCount);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_packing.hpp b/3rdparty/glm/source/glm/ext/vector_packing.hpp
new file mode 100644
index 0000000..76e5d0c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_packing.hpp
@@ -0,0 +1,32 @@
+/// @ref ext_vector_packing
+/// @file glm/ext/vector_packing.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_vector_packing GLM_EXT_vector_packing
+/// @ingroup ext
+///
+/// Include <glm/ext/vector_packing.hpp> to use the features of this extension.
+///
+/// This extension provides a set of function to convert vectors to packed
+/// formats.
+
+#pragma once
+
+// Dependency:
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_packing extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_packing
+ /// @{
+
+
+ /// @}
+}// namespace glm
+
+#include "vector_packing.inl"
diff --git a/3rdparty/tracy/tracy b/3rdparty/glm/source/glm/ext/vector_packing.inl
index e69de29..e69de29 100644
--- a/3rdparty/tracy/tracy
+++ b/3rdparty/glm/source/glm/ext/vector_packing.inl
diff --git a/3rdparty/glm/source/glm/ext/vector_reciprocal.hpp b/3rdparty/glm/source/glm/ext/vector_reciprocal.hpp
new file mode 100644
index 0000000..b383e3c
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_reciprocal.hpp
@@ -0,0 +1,135 @@
+/// @ref ext_vector_reciprocal
+/// @file glm/ext/vector_reciprocal.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_vector_reciprocal GLM_EXT_vector_reciprocal
+/// @ingroup ext
+///
+/// Include <glm/ext/vector_reciprocal.hpp> to use the features of this extension.
+///
+/// Define secant, cosecant and cotangent functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_reciprocal extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_reciprocal
+ /// @{
+
+ /// Secant function.
+ /// hypotenuse / adjacent or 1 / cos(x)
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType sec(genType angle);
+
+ /// Cosecant function.
+ /// hypotenuse / opposite or 1 / sin(x)
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType csc(genType angle);
+
+ /// Cotangent function.
+ /// adjacent / opposite or 1 / tan(x)
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType cot(genType angle);
+
+ /// Inverse secant function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType asec(genType x);
+
+ /// Inverse cosecant function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acsc(genType x);
+
+ /// Inverse cotangent function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acot(genType x);
+
+ /// Secant hyperbolic function.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType sech(genType angle);
+
+ /// Cosecant hyperbolic function.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType csch(genType angle);
+
+ /// Cotangent hyperbolic function.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType coth(genType angle);
+
+ /// Inverse secant hyperbolic function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType asech(genType x);
+
+ /// Inverse cosecant hyperbolic function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acsch(genType x);
+
+ /// Inverse cotangent hyperbolic function.
+ ///
+ /// @return Return an angle expressed in radians.
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see ext_vector_reciprocal
+ template<typename genType>
+ GLM_FUNC_DECL genType acoth(genType x);
+
+ /// @}
+}//namespace glm
+
+#include "vector_reciprocal.inl"
diff --git a/3rdparty/glm/source/glm/ext/vector_reciprocal.inl b/3rdparty/glm/source/glm/ext/vector_reciprocal.inl
new file mode 100644
index 0000000..0d3c25f
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_reciprocal.inl
@@ -0,0 +1,105 @@
+/// @ref ext_vector_reciprocal
+
+#include "../trigonometric.hpp"
+#include <limits>
+
+namespace glm
+{
+ // sec
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> sec(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'sec' only accept floating-point inputs");
+ return static_cast<T>(1) / detail::functor1<vec, L, T, T, Q>::call(cos, x);
+ }
+
+ // csc
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> csc(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'csc' only accept floating-point inputs");
+ return static_cast<T>(1) / detail::functor1<vec, L, T, T, Q>::call(sin, x);
+ }
+
+ // cot
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> cot(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'cot' only accept floating-point inputs");
+ T const pi_over_2 = static_cast<T>(3.1415926535897932384626433832795 / 2.0);
+ return detail::functor1<vec, L, T, T, Q>::call(tan, pi_over_2 - x);
+ }
+
+ // asec
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> asec(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'asec' only accept floating-point inputs");
+ return detail::functor1<vec, L, T, T, Q>::call(acos, static_cast<T>(1) / x);
+ }
+
+ // acsc
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> acsc(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acsc' only accept floating-point inputs");
+ return detail::functor1<vec, L, T, T, Q>::call(asin, static_cast<T>(1) / x);
+ }
+
+ // acot
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> acot(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acot' only accept floating-point inputs");
+ T const pi_over_2 = static_cast<T>(3.1415926535897932384626433832795 / 2.0);
+ return pi_over_2 - detail::functor1<vec, L, T, T, Q>::call(atan, x);
+ }
+
+ // sech
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> sech(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'sech' only accept floating-point inputs");
+ return static_cast<T>(1) / detail::functor1<vec, L, T, T, Q>::call(cosh, x);
+ }
+
+ // csch
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> csch(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'csch' only accept floating-point inputs");
+ return static_cast<T>(1) / detail::functor1<vec, L, T, T, Q>::call(sinh, x);
+ }
+
+ // coth
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> coth(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'coth' only accept floating-point inputs");
+ return glm::cosh(x) / glm::sinh(x);
+ }
+
+ // asech
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> asech(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'asech' only accept floating-point inputs");
+ return detail::functor1<vec, L, T, T, Q>::call(acosh, static_cast<T>(1) / x);
+ }
+
+ // acsch
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> acsch(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acsch' only accept floating-point inputs");
+ return detail::functor1<vec, L, T, T, Q>::call(asinh, static_cast<T>(1) / x);
+ }
+
+ // acoth
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> acoth(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acoth' only accept floating-point inputs");
+ return detail::functor1<vec, L, T, T, Q>::call(atanh, static_cast<T>(1) / x);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_relational.hpp b/3rdparty/glm/source/glm/ext/vector_relational.hpp
new file mode 100644
index 0000000..1c2367d
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_relational.hpp
@@ -0,0 +1,107 @@
+/// @ref ext_vector_relational
+/// @file glm/ext/vector_relational.hpp
+///
+/// @see core (dependence)
+/// @see ext_scalar_integer (dependence)
+///
+/// @defgroup ext_vector_relational GLM_EXT_vector_relational
+/// @ingroup ext
+///
+/// Exposes comparison functions for vector types that take user defined epsilon values.
+///
+/// Include <glm/ext/vector_relational.hpp> to use the features of this extension.
+///
+/// @see core_vector_relational
+/// @see ext_scalar_relational
+/// @see ext_matrix_relational
+
+#pragma once
+
+// Dependencies
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_relational extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_relational
+ /// @{
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T epsilon);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon);
+
+ /// Returns the component-wise comparison of |x - y| >= epsilon.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T epsilon);
+
+ /// Returns the component-wise comparison of |x - y| >= epsilon.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon);
+
+ /// Returns the component-wise comparison between two vectors in term of ULPs.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int ULPs);
+
+ /// Returns the component-wise comparison between two vectors in term of ULPs.
+ /// True if this expression is satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& ULPs);
+
+ /// Returns the component-wise comparison between two vectors in term of ULPs.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int ULPs);
+
+ /// Returns the component-wise comparison between two vectors in term of ULPs.
+ /// True if this expression is not satisfied.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& ULPs);
+
+ /// @}
+}//namespace glm
+
+#include "vector_relational.inl"
diff --git a/3rdparty/glm/source/glm/ext/vector_relational.inl b/3rdparty/glm/source/glm/ext/vector_relational.inl
new file mode 100644
index 0000000..7a39ab5
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_relational.inl
@@ -0,0 +1,75 @@
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/type_float.hpp"
+
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T Epsilon)
+ {
+ return equal(x, y, vec<L, T, Q>(Epsilon));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& Epsilon)
+ {
+ return lessThanEqual(abs(x - y), Epsilon);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T Epsilon)
+ {
+ return notEqual(x, y, vec<L, T, Q>(Epsilon));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& Epsilon)
+ {
+ return greaterThan(abs(x - y), Epsilon);
+ }
+
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int MaxULPs)
+ {
+ return equal(x, y, vec<L, int, Q>(MaxULPs));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& MaxULPs)
+ {
+ vec<L, bool, Q> Result(false);
+ for(length_t i = 0; i < L; ++i)
+ {
+ detail::float_t<T> const a(x[i]);
+ detail::float_t<T> const b(y[i]);
+
+ // Different signs means they do not match.
+ if(a.negative() != b.negative())
+ {
+ // Check for equality to make sure +0==-0
+ Result[i] = a.mantissa() == b.mantissa() && a.exponent() == b.exponent();
+ }
+ else
+ {
+ // Find the difference in ULPs.
+ typename detail::float_t<T>::int_type const DiffULPs = abs(a.i - b.i);
+ Result[i] = DiffULPs <= MaxULPs[i];
+ }
+ }
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int MaxULPs)
+ {
+ return notEqual(x, y, vec<L, int, Q>(MaxULPs));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& MaxULPs)
+ {
+ return not_(equal(x, y, MaxULPs));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint1.hpp b/3rdparty/glm/source/glm/ext/vector_uint1.hpp
new file mode 100644
index 0000000..eb8a704
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint1.hpp
@@ -0,0 +1,32 @@
+/// @ref ext_vector_uint1
+/// @file glm/ext/vector_uint1.hpp
+///
+/// @defgroup ext_vector_uint1 GLM_EXT_vector_uint1
+/// @ingroup ext
+///
+/// Exposes uvec1 vector type.
+///
+/// Include <glm/ext/vector_uint1.hpp> to use the features of this extension.
+///
+/// @see ext_vector_int1 extension.
+/// @see ext_vector_uint1_precision extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_uint1 extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_uint1
+ /// @{
+
+ /// 1 component vector of unsigned integer numbers.
+ typedef vec<1, unsigned int, defaultp> uvec1;
+
+ /// @}
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/ext/vector_uint1_sized.hpp b/3rdparty/glm/source/glm/ext/vector_uint1_sized.hpp
new file mode 100644
index 0000000..2a938bb
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint1_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_uint1_sized
+/// @file glm/ext/vector_uint1_sized.hpp
+///
+/// @defgroup ext_vector_uint1_sized GLM_EXT_vector_uint1_sized
+/// @ingroup ext
+///
+/// Exposes sized unsigned integer vector types.
+///
+/// Include <glm/ext/vector_uint1_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_uint_sized
+/// @see ext_vector_int1_sized
+
+#pragma once
+
+#include "../ext/vector_uint1.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_uint1_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_uint1_sized
+ /// @{
+
+ /// 8 bit unsigned integer vector of 1 component type.
+ ///
+ /// @see ext_vector_uint1_sized
+ typedef vec<1, uint8, defaultp> u8vec1;
+
+ /// 16 bit unsigned integer vector of 1 component type.
+ ///
+ /// @see ext_vector_uint1_sized
+ typedef vec<1, uint16, defaultp> u16vec1;
+
+ /// 32 bit unsigned integer vector of 1 component type.
+ ///
+ /// @see ext_vector_uint1_sized
+ typedef vec<1, uint32, defaultp> u32vec1;
+
+ /// 64 bit unsigned integer vector of 1 component type.
+ ///
+ /// @see ext_vector_uint1_sized
+ typedef vec<1, uint64, defaultp> u64vec1;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint2.hpp b/3rdparty/glm/source/glm/ext/vector_uint2.hpp
new file mode 100644
index 0000000..03c00f5
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_uint2.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 2 components vector of unsigned integer numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<2, unsigned int, defaultp> uvec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint2_sized.hpp b/3rdparty/glm/source/glm/ext/vector_uint2_sized.hpp
new file mode 100644
index 0000000..620fdc6
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint2_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_uint2_sized
+/// @file glm/ext/vector_uint2_sized.hpp
+///
+/// @defgroup ext_vector_uint2_sized GLM_EXT_vector_uint2_sized
+/// @ingroup ext
+///
+/// Exposes sized unsigned integer vector of 2 components type.
+///
+/// Include <glm/ext/vector_uint2_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_uint_sized
+/// @see ext_vector_int2_sized
+
+#pragma once
+
+#include "../ext/vector_uint2.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_uint2_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_uint2_sized
+ /// @{
+
+ /// 8 bit unsigned integer vector of 2 components type.
+ ///
+ /// @see ext_vector_uint2_sized
+ typedef vec<2, uint8, defaultp> u8vec2;
+
+ /// 16 bit unsigned integer vector of 2 components type.
+ ///
+ /// @see ext_vector_uint2_sized
+ typedef vec<2, uint16, defaultp> u16vec2;
+
+ /// 32 bit unsigned integer vector of 2 components type.
+ ///
+ /// @see ext_vector_uint2_sized
+ typedef vec<2, uint32, defaultp> u32vec2;
+
+ /// 64 bit unsigned integer vector of 2 components type.
+ ///
+ /// @see ext_vector_uint2_sized
+ typedef vec<2, uint64, defaultp> u64vec2;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint3.hpp b/3rdparty/glm/source/glm/ext/vector_uint3.hpp
new file mode 100644
index 0000000..f5b41c4
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_uint3.hpp
+
+#pragma once
+#include "../detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 3 components vector of unsigned integer numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<3, unsigned int, defaultp> uvec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint3_sized.hpp b/3rdparty/glm/source/glm/ext/vector_uint3_sized.hpp
new file mode 100644
index 0000000..6f96b98
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint3_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_uint3_sized
+/// @file glm/ext/vector_uint3_sized.hpp
+///
+/// @defgroup ext_vector_uint3_sized GLM_EXT_vector_uint3_sized
+/// @ingroup ext
+///
+/// Exposes sized unsigned integer vector of 3 components type.
+///
+/// Include <glm/ext/vector_uint3_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_uint_sized
+/// @see ext_vector_int3_sized
+
+#pragma once
+
+#include "../ext/vector_uint3.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_uint3_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_uint3_sized
+ /// @{
+
+ /// 8 bit unsigned integer vector of 3 components type.
+ ///
+ /// @see ext_vector_uint3_sized
+ typedef vec<3, uint8, defaultp> u8vec3;
+
+ /// 16 bit unsigned integer vector of 3 components type.
+ ///
+ /// @see ext_vector_uint3_sized
+ typedef vec<3, uint16, defaultp> u16vec3;
+
+ /// 32 bit unsigned integer vector of 3 components type.
+ ///
+ /// @see ext_vector_uint3_sized
+ typedef vec<3, uint32, defaultp> u32vec3;
+
+ /// 64 bit unsigned integer vector of 3 components type.
+ ///
+ /// @see ext_vector_uint3_sized
+ typedef vec<3, uint64, defaultp> u64vec3;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint4.hpp b/3rdparty/glm/source/glm/ext/vector_uint4.hpp
new file mode 100644
index 0000000..32ced58
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_uint4.hpp
+
+#pragma once
+#include "../detail/type_vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_vector
+ /// @{
+
+ /// 4 components vector of unsigned integer numbers.
+ ///
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a>
+ typedef vec<4, unsigned int, defaultp> uvec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_uint4_sized.hpp b/3rdparty/glm/source/glm/ext/vector_uint4_sized.hpp
new file mode 100644
index 0000000..da992ea
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_uint4_sized.hpp
@@ -0,0 +1,49 @@
+/// @ref ext_vector_uint4_sized
+/// @file glm/ext/vector_uint4_sized.hpp
+///
+/// @defgroup ext_vector_uint4_sized GLM_EXT_vector_uint4_sized
+/// @ingroup ext
+///
+/// Exposes sized unsigned integer vector of 4 components type.
+///
+/// Include <glm/ext/vector_uint4_sized.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_uint_sized
+/// @see ext_vector_int4_sized
+
+#pragma once
+
+#include "../ext/vector_uint4.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_uint4_sized extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup ext_vector_uint4_sized
+ /// @{
+
+ /// 8 bit unsigned integer vector of 4 components type.
+ ///
+ /// @see ext_vector_uint4_sized
+ typedef vec<4, uint8, defaultp> u8vec4;
+
+ /// 16 bit unsigned integer vector of 4 components type.
+ ///
+ /// @see ext_vector_uint4_sized
+ typedef vec<4, uint16, defaultp> u16vec4;
+
+ /// 32 bit unsigned integer vector of 4 components type.
+ ///
+ /// @see ext_vector_uint4_sized
+ typedef vec<4, uint32, defaultp> u32vec4;
+
+ /// 64 bit unsigned integer vector of 4 components type.
+ ///
+ /// @see ext_vector_uint4_sized
+ typedef vec<4, uint64, defaultp> u64vec4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/ext/vector_ulp.hpp b/3rdparty/glm/source/glm/ext/vector_ulp.hpp
new file mode 100644
index 0000000..6210396
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_ulp.hpp
@@ -0,0 +1,109 @@
+/// @ref ext_vector_ulp
+/// @file glm/ext/vector_ulp.hpp
+///
+/// @defgroup ext_vector_ulp GLM_EXT_vector_ulp
+/// @ingroup ext
+///
+/// Allow the measurement of the accuracy of a function against a reference
+/// implementation. This extension works on floating-point data and provide results
+/// in ULP.
+///
+/// Include <glm/ext/vector_ulp.hpp> to use the features of this extension.
+///
+/// @see ext_scalar_ulp
+/// @see ext_scalar_relational
+/// @see ext_vector_relational
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_ulp.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_EXT_vector_ulp extension included")
+#endif
+
+namespace glm
+{
+ /// Return the next ULP value(s) after the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> nextFloat(vec<L, T, Q> const& x);
+
+ /// Return the value(s) ULP distance after the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, int ULPs);
+
+ /// Return the value(s) ULP distance after the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs);
+
+ /// Return the previous ULP value(s) before the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prevFloat(vec<L, T, Q> const& x);
+
+ /// Return the value(s) ULP distance before the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, int ULPs);
+
+ /// Return the value(s) ULP distance before the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs);
+
+ /// Return the distance in the number of ULP between 2 single-precision floating-point scalars.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> floatDistance(vec<L, float, Q> const& x, vec<L, float, Q> const& y);
+
+ /// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_scalar_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int64, Q> floatDistance(vec<L, double, Q> const& x, vec<L, double, Q> const& y);
+
+ /// @}
+}//namespace glm
+
+#include "vector_ulp.inl"
diff --git a/3rdparty/glm/source/glm/ext/vector_ulp.inl b/3rdparty/glm/source/glm/ext/vector_ulp.inl
new file mode 100644
index 0000000..91565ce
--- /dev/null
+++ b/3rdparty/glm/source/glm/ext/vector_ulp.inl
@@ -0,0 +1,74 @@
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> nextFloat(vec<L, T, Q> const& x)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = nextFloat(x[i]);
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, int ULPs)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = nextFloat(x[i], ULPs);
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = nextFloat(x[i], ULPs[i]);
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> prevFloat(vec<L, T, Q> const& x)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = prevFloat(x[i]);
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, int ULPs)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = prevFloat(x[i], ULPs);
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = prevFloat(x[i], ULPs[i]);
+ return Result;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int, Q> floatDistance(vec<L, float, Q> const& x, vec<L, float, Q> const& y)
+ {
+ vec<L, int, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = floatDistance(x[i], y[i]);
+ return Result;
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, int64, Q> floatDistance(vec<L, double, Q> const& x, vec<L, double, Q> const& y)
+ {
+ vec<L, int64, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = floatDistance(x[i], y[i]);
+ return Result;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/fwd.hpp b/3rdparty/glm/source/glm/fwd.hpp
new file mode 100644
index 0000000..89177f4
--- /dev/null
+++ b/3rdparty/glm/source/glm/fwd.hpp
@@ -0,0 +1,1233 @@
+#pragma once
+
+#include "detail/qualifier.hpp"
+
+namespace glm
+{
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+ typedef std::int8_t int8;
+ typedef std::int16_t int16;
+ typedef std::int32_t int32;
+ typedef std::int64_t int64;
+
+ typedef std::uint8_t uint8;
+ typedef std::uint16_t uint16;
+ typedef std::uint32_t uint32;
+ typedef std::uint64_t uint64;
+#else
+ typedef signed char int8;
+ typedef signed short int16;
+ typedef signed int int32;
+ typedef detail::int64 int64;
+
+ typedef unsigned char uint8;
+ typedef unsigned short uint16;
+ typedef unsigned int uint32;
+ typedef detail::uint64 uint64;
+#endif
+
+ // Scalar int
+
+ typedef int8 lowp_i8;
+ typedef int8 mediump_i8;
+ typedef int8 highp_i8;
+ typedef int8 i8;
+
+ typedef int8 lowp_int8;
+ typedef int8 mediump_int8;
+ typedef int8 highp_int8;
+
+ typedef int8 lowp_int8_t;
+ typedef int8 mediump_int8_t;
+ typedef int8 highp_int8_t;
+ typedef int8 int8_t;
+
+ typedef int16 lowp_i16;
+ typedef int16 mediump_i16;
+ typedef int16 highp_i16;
+ typedef int16 i16;
+
+ typedef int16 lowp_int16;
+ typedef int16 mediump_int16;
+ typedef int16 highp_int16;
+
+ typedef int16 lowp_int16_t;
+ typedef int16 mediump_int16_t;
+ typedef int16 highp_int16_t;
+ typedef int16 int16_t;
+
+ typedef int32 lowp_i32;
+ typedef int32 mediump_i32;
+ typedef int32 highp_i32;
+ typedef int32 i32;
+
+ typedef int32 lowp_int32;
+ typedef int32 mediump_int32;
+ typedef int32 highp_int32;
+
+ typedef int32 lowp_int32_t;
+ typedef int32 mediump_int32_t;
+ typedef int32 highp_int32_t;
+ typedef int32 int32_t;
+
+ typedef int64 lowp_i64;
+ typedef int64 mediump_i64;
+ typedef int64 highp_i64;
+ typedef int64 i64;
+
+ typedef int64 lowp_int64;
+ typedef int64 mediump_int64;
+ typedef int64 highp_int64;
+
+ typedef int64 lowp_int64_t;
+ typedef int64 mediump_int64_t;
+ typedef int64 highp_int64_t;
+ typedef int64 int64_t;
+
+ // Scalar uint
+
+ typedef unsigned int uint;
+
+ typedef uint8 lowp_u8;
+ typedef uint8 mediump_u8;
+ typedef uint8 highp_u8;
+ typedef uint8 u8;
+
+ typedef uint8 lowp_uint8;
+ typedef uint8 mediump_uint8;
+ typedef uint8 highp_uint8;
+
+ typedef uint8 lowp_uint8_t;
+ typedef uint8 mediump_uint8_t;
+ typedef uint8 highp_uint8_t;
+ typedef uint8 uint8_t;
+
+ typedef uint16 lowp_u16;
+ typedef uint16 mediump_u16;
+ typedef uint16 highp_u16;
+ typedef uint16 u16;
+
+ typedef uint16 lowp_uint16;
+ typedef uint16 mediump_uint16;
+ typedef uint16 highp_uint16;
+
+ typedef uint16 lowp_uint16_t;
+ typedef uint16 mediump_uint16_t;
+ typedef uint16 highp_uint16_t;
+ typedef uint16 uint16_t;
+
+ typedef uint32 lowp_u32;
+ typedef uint32 mediump_u32;
+ typedef uint32 highp_u32;
+ typedef uint32 u32;
+
+ typedef uint32 lowp_uint32;
+ typedef uint32 mediump_uint32;
+ typedef uint32 highp_uint32;
+
+ typedef uint32 lowp_uint32_t;
+ typedef uint32 mediump_uint32_t;
+ typedef uint32 highp_uint32_t;
+ typedef uint32 uint32_t;
+
+ typedef uint64 lowp_u64;
+ typedef uint64 mediump_u64;
+ typedef uint64 highp_u64;
+ typedef uint64 u64;
+
+ typedef uint64 lowp_uint64;
+ typedef uint64 mediump_uint64;
+ typedef uint64 highp_uint64;
+
+ typedef uint64 lowp_uint64_t;
+ typedef uint64 mediump_uint64_t;
+ typedef uint64 highp_uint64_t;
+ typedef uint64 uint64_t;
+
+ // Scalar float
+
+ typedef float lowp_f32;
+ typedef float mediump_f32;
+ typedef float highp_f32;
+ typedef float f32;
+
+ typedef float lowp_float32;
+ typedef float mediump_float32;
+ typedef float highp_float32;
+ typedef float float32;
+
+ typedef float lowp_float32_t;
+ typedef float mediump_float32_t;
+ typedef float highp_float32_t;
+ typedef float float32_t;
+
+
+ typedef double lowp_f64;
+ typedef double mediump_f64;
+ typedef double highp_f64;
+ typedef double f64;
+
+ typedef double lowp_float64;
+ typedef double mediump_float64;
+ typedef double highp_float64;
+ typedef double float64;
+
+ typedef double lowp_float64_t;
+ typedef double mediump_float64_t;
+ typedef double highp_float64_t;
+ typedef double float64_t;
+
+ // Vector bool
+
+ typedef vec<1, bool, lowp> lowp_bvec1;
+ typedef vec<2, bool, lowp> lowp_bvec2;
+ typedef vec<3, bool, lowp> lowp_bvec3;
+ typedef vec<4, bool, lowp> lowp_bvec4;
+
+ typedef vec<1, bool, mediump> mediump_bvec1;
+ typedef vec<2, bool, mediump> mediump_bvec2;
+ typedef vec<3, bool, mediump> mediump_bvec3;
+ typedef vec<4, bool, mediump> mediump_bvec4;
+
+ typedef vec<1, bool, highp> highp_bvec1;
+ typedef vec<2, bool, highp> highp_bvec2;
+ typedef vec<3, bool, highp> highp_bvec3;
+ typedef vec<4, bool, highp> highp_bvec4;
+
+ typedef vec<1, bool, defaultp> bvec1;
+ typedef vec<2, bool, defaultp> bvec2;
+ typedef vec<3, bool, defaultp> bvec3;
+ typedef vec<4, bool, defaultp> bvec4;
+
+ // Vector int
+
+ typedef vec<1, int, lowp> lowp_ivec1;
+ typedef vec<2, int, lowp> lowp_ivec2;
+ typedef vec<3, int, lowp> lowp_ivec3;
+ typedef vec<4, int, lowp> lowp_ivec4;
+
+ typedef vec<1, int, mediump> mediump_ivec1;
+ typedef vec<2, int, mediump> mediump_ivec2;
+ typedef vec<3, int, mediump> mediump_ivec3;
+ typedef vec<4, int, mediump> mediump_ivec4;
+
+ typedef vec<1, int, highp> highp_ivec1;
+ typedef vec<2, int, highp> highp_ivec2;
+ typedef vec<3, int, highp> highp_ivec3;
+ typedef vec<4, int, highp> highp_ivec4;
+
+ typedef vec<1, int, defaultp> ivec1;
+ typedef vec<2, int, defaultp> ivec2;
+ typedef vec<3, int, defaultp> ivec3;
+ typedef vec<4, int, defaultp> ivec4;
+
+ typedef vec<1, i8, lowp> lowp_i8vec1;
+ typedef vec<2, i8, lowp> lowp_i8vec2;
+ typedef vec<3, i8, lowp> lowp_i8vec3;
+ typedef vec<4, i8, lowp> lowp_i8vec4;
+
+ typedef vec<1, i8, mediump> mediump_i8vec1;
+ typedef vec<2, i8, mediump> mediump_i8vec2;
+ typedef vec<3, i8, mediump> mediump_i8vec3;
+ typedef vec<4, i8, mediump> mediump_i8vec4;
+
+ typedef vec<1, i8, highp> highp_i8vec1;
+ typedef vec<2, i8, highp> highp_i8vec2;
+ typedef vec<3, i8, highp> highp_i8vec3;
+ typedef vec<4, i8, highp> highp_i8vec4;
+
+ typedef vec<1, i8, defaultp> i8vec1;
+ typedef vec<2, i8, defaultp> i8vec2;
+ typedef vec<3, i8, defaultp> i8vec3;
+ typedef vec<4, i8, defaultp> i8vec4;
+
+ typedef vec<1, i16, lowp> lowp_i16vec1;
+ typedef vec<2, i16, lowp> lowp_i16vec2;
+ typedef vec<3, i16, lowp> lowp_i16vec3;
+ typedef vec<4, i16, lowp> lowp_i16vec4;
+
+ typedef vec<1, i16, mediump> mediump_i16vec1;
+ typedef vec<2, i16, mediump> mediump_i16vec2;
+ typedef vec<3, i16, mediump> mediump_i16vec3;
+ typedef vec<4, i16, mediump> mediump_i16vec4;
+
+ typedef vec<1, i16, highp> highp_i16vec1;
+ typedef vec<2, i16, highp> highp_i16vec2;
+ typedef vec<3, i16, highp> highp_i16vec3;
+ typedef vec<4, i16, highp> highp_i16vec4;
+
+ typedef vec<1, i16, defaultp> i16vec1;
+ typedef vec<2, i16, defaultp> i16vec2;
+ typedef vec<3, i16, defaultp> i16vec3;
+ typedef vec<4, i16, defaultp> i16vec4;
+
+ typedef vec<1, i32, lowp> lowp_i32vec1;
+ typedef vec<2, i32, lowp> lowp_i32vec2;
+ typedef vec<3, i32, lowp> lowp_i32vec3;
+ typedef vec<4, i32, lowp> lowp_i32vec4;
+
+ typedef vec<1, i32, mediump> mediump_i32vec1;
+ typedef vec<2, i32, mediump> mediump_i32vec2;
+ typedef vec<3, i32, mediump> mediump_i32vec3;
+ typedef vec<4, i32, mediump> mediump_i32vec4;
+
+ typedef vec<1, i32, highp> highp_i32vec1;
+ typedef vec<2, i32, highp> highp_i32vec2;
+ typedef vec<3, i32, highp> highp_i32vec3;
+ typedef vec<4, i32, highp> highp_i32vec4;
+
+ typedef vec<1, i32, defaultp> i32vec1;
+ typedef vec<2, i32, defaultp> i32vec2;
+ typedef vec<3, i32, defaultp> i32vec3;
+ typedef vec<4, i32, defaultp> i32vec4;
+
+ typedef vec<1, i64, lowp> lowp_i64vec1;
+ typedef vec<2, i64, lowp> lowp_i64vec2;
+ typedef vec<3, i64, lowp> lowp_i64vec3;
+ typedef vec<4, i64, lowp> lowp_i64vec4;
+
+ typedef vec<1, i64, mediump> mediump_i64vec1;
+ typedef vec<2, i64, mediump> mediump_i64vec2;
+ typedef vec<3, i64, mediump> mediump_i64vec3;
+ typedef vec<4, i64, mediump> mediump_i64vec4;
+
+ typedef vec<1, i64, highp> highp_i64vec1;
+ typedef vec<2, i64, highp> highp_i64vec2;
+ typedef vec<3, i64, highp> highp_i64vec3;
+ typedef vec<4, i64, highp> highp_i64vec4;
+
+ typedef vec<1, i64, defaultp> i64vec1;
+ typedef vec<2, i64, defaultp> i64vec2;
+ typedef vec<3, i64, defaultp> i64vec3;
+ typedef vec<4, i64, defaultp> i64vec4;
+
+ // Vector uint
+
+ typedef vec<1, uint, lowp> lowp_uvec1;
+ typedef vec<2, uint, lowp> lowp_uvec2;
+ typedef vec<3, uint, lowp> lowp_uvec3;
+ typedef vec<4, uint, lowp> lowp_uvec4;
+
+ typedef vec<1, uint, mediump> mediump_uvec1;
+ typedef vec<2, uint, mediump> mediump_uvec2;
+ typedef vec<3, uint, mediump> mediump_uvec3;
+ typedef vec<4, uint, mediump> mediump_uvec4;
+
+ typedef vec<1, uint, highp> highp_uvec1;
+ typedef vec<2, uint, highp> highp_uvec2;
+ typedef vec<3, uint, highp> highp_uvec3;
+ typedef vec<4, uint, highp> highp_uvec4;
+
+ typedef vec<1, uint, defaultp> uvec1;
+ typedef vec<2, uint, defaultp> uvec2;
+ typedef vec<3, uint, defaultp> uvec3;
+ typedef vec<4, uint, defaultp> uvec4;
+
+ typedef vec<1, u8, lowp> lowp_u8vec1;
+ typedef vec<2, u8, lowp> lowp_u8vec2;
+ typedef vec<3, u8, lowp> lowp_u8vec3;
+ typedef vec<4, u8, lowp> lowp_u8vec4;
+
+ typedef vec<1, u8, mediump> mediump_u8vec1;
+ typedef vec<2, u8, mediump> mediump_u8vec2;
+ typedef vec<3, u8, mediump> mediump_u8vec3;
+ typedef vec<4, u8, mediump> mediump_u8vec4;
+
+ typedef vec<1, u8, highp> highp_u8vec1;
+ typedef vec<2, u8, highp> highp_u8vec2;
+ typedef vec<3, u8, highp> highp_u8vec3;
+ typedef vec<4, u8, highp> highp_u8vec4;
+
+ typedef vec<1, u8, defaultp> u8vec1;
+ typedef vec<2, u8, defaultp> u8vec2;
+ typedef vec<3, u8, defaultp> u8vec3;
+ typedef vec<4, u8, defaultp> u8vec4;
+
+ typedef vec<1, u16, lowp> lowp_u16vec1;
+ typedef vec<2, u16, lowp> lowp_u16vec2;
+ typedef vec<3, u16, lowp> lowp_u16vec3;
+ typedef vec<4, u16, lowp> lowp_u16vec4;
+
+ typedef vec<1, u16, mediump> mediump_u16vec1;
+ typedef vec<2, u16, mediump> mediump_u16vec2;
+ typedef vec<3, u16, mediump> mediump_u16vec3;
+ typedef vec<4, u16, mediump> mediump_u16vec4;
+
+ typedef vec<1, u16, highp> highp_u16vec1;
+ typedef vec<2, u16, highp> highp_u16vec2;
+ typedef vec<3, u16, highp> highp_u16vec3;
+ typedef vec<4, u16, highp> highp_u16vec4;
+
+ typedef vec<1, u16, defaultp> u16vec1;
+ typedef vec<2, u16, defaultp> u16vec2;
+ typedef vec<3, u16, defaultp> u16vec3;
+ typedef vec<4, u16, defaultp> u16vec4;
+
+ typedef vec<1, u32, lowp> lowp_u32vec1;
+ typedef vec<2, u32, lowp> lowp_u32vec2;
+ typedef vec<3, u32, lowp> lowp_u32vec3;
+ typedef vec<4, u32, lowp> lowp_u32vec4;
+
+ typedef vec<1, u32, mediump> mediump_u32vec1;
+ typedef vec<2, u32, mediump> mediump_u32vec2;
+ typedef vec<3, u32, mediump> mediump_u32vec3;
+ typedef vec<4, u32, mediump> mediump_u32vec4;
+
+ typedef vec<1, u32, highp> highp_u32vec1;
+ typedef vec<2, u32, highp> highp_u32vec2;
+ typedef vec<3, u32, highp> highp_u32vec3;
+ typedef vec<4, u32, highp> highp_u32vec4;
+
+ typedef vec<1, u32, defaultp> u32vec1;
+ typedef vec<2, u32, defaultp> u32vec2;
+ typedef vec<3, u32, defaultp> u32vec3;
+ typedef vec<4, u32, defaultp> u32vec4;
+
+ typedef vec<1, u64, lowp> lowp_u64vec1;
+ typedef vec<2, u64, lowp> lowp_u64vec2;
+ typedef vec<3, u64, lowp> lowp_u64vec3;
+ typedef vec<4, u64, lowp> lowp_u64vec4;
+
+ typedef vec<1, u64, mediump> mediump_u64vec1;
+ typedef vec<2, u64, mediump> mediump_u64vec2;
+ typedef vec<3, u64, mediump> mediump_u64vec3;
+ typedef vec<4, u64, mediump> mediump_u64vec4;
+
+ typedef vec<1, u64, highp> highp_u64vec1;
+ typedef vec<2, u64, highp> highp_u64vec2;
+ typedef vec<3, u64, highp> highp_u64vec3;
+ typedef vec<4, u64, highp> highp_u64vec4;
+
+ typedef vec<1, u64, defaultp> u64vec1;
+ typedef vec<2, u64, defaultp> u64vec2;
+ typedef vec<3, u64, defaultp> u64vec3;
+ typedef vec<4, u64, defaultp> u64vec4;
+
+ // Vector float
+
+ typedef vec<1, float, lowp> lowp_vec1;
+ typedef vec<2, float, lowp> lowp_vec2;
+ typedef vec<3, float, lowp> lowp_vec3;
+ typedef vec<4, float, lowp> lowp_vec4;
+
+ typedef vec<1, float, mediump> mediump_vec1;
+ typedef vec<2, float, mediump> mediump_vec2;
+ typedef vec<3, float, mediump> mediump_vec3;
+ typedef vec<4, float, mediump> mediump_vec4;
+
+ typedef vec<1, float, highp> highp_vec1;
+ typedef vec<2, float, highp> highp_vec2;
+ typedef vec<3, float, highp> highp_vec3;
+ typedef vec<4, float, highp> highp_vec4;
+
+ typedef vec<1, float, defaultp> vec1;
+ typedef vec<2, float, defaultp> vec2;
+ typedef vec<3, float, defaultp> vec3;
+ typedef vec<4, float, defaultp> vec4;
+
+ typedef vec<1, float, lowp> lowp_fvec1;
+ typedef vec<2, float, lowp> lowp_fvec2;
+ typedef vec<3, float, lowp> lowp_fvec3;
+ typedef vec<4, float, lowp> lowp_fvec4;
+
+ typedef vec<1, float, mediump> mediump_fvec1;
+ typedef vec<2, float, mediump> mediump_fvec2;
+ typedef vec<3, float, mediump> mediump_fvec3;
+ typedef vec<4, float, mediump> mediump_fvec4;
+
+ typedef vec<1, float, highp> highp_fvec1;
+ typedef vec<2, float, highp> highp_fvec2;
+ typedef vec<3, float, highp> highp_fvec3;
+ typedef vec<4, float, highp> highp_fvec4;
+
+ typedef vec<1, f32, defaultp> fvec1;
+ typedef vec<2, f32, defaultp> fvec2;
+ typedef vec<3, f32, defaultp> fvec3;
+ typedef vec<4, f32, defaultp> fvec4;
+
+ typedef vec<1, f32, lowp> lowp_f32vec1;
+ typedef vec<2, f32, lowp> lowp_f32vec2;
+ typedef vec<3, f32, lowp> lowp_f32vec3;
+ typedef vec<4, f32, lowp> lowp_f32vec4;
+
+ typedef vec<1, f32, mediump> mediump_f32vec1;
+ typedef vec<2, f32, mediump> mediump_f32vec2;
+ typedef vec<3, f32, mediump> mediump_f32vec3;
+ typedef vec<4, f32, mediump> mediump_f32vec4;
+
+ typedef vec<1, f32, highp> highp_f32vec1;
+ typedef vec<2, f32, highp> highp_f32vec2;
+ typedef vec<3, f32, highp> highp_f32vec3;
+ typedef vec<4, f32, highp> highp_f32vec4;
+
+ typedef vec<1, f32, defaultp> f32vec1;
+ typedef vec<2, f32, defaultp> f32vec2;
+ typedef vec<3, f32, defaultp> f32vec3;
+ typedef vec<4, f32, defaultp> f32vec4;
+
+ typedef vec<1, f64, lowp> lowp_dvec1;
+ typedef vec<2, f64, lowp> lowp_dvec2;
+ typedef vec<3, f64, lowp> lowp_dvec3;
+ typedef vec<4, f64, lowp> lowp_dvec4;
+
+ typedef vec<1, f64, mediump> mediump_dvec1;
+ typedef vec<2, f64, mediump> mediump_dvec2;
+ typedef vec<3, f64, mediump> mediump_dvec3;
+ typedef vec<4, f64, mediump> mediump_dvec4;
+
+ typedef vec<1, f64, highp> highp_dvec1;
+ typedef vec<2, f64, highp> highp_dvec2;
+ typedef vec<3, f64, highp> highp_dvec3;
+ typedef vec<4, f64, highp> highp_dvec4;
+
+ typedef vec<1, f64, defaultp> dvec1;
+ typedef vec<2, f64, defaultp> dvec2;
+ typedef vec<3, f64, defaultp> dvec3;
+ typedef vec<4, f64, defaultp> dvec4;
+
+ typedef vec<1, f64, lowp> lowp_f64vec1;
+ typedef vec<2, f64, lowp> lowp_f64vec2;
+ typedef vec<3, f64, lowp> lowp_f64vec3;
+ typedef vec<4, f64, lowp> lowp_f64vec4;
+
+ typedef vec<1, f64, mediump> mediump_f64vec1;
+ typedef vec<2, f64, mediump> mediump_f64vec2;
+ typedef vec<3, f64, mediump> mediump_f64vec3;
+ typedef vec<4, f64, mediump> mediump_f64vec4;
+
+ typedef vec<1, f64, highp> highp_f64vec1;
+ typedef vec<2, f64, highp> highp_f64vec2;
+ typedef vec<3, f64, highp> highp_f64vec3;
+ typedef vec<4, f64, highp> highp_f64vec4;
+
+ typedef vec<1, f64, defaultp> f64vec1;
+ typedef vec<2, f64, defaultp> f64vec2;
+ typedef vec<3, f64, defaultp> f64vec3;
+ typedef vec<4, f64, defaultp> f64vec4;
+
+ // Matrix NxN
+
+ typedef mat<2, 2, f32, lowp> lowp_mat2;
+ typedef mat<3, 3, f32, lowp> lowp_mat3;
+ typedef mat<4, 4, f32, lowp> lowp_mat4;
+
+ typedef mat<2, 2, f32, mediump> mediump_mat2;
+ typedef mat<3, 3, f32, mediump> mediump_mat3;
+ typedef mat<4, 4, f32, mediump> mediump_mat4;
+
+ typedef mat<2, 2, f32, highp> highp_mat2;
+ typedef mat<3, 3, f32, highp> highp_mat3;
+ typedef mat<4, 4, f32, highp> highp_mat4;
+
+ typedef mat<2, 2, f32, defaultp> mat2;
+ typedef mat<3, 3, f32, defaultp> mat3;
+ typedef mat<4, 4, f32, defaultp> mat4;
+
+ typedef mat<2, 2, f32, lowp> lowp_fmat2;
+ typedef mat<3, 3, f32, lowp> lowp_fmat3;
+ typedef mat<4, 4, f32, lowp> lowp_fmat4;
+
+ typedef mat<2, 2, f32, mediump> mediump_fmat2;
+ typedef mat<3, 3, f32, mediump> mediump_fmat3;
+ typedef mat<4, 4, f32, mediump> mediump_fmat4;
+
+ typedef mat<2, 2, f32, highp> highp_fmat2;
+ typedef mat<3, 3, f32, highp> highp_fmat3;
+ typedef mat<4, 4, f32, highp> highp_fmat4;
+
+ typedef mat<2, 2, f32, defaultp> fmat2;
+ typedef mat<3, 3, f32, defaultp> fmat3;
+ typedef mat<4, 4, f32, defaultp> fmat4;
+
+ typedef mat<2, 2, f32, lowp> lowp_f32mat2;
+ typedef mat<3, 3, f32, lowp> lowp_f32mat3;
+ typedef mat<4, 4, f32, lowp> lowp_f32mat4;
+
+ typedef mat<2, 2, f32, mediump> mediump_f32mat2;
+ typedef mat<3, 3, f32, mediump> mediump_f32mat3;
+ typedef mat<4, 4, f32, mediump> mediump_f32mat4;
+
+ typedef mat<2, 2, f32, highp> highp_f32mat2;
+ typedef mat<3, 3, f32, highp> highp_f32mat3;
+ typedef mat<4, 4, f32, highp> highp_f32mat4;
+
+ typedef mat<2, 2, f32, defaultp> f32mat2;
+ typedef mat<3, 3, f32, defaultp> f32mat3;
+ typedef mat<4, 4, f32, defaultp> f32mat4;
+
+ typedef mat<2, 2, f64, lowp> lowp_dmat2;
+ typedef mat<3, 3, f64, lowp> lowp_dmat3;
+ typedef mat<4, 4, f64, lowp> lowp_dmat4;
+
+ typedef mat<2, 2, f64, mediump> mediump_dmat2;
+ typedef mat<3, 3, f64, mediump> mediump_dmat3;
+ typedef mat<4, 4, f64, mediump> mediump_dmat4;
+
+ typedef mat<2, 2, f64, highp> highp_dmat2;
+ typedef mat<3, 3, f64, highp> highp_dmat3;
+ typedef mat<4, 4, f64, highp> highp_dmat4;
+
+ typedef mat<2, 2, f64, defaultp> dmat2;
+ typedef mat<3, 3, f64, defaultp> dmat3;
+ typedef mat<4, 4, f64, defaultp> dmat4;
+
+ typedef mat<2, 2, f64, lowp> lowp_f64mat2;
+ typedef mat<3, 3, f64, lowp> lowp_f64mat3;
+ typedef mat<4, 4, f64, lowp> lowp_f64mat4;
+
+ typedef mat<2, 2, f64, mediump> mediump_f64mat2;
+ typedef mat<3, 3, f64, mediump> mediump_f64mat3;
+ typedef mat<4, 4, f64, mediump> mediump_f64mat4;
+
+ typedef mat<2, 2, f64, highp> highp_f64mat2;
+ typedef mat<3, 3, f64, highp> highp_f64mat3;
+ typedef mat<4, 4, f64, highp> highp_f64mat4;
+
+ typedef mat<2, 2, f64, defaultp> f64mat2;
+ typedef mat<3, 3, f64, defaultp> f64mat3;
+ typedef mat<4, 4, f64, defaultp> f64mat4;
+
+ // Matrix MxN
+
+ typedef mat<2, 2, f32, lowp> lowp_mat2x2;
+ typedef mat<2, 3, f32, lowp> lowp_mat2x3;
+ typedef mat<2, 4, f32, lowp> lowp_mat2x4;
+ typedef mat<3, 2, f32, lowp> lowp_mat3x2;
+ typedef mat<3, 3, f32, lowp> lowp_mat3x3;
+ typedef mat<3, 4, f32, lowp> lowp_mat3x4;
+ typedef mat<4, 2, f32, lowp> lowp_mat4x2;
+ typedef mat<4, 3, f32, lowp> lowp_mat4x3;
+ typedef mat<4, 4, f32, lowp> lowp_mat4x4;
+
+ typedef mat<2, 2, f32, mediump> mediump_mat2x2;
+ typedef mat<2, 3, f32, mediump> mediump_mat2x3;
+ typedef mat<2, 4, f32, mediump> mediump_mat2x4;
+ typedef mat<3, 2, f32, mediump> mediump_mat3x2;
+ typedef mat<3, 3, f32, mediump> mediump_mat3x3;
+ typedef mat<3, 4, f32, mediump> mediump_mat3x4;
+ typedef mat<4, 2, f32, mediump> mediump_mat4x2;
+ typedef mat<4, 3, f32, mediump> mediump_mat4x3;
+ typedef mat<4, 4, f32, mediump> mediump_mat4x4;
+
+ typedef mat<2, 2, f32, highp> highp_mat2x2;
+ typedef mat<2, 3, f32, highp> highp_mat2x3;
+ typedef mat<2, 4, f32, highp> highp_mat2x4;
+ typedef mat<3, 2, f32, highp> highp_mat3x2;
+ typedef mat<3, 3, f32, highp> highp_mat3x3;
+ typedef mat<3, 4, f32, highp> highp_mat3x4;
+ typedef mat<4, 2, f32, highp> highp_mat4x2;
+ typedef mat<4, 3, f32, highp> highp_mat4x3;
+ typedef mat<4, 4, f32, highp> highp_mat4x4;
+
+ typedef mat<2, 2, f32, defaultp> mat2x2;
+ typedef mat<3, 2, f32, defaultp> mat3x2;
+ typedef mat<4, 2, f32, defaultp> mat4x2;
+ typedef mat<2, 3, f32, defaultp> mat2x3;
+ typedef mat<3, 3, f32, defaultp> mat3x3;
+ typedef mat<4, 3, f32, defaultp> mat4x3;
+ typedef mat<2, 4, f32, defaultp> mat2x4;
+ typedef mat<3, 4, f32, defaultp> mat3x4;
+ typedef mat<4, 4, f32, defaultp> mat4x4;
+
+ typedef mat<2, 2, f32, lowp> lowp_fmat2x2;
+ typedef mat<2, 3, f32, lowp> lowp_fmat2x3;
+ typedef mat<2, 4, f32, lowp> lowp_fmat2x4;
+ typedef mat<3, 2, f32, lowp> lowp_fmat3x2;
+ typedef mat<3, 3, f32, lowp> lowp_fmat3x3;
+ typedef mat<3, 4, f32, lowp> lowp_fmat3x4;
+ typedef mat<4, 2, f32, lowp> lowp_fmat4x2;
+ typedef mat<4, 3, f32, lowp> lowp_fmat4x3;
+ typedef mat<4, 4, f32, lowp> lowp_fmat4x4;
+
+ typedef mat<2, 2, f32, mediump> mediump_fmat2x2;
+ typedef mat<2, 3, f32, mediump> mediump_fmat2x3;
+ typedef mat<2, 4, f32, mediump> mediump_fmat2x4;
+ typedef mat<3, 2, f32, mediump> mediump_fmat3x2;
+ typedef mat<3, 3, f32, mediump> mediump_fmat3x3;
+ typedef mat<3, 4, f32, mediump> mediump_fmat3x4;
+ typedef mat<4, 2, f32, mediump> mediump_fmat4x2;
+ typedef mat<4, 3, f32, mediump> mediump_fmat4x3;
+ typedef mat<4, 4, f32, mediump> mediump_fmat4x4;
+
+ typedef mat<2, 2, f32, highp> highp_fmat2x2;
+ typedef mat<2, 3, f32, highp> highp_fmat2x3;
+ typedef mat<2, 4, f32, highp> highp_fmat2x4;
+ typedef mat<3, 2, f32, highp> highp_fmat3x2;
+ typedef mat<3, 3, f32, highp> highp_fmat3x3;
+ typedef mat<3, 4, f32, highp> highp_fmat3x4;
+ typedef mat<4, 2, f32, highp> highp_fmat4x2;
+ typedef mat<4, 3, f32, highp> highp_fmat4x3;
+ typedef mat<4, 4, f32, highp> highp_fmat4x4;
+
+ typedef mat<2, 2, f32, defaultp> fmat2x2;
+ typedef mat<3, 2, f32, defaultp> fmat3x2;
+ typedef mat<4, 2, f32, defaultp> fmat4x2;
+ typedef mat<2, 3, f32, defaultp> fmat2x3;
+ typedef mat<3, 3, f32, defaultp> fmat3x3;
+ typedef mat<4, 3, f32, defaultp> fmat4x3;
+ typedef mat<2, 4, f32, defaultp> fmat2x4;
+ typedef mat<3, 4, f32, defaultp> fmat3x4;
+ typedef mat<4, 4, f32, defaultp> fmat4x4;
+
+ typedef mat<2, 2, f32, lowp> lowp_f32mat2x2;
+ typedef mat<2, 3, f32, lowp> lowp_f32mat2x3;
+ typedef mat<2, 4, f32, lowp> lowp_f32mat2x4;
+ typedef mat<3, 2, f32, lowp> lowp_f32mat3x2;
+ typedef mat<3, 3, f32, lowp> lowp_f32mat3x3;
+ typedef mat<3, 4, f32, lowp> lowp_f32mat3x4;
+ typedef mat<4, 2, f32, lowp> lowp_f32mat4x2;
+ typedef mat<4, 3, f32, lowp> lowp_f32mat4x3;
+ typedef mat<4, 4, f32, lowp> lowp_f32mat4x4;
+
+ typedef mat<2, 2, f32, mediump> mediump_f32mat2x2;
+ typedef mat<2, 3, f32, mediump> mediump_f32mat2x3;
+ typedef mat<2, 4, f32, mediump> mediump_f32mat2x4;
+ typedef mat<3, 2, f32, mediump> mediump_f32mat3x2;
+ typedef mat<3, 3, f32, mediump> mediump_f32mat3x3;
+ typedef mat<3, 4, f32, mediump> mediump_f32mat3x4;
+ typedef mat<4, 2, f32, mediump> mediump_f32mat4x2;
+ typedef mat<4, 3, f32, mediump> mediump_f32mat4x3;
+ typedef mat<4, 4, f32, mediump> mediump_f32mat4x4;
+
+ typedef mat<2, 2, f32, highp> highp_f32mat2x2;
+ typedef mat<2, 3, f32, highp> highp_f32mat2x3;
+ typedef mat<2, 4, f32, highp> highp_f32mat2x4;
+ typedef mat<3, 2, f32, highp> highp_f32mat3x2;
+ typedef mat<3, 3, f32, highp> highp_f32mat3x3;
+ typedef mat<3, 4, f32, highp> highp_f32mat3x4;
+ typedef mat<4, 2, f32, highp> highp_f32mat4x2;
+ typedef mat<4, 3, f32, highp> highp_f32mat4x3;
+ typedef mat<4, 4, f32, highp> highp_f32mat4x4;
+
+ typedef mat<2, 2, f32, defaultp> f32mat2x2;
+ typedef mat<3, 2, f32, defaultp> f32mat3x2;
+ typedef mat<4, 2, f32, defaultp> f32mat4x2;
+ typedef mat<2, 3, f32, defaultp> f32mat2x3;
+ typedef mat<3, 3, f32, defaultp> f32mat3x3;
+ typedef mat<4, 3, f32, defaultp> f32mat4x3;
+ typedef mat<2, 4, f32, defaultp> f32mat2x4;
+ typedef mat<3, 4, f32, defaultp> f32mat3x4;
+ typedef mat<4, 4, f32, defaultp> f32mat4x4;
+
+ typedef mat<2, 2, double, lowp> lowp_dmat2x2;
+ typedef mat<2, 3, double, lowp> lowp_dmat2x3;
+ typedef mat<2, 4, double, lowp> lowp_dmat2x4;
+ typedef mat<3, 2, double, lowp> lowp_dmat3x2;
+ typedef mat<3, 3, double, lowp> lowp_dmat3x3;
+ typedef mat<3, 4, double, lowp> lowp_dmat3x4;
+ typedef mat<4, 2, double, lowp> lowp_dmat4x2;
+ typedef mat<4, 3, double, lowp> lowp_dmat4x3;
+ typedef mat<4, 4, double, lowp> lowp_dmat4x4;
+
+ typedef mat<2, 2, double, mediump> mediump_dmat2x2;
+ typedef mat<2, 3, double, mediump> mediump_dmat2x3;
+ typedef mat<2, 4, double, mediump> mediump_dmat2x4;
+ typedef mat<3, 2, double, mediump> mediump_dmat3x2;
+ typedef mat<3, 3, double, mediump> mediump_dmat3x3;
+ typedef mat<3, 4, double, mediump> mediump_dmat3x4;
+ typedef mat<4, 2, double, mediump> mediump_dmat4x2;
+ typedef mat<4, 3, double, mediump> mediump_dmat4x3;
+ typedef mat<4, 4, double, mediump> mediump_dmat4x4;
+
+ typedef mat<2, 2, double, highp> highp_dmat2x2;
+ typedef mat<2, 3, double, highp> highp_dmat2x3;
+ typedef mat<2, 4, double, highp> highp_dmat2x4;
+ typedef mat<3, 2, double, highp> highp_dmat3x2;
+ typedef mat<3, 3, double, highp> highp_dmat3x3;
+ typedef mat<3, 4, double, highp> highp_dmat3x4;
+ typedef mat<4, 2, double, highp> highp_dmat4x2;
+ typedef mat<4, 3, double, highp> highp_dmat4x3;
+ typedef mat<4, 4, double, highp> highp_dmat4x4;
+
+ typedef mat<2, 2, double, defaultp> dmat2x2;
+ typedef mat<3, 2, double, defaultp> dmat3x2;
+ typedef mat<4, 2, double, defaultp> dmat4x2;
+ typedef mat<2, 3, double, defaultp> dmat2x3;
+ typedef mat<3, 3, double, defaultp> dmat3x3;
+ typedef mat<4, 3, double, defaultp> dmat4x3;
+ typedef mat<2, 4, double, defaultp> dmat2x4;
+ typedef mat<3, 4, double, defaultp> dmat3x4;
+ typedef mat<4, 4, double, defaultp> dmat4x4;
+
+ typedef mat<2, 2, f64, lowp> lowp_f64mat2x2;
+ typedef mat<2, 3, f64, lowp> lowp_f64mat2x3;
+ typedef mat<2, 4, f64, lowp> lowp_f64mat2x4;
+ typedef mat<3, 2, f64, lowp> lowp_f64mat3x2;
+ typedef mat<3, 3, f64, lowp> lowp_f64mat3x3;
+ typedef mat<3, 4, f64, lowp> lowp_f64mat3x4;
+ typedef mat<4, 2, f64, lowp> lowp_f64mat4x2;
+ typedef mat<4, 3, f64, lowp> lowp_f64mat4x3;
+ typedef mat<4, 4, f64, lowp> lowp_f64mat4x4;
+
+ typedef mat<2, 2, f64, mediump> mediump_f64mat2x2;
+ typedef mat<2, 3, f64, mediump> mediump_f64mat2x3;
+ typedef mat<2, 4, f64, mediump> mediump_f64mat2x4;
+ typedef mat<3, 2, f64, mediump> mediump_f64mat3x2;
+ typedef mat<3, 3, f64, mediump> mediump_f64mat3x3;
+ typedef mat<3, 4, f64, mediump> mediump_f64mat3x4;
+ typedef mat<4, 2, f64, mediump> mediump_f64mat4x2;
+ typedef mat<4, 3, f64, mediump> mediump_f64mat4x3;
+ typedef mat<4, 4, f64, mediump> mediump_f64mat4x4;
+
+ typedef mat<2, 2, f64, highp> highp_f64mat2x2;
+ typedef mat<2, 3, f64, highp> highp_f64mat2x3;
+ typedef mat<2, 4, f64, highp> highp_f64mat2x4;
+ typedef mat<3, 2, f64, highp> highp_f64mat3x2;
+ typedef mat<3, 3, f64, highp> highp_f64mat3x3;
+ typedef mat<3, 4, f64, highp> highp_f64mat3x4;
+ typedef mat<4, 2, f64, highp> highp_f64mat4x2;
+ typedef mat<4, 3, f64, highp> highp_f64mat4x3;
+ typedef mat<4, 4, f64, highp> highp_f64mat4x4;
+
+ typedef mat<2, 2, f64, defaultp> f64mat2x2;
+ typedef mat<3, 2, f64, defaultp> f64mat3x2;
+ typedef mat<4, 2, f64, defaultp> f64mat4x2;
+ typedef mat<2, 3, f64, defaultp> f64mat2x3;
+ typedef mat<3, 3, f64, defaultp> f64mat3x3;
+ typedef mat<4, 3, f64, defaultp> f64mat4x3;
+ typedef mat<2, 4, f64, defaultp> f64mat2x4;
+ typedef mat<3, 4, f64, defaultp> f64mat3x4;
+ typedef mat<4, 4, f64, defaultp> f64mat4x4;
+
+ // Signed integer matrix MxN
+
+ typedef mat<2, 2, int, lowp> lowp_imat2x2;
+ typedef mat<2, 3, int, lowp> lowp_imat2x3;
+ typedef mat<2, 4, int, lowp> lowp_imat2x4;
+ typedef mat<3, 2, int, lowp> lowp_imat3x2;
+ typedef mat<3, 3, int, lowp> lowp_imat3x3;
+ typedef mat<3, 4, int, lowp> lowp_imat3x4;
+ typedef mat<4, 2, int, lowp> lowp_imat4x2;
+ typedef mat<4, 3, int, lowp> lowp_imat4x3;
+ typedef mat<4, 4, int, lowp> lowp_imat4x4;
+
+ typedef mat<2, 2, int, mediump> mediump_imat2x2;
+ typedef mat<2, 3, int, mediump> mediump_imat2x3;
+ typedef mat<2, 4, int, mediump> mediump_imat2x4;
+ typedef mat<3, 2, int, mediump> mediump_imat3x2;
+ typedef mat<3, 3, int, mediump> mediump_imat3x3;
+ typedef mat<3, 4, int, mediump> mediump_imat3x4;
+ typedef mat<4, 2, int, mediump> mediump_imat4x2;
+ typedef mat<4, 3, int, mediump> mediump_imat4x3;
+ typedef mat<4, 4, int, mediump> mediump_imat4x4;
+
+ typedef mat<2, 2, int, highp> highp_imat2x2;
+ typedef mat<2, 3, int, highp> highp_imat2x3;
+ typedef mat<2, 4, int, highp> highp_imat2x4;
+ typedef mat<3, 2, int, highp> highp_imat3x2;
+ typedef mat<3, 3, int, highp> highp_imat3x3;
+ typedef mat<3, 4, int, highp> highp_imat3x4;
+ typedef mat<4, 2, int, highp> highp_imat4x2;
+ typedef mat<4, 3, int, highp> highp_imat4x3;
+ typedef mat<4, 4, int, highp> highp_imat4x4;
+
+ typedef mat<2, 2, int, defaultp> imat2x2;
+ typedef mat<3, 2, int, defaultp> imat3x2;
+ typedef mat<4, 2, int, defaultp> imat4x2;
+ typedef mat<2, 3, int, defaultp> imat2x3;
+ typedef mat<3, 3, int, defaultp> imat3x3;
+ typedef mat<4, 3, int, defaultp> imat4x3;
+ typedef mat<2, 4, int, defaultp> imat2x4;
+ typedef mat<3, 4, int, defaultp> imat3x4;
+ typedef mat<4, 4, int, defaultp> imat4x4;
+
+
+ typedef mat<2, 2, int8, lowp> lowp_i8mat2x2;
+ typedef mat<2, 3, int8, lowp> lowp_i8mat2x3;
+ typedef mat<2, 4, int8, lowp> lowp_i8mat2x4;
+ typedef mat<3, 2, int8, lowp> lowp_i8mat3x2;
+ typedef mat<3, 3, int8, lowp> lowp_i8mat3x3;
+ typedef mat<3, 4, int8, lowp> lowp_i8mat3x4;
+ typedef mat<4, 2, int8, lowp> lowp_i8mat4x2;
+ typedef mat<4, 3, int8, lowp> lowp_i8mat4x3;
+ typedef mat<4, 4, int8, lowp> lowp_i8mat4x4;
+
+ typedef mat<2, 2, int8, mediump> mediump_i8mat2x2;
+ typedef mat<2, 3, int8, mediump> mediump_i8mat2x3;
+ typedef mat<2, 4, int8, mediump> mediump_i8mat2x4;
+ typedef mat<3, 2, int8, mediump> mediump_i8mat3x2;
+ typedef mat<3, 3, int8, mediump> mediump_i8mat3x3;
+ typedef mat<3, 4, int8, mediump> mediump_i8mat3x4;
+ typedef mat<4, 2, int8, mediump> mediump_i8mat4x2;
+ typedef mat<4, 3, int8, mediump> mediump_i8mat4x3;
+ typedef mat<4, 4, int8, mediump> mediump_i8mat4x4;
+
+ typedef mat<2, 2, int8, highp> highp_i8mat2x2;
+ typedef mat<2, 3, int8, highp> highp_i8mat2x3;
+ typedef mat<2, 4, int8, highp> highp_i8mat2x4;
+ typedef mat<3, 2, int8, highp> highp_i8mat3x2;
+ typedef mat<3, 3, int8, highp> highp_i8mat3x3;
+ typedef mat<3, 4, int8, highp> highp_i8mat3x4;
+ typedef mat<4, 2, int8, highp> highp_i8mat4x2;
+ typedef mat<4, 3, int8, highp> highp_i8mat4x3;
+ typedef mat<4, 4, int8, highp> highp_i8mat4x4;
+
+ typedef mat<2, 2, int8, defaultp> i8mat2x2;
+ typedef mat<3, 2, int8, defaultp> i8mat3x2;
+ typedef mat<4, 2, int8, defaultp> i8mat4x2;
+ typedef mat<2, 3, int8, defaultp> i8mat2x3;
+ typedef mat<3, 3, int8, defaultp> i8mat3x3;
+ typedef mat<4, 3, int8, defaultp> i8mat4x3;
+ typedef mat<2, 4, int8, defaultp> i8mat2x4;
+ typedef mat<3, 4, int8, defaultp> i8mat3x4;
+ typedef mat<4, 4, int8, defaultp> i8mat4x4;
+
+
+ typedef mat<2, 2, int16, lowp> lowp_i16mat2x2;
+ typedef mat<2, 3, int16, lowp> lowp_i16mat2x3;
+ typedef mat<2, 4, int16, lowp> lowp_i16mat2x4;
+ typedef mat<3, 2, int16, lowp> lowp_i16mat3x2;
+ typedef mat<3, 3, int16, lowp> lowp_i16mat3x3;
+ typedef mat<3, 4, int16, lowp> lowp_i16mat3x4;
+ typedef mat<4, 2, int16, lowp> lowp_i16mat4x2;
+ typedef mat<4, 3, int16, lowp> lowp_i16mat4x3;
+ typedef mat<4, 4, int16, lowp> lowp_i16mat4x4;
+
+ typedef mat<2, 2, int16, mediump> mediump_i16mat2x2;
+ typedef mat<2, 3, int16, mediump> mediump_i16mat2x3;
+ typedef mat<2, 4, int16, mediump> mediump_i16mat2x4;
+ typedef mat<3, 2, int16, mediump> mediump_i16mat3x2;
+ typedef mat<3, 3, int16, mediump> mediump_i16mat3x3;
+ typedef mat<3, 4, int16, mediump> mediump_i16mat3x4;
+ typedef mat<4, 2, int16, mediump> mediump_i16mat4x2;
+ typedef mat<4, 3, int16, mediump> mediump_i16mat4x3;
+ typedef mat<4, 4, int16, mediump> mediump_i16mat4x4;
+
+ typedef mat<2, 2, int16, highp> highp_i16mat2x2;
+ typedef mat<2, 3, int16, highp> highp_i16mat2x3;
+ typedef mat<2, 4, int16, highp> highp_i16mat2x4;
+ typedef mat<3, 2, int16, highp> highp_i16mat3x2;
+ typedef mat<3, 3, int16, highp> highp_i16mat3x3;
+ typedef mat<3, 4, int16, highp> highp_i16mat3x4;
+ typedef mat<4, 2, int16, highp> highp_i16mat4x2;
+ typedef mat<4, 3, int16, highp> highp_i16mat4x3;
+ typedef mat<4, 4, int16, highp> highp_i16mat4x4;
+
+ typedef mat<2, 2, int16, defaultp> i16mat2x2;
+ typedef mat<3, 2, int16, defaultp> i16mat3x2;
+ typedef mat<4, 2, int16, defaultp> i16mat4x2;
+ typedef mat<2, 3, int16, defaultp> i16mat2x3;
+ typedef mat<3, 3, int16, defaultp> i16mat3x3;
+ typedef mat<4, 3, int16, defaultp> i16mat4x3;
+ typedef mat<2, 4, int16, defaultp> i16mat2x4;
+ typedef mat<3, 4, int16, defaultp> i16mat3x4;
+ typedef mat<4, 4, int16, defaultp> i16mat4x4;
+
+
+ typedef mat<2, 2, int32, lowp> lowp_i32mat2x2;
+ typedef mat<2, 3, int32, lowp> lowp_i32mat2x3;
+ typedef mat<2, 4, int32, lowp> lowp_i32mat2x4;
+ typedef mat<3, 2, int32, lowp> lowp_i32mat3x2;
+ typedef mat<3, 3, int32, lowp> lowp_i32mat3x3;
+ typedef mat<3, 4, int32, lowp> lowp_i32mat3x4;
+ typedef mat<4, 2, int32, lowp> lowp_i32mat4x2;
+ typedef mat<4, 3, int32, lowp> lowp_i32mat4x3;
+ typedef mat<4, 4, int32, lowp> lowp_i32mat4x4;
+
+ typedef mat<2, 2, int32, mediump> mediump_i32mat2x2;
+ typedef mat<2, 3, int32, mediump> mediump_i32mat2x3;
+ typedef mat<2, 4, int32, mediump> mediump_i32mat2x4;
+ typedef mat<3, 2, int32, mediump> mediump_i32mat3x2;
+ typedef mat<3, 3, int32, mediump> mediump_i32mat3x3;
+ typedef mat<3, 4, int32, mediump> mediump_i32mat3x4;
+ typedef mat<4, 2, int32, mediump> mediump_i32mat4x2;
+ typedef mat<4, 3, int32, mediump> mediump_i32mat4x3;
+ typedef mat<4, 4, int32, mediump> mediump_i32mat4x4;
+
+ typedef mat<2, 2, int32, highp> highp_i32mat2x2;
+ typedef mat<2, 3, int32, highp> highp_i32mat2x3;
+ typedef mat<2, 4, int32, highp> highp_i32mat2x4;
+ typedef mat<3, 2, int32, highp> highp_i32mat3x2;
+ typedef mat<3, 3, int32, highp> highp_i32mat3x3;
+ typedef mat<3, 4, int32, highp> highp_i32mat3x4;
+ typedef mat<4, 2, int32, highp> highp_i32mat4x2;
+ typedef mat<4, 3, int32, highp> highp_i32mat4x3;
+ typedef mat<4, 4, int32, highp> highp_i32mat4x4;
+
+ typedef mat<2, 2, int32, defaultp> i32mat2x2;
+ typedef mat<3, 2, int32, defaultp> i32mat3x2;
+ typedef mat<4, 2, int32, defaultp> i32mat4x2;
+ typedef mat<2, 3, int32, defaultp> i32mat2x3;
+ typedef mat<3, 3, int32, defaultp> i32mat3x3;
+ typedef mat<4, 3, int32, defaultp> i32mat4x3;
+ typedef mat<2, 4, int32, defaultp> i32mat2x4;
+ typedef mat<3, 4, int32, defaultp> i32mat3x4;
+ typedef mat<4, 4, int32, defaultp> i32mat4x4;
+
+
+ typedef mat<2, 2, int64, lowp> lowp_i64mat2x2;
+ typedef mat<2, 3, int64, lowp> lowp_i64mat2x3;
+ typedef mat<2, 4, int64, lowp> lowp_i64mat2x4;
+ typedef mat<3, 2, int64, lowp> lowp_i64mat3x2;
+ typedef mat<3, 3, int64, lowp> lowp_i64mat3x3;
+ typedef mat<3, 4, int64, lowp> lowp_i64mat3x4;
+ typedef mat<4, 2, int64, lowp> lowp_i64mat4x2;
+ typedef mat<4, 3, int64, lowp> lowp_i64mat4x3;
+ typedef mat<4, 4, int64, lowp> lowp_i64mat4x4;
+
+ typedef mat<2, 2, int64, mediump> mediump_i64mat2x2;
+ typedef mat<2, 3, int64, mediump> mediump_i64mat2x3;
+ typedef mat<2, 4, int64, mediump> mediump_i64mat2x4;
+ typedef mat<3, 2, int64, mediump> mediump_i64mat3x2;
+ typedef mat<3, 3, int64, mediump> mediump_i64mat3x3;
+ typedef mat<3, 4, int64, mediump> mediump_i64mat3x4;
+ typedef mat<4, 2, int64, mediump> mediump_i64mat4x2;
+ typedef mat<4, 3, int64, mediump> mediump_i64mat4x3;
+ typedef mat<4, 4, int64, mediump> mediump_i64mat4x4;
+
+ typedef mat<2, 2, int64, highp> highp_i64mat2x2;
+ typedef mat<2, 3, int64, highp> highp_i64mat2x3;
+ typedef mat<2, 4, int64, highp> highp_i64mat2x4;
+ typedef mat<3, 2, int64, highp> highp_i64mat3x2;
+ typedef mat<3, 3, int64, highp> highp_i64mat3x3;
+ typedef mat<3, 4, int64, highp> highp_i64mat3x4;
+ typedef mat<4, 2, int64, highp> highp_i64mat4x2;
+ typedef mat<4, 3, int64, highp> highp_i64mat4x3;
+ typedef mat<4, 4, int64, highp> highp_i64mat4x4;
+
+ typedef mat<2, 2, int64, defaultp> i64mat2x2;
+ typedef mat<3, 2, int64, defaultp> i64mat3x2;
+ typedef mat<4, 2, int64, defaultp> i64mat4x2;
+ typedef mat<2, 3, int64, defaultp> i64mat2x3;
+ typedef mat<3, 3, int64, defaultp> i64mat3x3;
+ typedef mat<4, 3, int64, defaultp> i64mat4x3;
+ typedef mat<2, 4, int64, defaultp> i64mat2x4;
+ typedef mat<3, 4, int64, defaultp> i64mat3x4;
+ typedef mat<4, 4, int64, defaultp> i64mat4x4;
+
+
+ // Unsigned integer matrix MxN
+
+ typedef mat<2, 2, uint, lowp> lowp_umat2x2;
+ typedef mat<2, 3, uint, lowp> lowp_umat2x3;
+ typedef mat<2, 4, uint, lowp> lowp_umat2x4;
+ typedef mat<3, 2, uint, lowp> lowp_umat3x2;
+ typedef mat<3, 3, uint, lowp> lowp_umat3x3;
+ typedef mat<3, 4, uint, lowp> lowp_umat3x4;
+ typedef mat<4, 2, uint, lowp> lowp_umat4x2;
+ typedef mat<4, 3, uint, lowp> lowp_umat4x3;
+ typedef mat<4, 4, uint, lowp> lowp_umat4x4;
+
+ typedef mat<2, 2, uint, mediump> mediump_umat2x2;
+ typedef mat<2, 3, uint, mediump> mediump_umat2x3;
+ typedef mat<2, 4, uint, mediump> mediump_umat2x4;
+ typedef mat<3, 2, uint, mediump> mediump_umat3x2;
+ typedef mat<3, 3, uint, mediump> mediump_umat3x3;
+ typedef mat<3, 4, uint, mediump> mediump_umat3x4;
+ typedef mat<4, 2, uint, mediump> mediump_umat4x2;
+ typedef mat<4, 3, uint, mediump> mediump_umat4x3;
+ typedef mat<4, 4, uint, mediump> mediump_umat4x4;
+
+ typedef mat<2, 2, uint, highp> highp_umat2x2;
+ typedef mat<2, 3, uint, highp> highp_umat2x3;
+ typedef mat<2, 4, uint, highp> highp_umat2x4;
+ typedef mat<3, 2, uint, highp> highp_umat3x2;
+ typedef mat<3, 3, uint, highp> highp_umat3x3;
+ typedef mat<3, 4, uint, highp> highp_umat3x4;
+ typedef mat<4, 2, uint, highp> highp_umat4x2;
+ typedef mat<4, 3, uint, highp> highp_umat4x3;
+ typedef mat<4, 4, uint, highp> highp_umat4x4;
+
+ typedef mat<2, 2, uint, defaultp> umat2x2;
+ typedef mat<3, 2, uint, defaultp> umat3x2;
+ typedef mat<4, 2, uint, defaultp> umat4x2;
+ typedef mat<2, 3, uint, defaultp> umat2x3;
+ typedef mat<3, 3, uint, defaultp> umat3x3;
+ typedef mat<4, 3, uint, defaultp> umat4x3;
+ typedef mat<2, 4, uint, defaultp> umat2x4;
+ typedef mat<3, 4, uint, defaultp> umat3x4;
+ typedef mat<4, 4, uint, defaultp> umat4x4;
+
+
+ typedef mat<2, 2, uint8, lowp> lowp_u8mat2x2;
+ typedef mat<2, 3, uint8, lowp> lowp_u8mat2x3;
+ typedef mat<2, 4, uint8, lowp> lowp_u8mat2x4;
+ typedef mat<3, 2, uint8, lowp> lowp_u8mat3x2;
+ typedef mat<3, 3, uint8, lowp> lowp_u8mat3x3;
+ typedef mat<3, 4, uint8, lowp> lowp_u8mat3x4;
+ typedef mat<4, 2, uint8, lowp> lowp_u8mat4x2;
+ typedef mat<4, 3, uint8, lowp> lowp_u8mat4x3;
+ typedef mat<4, 4, uint8, lowp> lowp_u8mat4x4;
+
+ typedef mat<2, 2, uint8, mediump> mediump_u8mat2x2;
+ typedef mat<2, 3, uint8, mediump> mediump_u8mat2x3;
+ typedef mat<2, 4, uint8, mediump> mediump_u8mat2x4;
+ typedef mat<3, 2, uint8, mediump> mediump_u8mat3x2;
+ typedef mat<3, 3, uint8, mediump> mediump_u8mat3x3;
+ typedef mat<3, 4, uint8, mediump> mediump_u8mat3x4;
+ typedef mat<4, 2, uint8, mediump> mediump_u8mat4x2;
+ typedef mat<4, 3, uint8, mediump> mediump_u8mat4x3;
+ typedef mat<4, 4, uint8, mediump> mediump_u8mat4x4;
+
+ typedef mat<2, 2, uint8, highp> highp_u8mat2x2;
+ typedef mat<2, 3, uint8, highp> highp_u8mat2x3;
+ typedef mat<2, 4, uint8, highp> highp_u8mat2x4;
+ typedef mat<3, 2, uint8, highp> highp_u8mat3x2;
+ typedef mat<3, 3, uint8, highp> highp_u8mat3x3;
+ typedef mat<3, 4, uint8, highp> highp_u8mat3x4;
+ typedef mat<4, 2, uint8, highp> highp_u8mat4x2;
+ typedef mat<4, 3, uint8, highp> highp_u8mat4x3;
+ typedef mat<4, 4, uint8, highp> highp_u8mat4x4;
+
+ typedef mat<2, 2, uint8, defaultp> u8mat2x2;
+ typedef mat<3, 2, uint8, defaultp> u8mat3x2;
+ typedef mat<4, 2, uint8, defaultp> u8mat4x2;
+ typedef mat<2, 3, uint8, defaultp> u8mat2x3;
+ typedef mat<3, 3, uint8, defaultp> u8mat3x3;
+ typedef mat<4, 3, uint8, defaultp> u8mat4x3;
+ typedef mat<2, 4, uint8, defaultp> u8mat2x4;
+ typedef mat<3, 4, uint8, defaultp> u8mat3x4;
+ typedef mat<4, 4, uint8, defaultp> u8mat4x4;
+
+
+ typedef mat<2, 2, uint16, lowp> lowp_u16mat2x2;
+ typedef mat<2, 3, uint16, lowp> lowp_u16mat2x3;
+ typedef mat<2, 4, uint16, lowp> lowp_u16mat2x4;
+ typedef mat<3, 2, uint16, lowp> lowp_u16mat3x2;
+ typedef mat<3, 3, uint16, lowp> lowp_u16mat3x3;
+ typedef mat<3, 4, uint16, lowp> lowp_u16mat3x4;
+ typedef mat<4, 2, uint16, lowp> lowp_u16mat4x2;
+ typedef mat<4, 3, uint16, lowp> lowp_u16mat4x3;
+ typedef mat<4, 4, uint16, lowp> lowp_u16mat4x4;
+
+ typedef mat<2, 2, uint16, mediump> mediump_u16mat2x2;
+ typedef mat<2, 3, uint16, mediump> mediump_u16mat2x3;
+ typedef mat<2, 4, uint16, mediump> mediump_u16mat2x4;
+ typedef mat<3, 2, uint16, mediump> mediump_u16mat3x2;
+ typedef mat<3, 3, uint16, mediump> mediump_u16mat3x3;
+ typedef mat<3, 4, uint16, mediump> mediump_u16mat3x4;
+ typedef mat<4, 2, uint16, mediump> mediump_u16mat4x2;
+ typedef mat<4, 3, uint16, mediump> mediump_u16mat4x3;
+ typedef mat<4, 4, uint16, mediump> mediump_u16mat4x4;
+
+ typedef mat<2, 2, uint16, highp> highp_u16mat2x2;
+ typedef mat<2, 3, uint16, highp> highp_u16mat2x3;
+ typedef mat<2, 4, uint16, highp> highp_u16mat2x4;
+ typedef mat<3, 2, uint16, highp> highp_u16mat3x2;
+ typedef mat<3, 3, uint16, highp> highp_u16mat3x3;
+ typedef mat<3, 4, uint16, highp> highp_u16mat3x4;
+ typedef mat<4, 2, uint16, highp> highp_u16mat4x2;
+ typedef mat<4, 3, uint16, highp> highp_u16mat4x3;
+ typedef mat<4, 4, uint16, highp> highp_u16mat4x4;
+
+ typedef mat<2, 2, uint16, defaultp> u16mat2x2;
+ typedef mat<3, 2, uint16, defaultp> u16mat3x2;
+ typedef mat<4, 2, uint16, defaultp> u16mat4x2;
+ typedef mat<2, 3, uint16, defaultp> u16mat2x3;
+ typedef mat<3, 3, uint16, defaultp> u16mat3x3;
+ typedef mat<4, 3, uint16, defaultp> u16mat4x3;
+ typedef mat<2, 4, uint16, defaultp> u16mat2x4;
+ typedef mat<3, 4, uint16, defaultp> u16mat3x4;
+ typedef mat<4, 4, uint16, defaultp> u16mat4x4;
+
+
+ typedef mat<2, 2, uint32, lowp> lowp_u32mat2x2;
+ typedef mat<2, 3, uint32, lowp> lowp_u32mat2x3;
+ typedef mat<2, 4, uint32, lowp> lowp_u32mat2x4;
+ typedef mat<3, 2, uint32, lowp> lowp_u32mat3x2;
+ typedef mat<3, 3, uint32, lowp> lowp_u32mat3x3;
+ typedef mat<3, 4, uint32, lowp> lowp_u32mat3x4;
+ typedef mat<4, 2, uint32, lowp> lowp_u32mat4x2;
+ typedef mat<4, 3, uint32, lowp> lowp_u32mat4x3;
+ typedef mat<4, 4, uint32, lowp> lowp_u32mat4x4;
+
+ typedef mat<2, 2, uint32, mediump> mediump_u32mat2x2;
+ typedef mat<2, 3, uint32, mediump> mediump_u32mat2x3;
+ typedef mat<2, 4, uint32, mediump> mediump_u32mat2x4;
+ typedef mat<3, 2, uint32, mediump> mediump_u32mat3x2;
+ typedef mat<3, 3, uint32, mediump> mediump_u32mat3x3;
+ typedef mat<3, 4, uint32, mediump> mediump_u32mat3x4;
+ typedef mat<4, 2, uint32, mediump> mediump_u32mat4x2;
+ typedef mat<4, 3, uint32, mediump> mediump_u32mat4x3;
+ typedef mat<4, 4, uint32, mediump> mediump_u32mat4x4;
+
+ typedef mat<2, 2, uint32, highp> highp_u32mat2x2;
+ typedef mat<2, 3, uint32, highp> highp_u32mat2x3;
+ typedef mat<2, 4, uint32, highp> highp_u32mat2x4;
+ typedef mat<3, 2, uint32, highp> highp_u32mat3x2;
+ typedef mat<3, 3, uint32, highp> highp_u32mat3x3;
+ typedef mat<3, 4, uint32, highp> highp_u32mat3x4;
+ typedef mat<4, 2, uint32, highp> highp_u32mat4x2;
+ typedef mat<4, 3, uint32, highp> highp_u32mat4x3;
+ typedef mat<4, 4, uint32, highp> highp_u32mat4x4;
+
+ typedef mat<2, 2, uint32, defaultp> u32mat2x2;
+ typedef mat<3, 2, uint32, defaultp> u32mat3x2;
+ typedef mat<4, 2, uint32, defaultp> u32mat4x2;
+ typedef mat<2, 3, uint32, defaultp> u32mat2x3;
+ typedef mat<3, 3, uint32, defaultp> u32mat3x3;
+ typedef mat<4, 3, uint32, defaultp> u32mat4x3;
+ typedef mat<2, 4, uint32, defaultp> u32mat2x4;
+ typedef mat<3, 4, uint32, defaultp> u32mat3x4;
+ typedef mat<4, 4, uint32, defaultp> u32mat4x4;
+
+
+ typedef mat<2, 2, uint64, lowp> lowp_u64mat2x2;
+ typedef mat<2, 3, uint64, lowp> lowp_u64mat2x3;
+ typedef mat<2, 4, uint64, lowp> lowp_u64mat2x4;
+ typedef mat<3, 2, uint64, lowp> lowp_u64mat3x2;
+ typedef mat<3, 3, uint64, lowp> lowp_u64mat3x3;
+ typedef mat<3, 4, uint64, lowp> lowp_u64mat3x4;
+ typedef mat<4, 2, uint64, lowp> lowp_u64mat4x2;
+ typedef mat<4, 3, uint64, lowp> lowp_u64mat4x3;
+ typedef mat<4, 4, uint64, lowp> lowp_u64mat4x4;
+
+ typedef mat<2, 2, uint64, mediump> mediump_u64mat2x2;
+ typedef mat<2, 3, uint64, mediump> mediump_u64mat2x3;
+ typedef mat<2, 4, uint64, mediump> mediump_u64mat2x4;
+ typedef mat<3, 2, uint64, mediump> mediump_u64mat3x2;
+ typedef mat<3, 3, uint64, mediump> mediump_u64mat3x3;
+ typedef mat<3, 4, uint64, mediump> mediump_u64mat3x4;
+ typedef mat<4, 2, uint64, mediump> mediump_u64mat4x2;
+ typedef mat<4, 3, uint64, mediump> mediump_u64mat4x3;
+ typedef mat<4, 4, uint64, mediump> mediump_u64mat4x4;
+
+ typedef mat<2, 2, uint64, highp> highp_u64mat2x2;
+ typedef mat<2, 3, uint64, highp> highp_u64mat2x3;
+ typedef mat<2, 4, uint64, highp> highp_u64mat2x4;
+ typedef mat<3, 2, uint64, highp> highp_u64mat3x2;
+ typedef mat<3, 3, uint64, highp> highp_u64mat3x3;
+ typedef mat<3, 4, uint64, highp> highp_u64mat3x4;
+ typedef mat<4, 2, uint64, highp> highp_u64mat4x2;
+ typedef mat<4, 3, uint64, highp> highp_u64mat4x3;
+ typedef mat<4, 4, uint64, highp> highp_u64mat4x4;
+
+ typedef mat<2, 2, uint64, defaultp> u64mat2x2;
+ typedef mat<3, 2, uint64, defaultp> u64mat3x2;
+ typedef mat<4, 2, uint64, defaultp> u64mat4x2;
+ typedef mat<2, 3, uint64, defaultp> u64mat2x3;
+ typedef mat<3, 3, uint64, defaultp> u64mat3x3;
+ typedef mat<4, 3, uint64, defaultp> u64mat4x3;
+ typedef mat<2, 4, uint64, defaultp> u64mat2x4;
+ typedef mat<3, 4, uint64, defaultp> u64mat3x4;
+ typedef mat<4, 4, uint64, defaultp> u64mat4x4;
+
+ // Quaternion
+
+ typedef qua<float, lowp> lowp_quat;
+ typedef qua<float, mediump> mediump_quat;
+ typedef qua<float, highp> highp_quat;
+ typedef qua<float, defaultp> quat;
+
+ typedef qua<float, lowp> lowp_fquat;
+ typedef qua<float, mediump> mediump_fquat;
+ typedef qua<float, highp> highp_fquat;
+ typedef qua<float, defaultp> fquat;
+
+ typedef qua<f32, lowp> lowp_f32quat;
+ typedef qua<f32, mediump> mediump_f32quat;
+ typedef qua<f32, highp> highp_f32quat;
+ typedef qua<f32, defaultp> f32quat;
+
+ typedef qua<double, lowp> lowp_dquat;
+ typedef qua<double, mediump> mediump_dquat;
+ typedef qua<double, highp> highp_dquat;
+ typedef qua<double, defaultp> dquat;
+
+ typedef qua<f64, lowp> lowp_f64quat;
+ typedef qua<f64, mediump> mediump_f64quat;
+ typedef qua<f64, highp> highp_f64quat;
+ typedef qua<f64, defaultp> f64quat;
+}//namespace glm
+
+
diff --git a/3rdparty/glm/source/glm/geometric.hpp b/3rdparty/glm/source/glm/geometric.hpp
new file mode 100644
index 0000000..b704f25
--- /dev/null
+++ b/3rdparty/glm/source/glm/geometric.hpp
@@ -0,0 +1,116 @@
+/// @ref core
+/// @file glm/geometric.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+///
+/// @defgroup core_func_geometric Geometric functions
+/// @ingroup core
+///
+/// These operate on vectors as vectors, not component-wise.
+///
+/// Include <glm/geometric.hpp> to use these core features.
+
+#pragma once
+
+#include "detail/type_vec3.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_func_geometric
+ /// @{
+
+ /// Returns the length of x, i.e., sqrt(x * x).
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/length.xml">GLSL length man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T length(vec<L, T, Q> const& x);
+
+ /// Returns the distance between p0 and p1, i.e., length(p0 - p1).
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/distance.xml">GLSL distance man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1);
+
+ /// Returns the dot product of x and y, i.e., result = x * y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/dot.xml">GLSL dot man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the cross product of x and y.
+ ///
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/cross.xml">GLSL cross man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
+
+ /// Returns a vector in the same direction as x but with length of 1.
+ /// According to issue 10 GLSL 1.10 specification, if length(x) == 0 then result is undefined and generate an error.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/normalize.xml">GLSL normalize man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> normalize(vec<L, T, Q> const& x);
+
+ /// If dot(Nref, I) < 0.0, return N, otherwise, return -N.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/faceforward.xml">GLSL faceforward man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> faceforward(
+ vec<L, T, Q> const& N,
+ vec<L, T, Q> const& I,
+ vec<L, T, Q> const& Nref);
+
+ /// For the incident vector I and surface orientation N,
+ /// returns the reflection direction : result = I - 2.0 * dot(N, I) * N.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/reflect.xml">GLSL reflect man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> reflect(
+ vec<L, T, Q> const& I,
+ vec<L, T, Q> const& N);
+
+ /// For the incident vector I and surface normal N,
+ /// and the ratio of indices of refraction eta,
+ /// return the refraction vector.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Floating-point scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/refract.xml">GLSL refract man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.5 Geometric Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> refract(
+ vec<L, T, Q> const& I,
+ vec<L, T, Q> const& N,
+ T eta);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_geometric.inl"
diff --git a/3rdparty/glm/source/glm/glm.hpp b/3rdparty/glm/source/glm/glm.hpp
new file mode 100644
index 0000000..8b61064
--- /dev/null
+++ b/3rdparty/glm/source/glm/glm.hpp
@@ -0,0 +1,136 @@
+/// @ref core
+/// @file glm/glm.hpp
+///
+/// @defgroup core Core features
+///
+/// @brief Features that implement in C++ the GLSL specification as closely as possible.
+///
+/// The GLM core consists of C++ types that mirror GLSL types and
+/// C++ functions that mirror the GLSL functions.
+///
+/// The best documentation for GLM Core is the current GLSL specification,
+/// <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.clean.pdf">version 4.2
+/// (pdf file)</a>.
+///
+/// GLM core functionalities require <glm/glm.hpp> to be included to be used.
+///
+///
+/// @defgroup core_vector Vector types
+///
+/// Vector types of two to four components with an exhaustive set of operators.
+///
+/// @ingroup core
+///
+///
+/// @defgroup core_vector_precision Vector types with precision qualifiers
+///
+/// @brief Vector types with precision qualifiers which may result in various precision in term of ULPs
+///
+/// GLSL allows defining qualifiers for particular variables.
+/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility,
+/// with OpenGL ES's GLSL, these qualifiers do have an effect.
+///
+/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing:
+/// a number of typedefs that use a particular qualifier.
+///
+/// None of these types make any guarantees about the actual qualifier used.
+///
+/// @ingroup core
+///
+///
+/// @defgroup core_matrix Matrix types
+///
+/// Matrix types with C columns and R rows, where C and R are values between 2 and 4 included.
+/// These types have exhaustive sets of operators.
+///
+/// @ingroup core
+///
+///
+/// @defgroup core_matrix_precision Matrix types with precision qualifiers
+///
+/// @brief Matrix types with precision qualifiers which may result in various precision in term of ULPs
+///
+/// GLSL allows defining qualifiers for particular variables.
+/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility,
+/// with OpenGL ES's GLSL, these qualifiers do have an effect.
+///
+/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing:
+/// a number of typedefs that use a particular qualifier.
+///
+/// None of these types make any guarantees about the actual qualifier used.
+///
+/// @ingroup core
+///
+///
+/// @defgroup ext Stable extensions
+///
+/// @brief Additional features not specified by GLSL specification.
+///
+/// EXT extensions are fully tested and documented.
+///
+/// Even if it's highly unrecommended, it's possible to include all the extensions at once by
+/// including <glm/ext.hpp>. Otherwise, each extension needs to be included a specific file.
+///
+///
+/// @defgroup gtc Recommended extensions
+///
+/// @brief Additional features not specified by GLSL specification.
+///
+/// GTC extensions aim to be stable with tests and documentation.
+///
+/// Even if it's highly unrecommended, it's possible to include all the extensions at once by
+/// including <glm/ext.hpp>. Otherwise, each extension needs to be included a specific file.
+///
+///
+/// @defgroup gtx Experimental extensions
+///
+/// @brief Experimental features not specified by GLSL specification.
+///
+/// Experimental extensions are useful functions and types, but the development of
+/// their API and functionality is not necessarily stable. They can change
+/// substantially between versions. Backwards compatibility is not much of an issue
+/// for them.
+///
+/// Even if it's highly unrecommended, it's possible to include all the extensions
+/// at once by including <glm/ext.hpp>. Otherwise, each extension needs to be
+/// included a specific file.
+///
+/// @mainpage OpenGL Mathematics (GLM)
+/// - Website: <a href="https://glm.g-truc.net">glm.g-truc.net</a>
+/// - <a href="modules.html">GLM API documentation</a>
+/// - <a href="https://github.com/g-truc/glm/blob/master/manual.md">GLM Manual</a>
+
+#include "detail/_fixes.hpp"
+
+#include "detail/setup.hpp"
+
+#pragma once
+
+#include <cmath>
+#include <climits>
+#include <cfloat>
+#include <limits>
+#include <cassert>
+#include "fwd.hpp"
+
+#include "vec2.hpp"
+#include "vec3.hpp"
+#include "vec4.hpp"
+#include "mat2x2.hpp"
+#include "mat2x3.hpp"
+#include "mat2x4.hpp"
+#include "mat3x2.hpp"
+#include "mat3x3.hpp"
+#include "mat3x4.hpp"
+#include "mat4x2.hpp"
+#include "mat4x3.hpp"
+#include "mat4x4.hpp"
+
+#include "trigonometric.hpp"
+#include "exponential.hpp"
+#include "common.hpp"
+#include "packing.hpp"
+#include "geometric.hpp"
+#include "matrix.hpp"
+#include "vector_relational.hpp"
+#include "integer.hpp"
diff --git a/3rdparty/glm/source/glm/gtc/bitfield.hpp b/3rdparty/glm/source/glm/gtc/bitfield.hpp
new file mode 100644
index 0000000..084fbe7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/bitfield.hpp
@@ -0,0 +1,266 @@
+/// @ref gtc_bitfield
+/// @file glm/gtc/bitfield.hpp
+///
+/// @see core (dependence)
+/// @see gtc_bitfield (dependence)
+///
+/// @defgroup gtc_bitfield GLM_GTC_bitfield
+/// @ingroup gtc
+///
+/// Include <glm/gtc/bitfield.hpp> to use the features of this extension.
+///
+/// Allows performing bit operations on integer values
+
+#include "../detail/setup.hpp"
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_int_sized.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "type_precision.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_bitfield extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_bitfield
+ /// @{
+
+ /// Build a mask of 'Bits' bits
+ ///
+ /// @see gtc_bitfield
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType mask(genIUType Bits);
+
+ /// Build a per-component mask of 'v' bits
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed and unsigned integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_bitfield
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> mask(vec<L, T, Q> const& v);
+
+ /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side.
+ ///
+ /// @see gtc_bitfield
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType bitfieldRotateRight(genIUType In, int Shift);
+
+ /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed and unsigned integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_bitfield
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldRotateRight(vec<L, T, Q> const& In, int Shift);
+
+ /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side.
+ ///
+ /// @see gtc_bitfield
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType bitfieldRotateLeft(genIUType In, int Shift);
+
+ /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed and unsigned integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_bitfield
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldRotateLeft(vec<L, T, Q> const& In, int Shift);
+
+ /// Set to 1 a range of bits.
+ ///
+ /// @see gtc_bitfield
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount);
+
+ /// Set to 1 a range of bits.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed and unsigned integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_bitfield
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldFillOne(vec<L, T, Q> const& Value, int FirstBit, int BitCount);
+
+ /// Set to 0 a range of bits.
+ ///
+ /// @see gtc_bitfield
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount);
+
+ /// Set to 0 a range of bits.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Signed and unsigned integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_bitfield
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldFillZero(vec<L, T, Q> const& Value, int FirstBit, int BitCount);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of x followed by the first bit of y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int16 bitfieldInterleave(int8 x, int8 y);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of x followed by the first bit of y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint16 bitfieldInterleave(uint8 x, uint8 y);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of v.x followed by the first bit of v.y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint16 bitfieldInterleave(u8vec2 const& v);
+
+ /// Deinterleaves the bits of x.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL glm::u8vec2 bitfieldDeinterleave(glm::uint16 x);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of x followed by the first bit of y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int32 bitfieldInterleave(int16 x, int16 y);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of x followed by the first bit of y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint32 bitfieldInterleave(uint16 x, uint16 y);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of v.x followed by the first bit of v.y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint32 bitfieldInterleave(u16vec2 const& v);
+
+ /// Deinterleaves the bits of x.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL glm::u16vec2 bitfieldDeinterleave(glm::uint32 x);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of x followed by the first bit of y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of x followed by the first bit of y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y);
+
+ /// Interleaves the bits of x and y.
+ /// The first bit is the first bit of v.x followed by the first bit of v.y.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint64 bitfieldInterleave(u32vec2 const& v);
+
+ /// Deinterleaves the bits of x.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave(glm::uint64 x);
+
+ /// Interleaves the bits of x, y and z.
+ /// The first bit is the first bit of x followed by the first bit of y and the first bit of z.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z);
+
+ /// Interleaves the bits of x, y and z.
+ /// The first bit is the first bit of x followed by the first bit of y and the first bit of z.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z);
+
+ /// Interleaves the bits of x, y and z.
+ /// The first bit is the first bit of x followed by the first bit of y and the first bit of z.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z);
+
+ /// Interleaves the bits of x, y and z.
+ /// The first bit is the first bit of x followed by the first bit of y and the first bit of z.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z);
+
+ /// Interleaves the bits of x, y and z.
+ /// The first bit is the first bit of x followed by the first bit of y and the first bit of z.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y, int32 z);
+
+ /// Interleaves the bits of x, y and z.
+ /// The first bit is the first bit of x followed by the first bit of y and the first bit of z.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z);
+
+ /// Interleaves the bits of x, y, z and w.
+ /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w);
+
+ /// Interleaves the bits of x, y, z and w.
+ /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w);
+
+ /// Interleaves the bits of x, y, z and w.
+ /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w);
+
+ /// Interleaves the bits of x, y, z and w.
+ /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w.
+ /// The other bits are interleaved following the previous sequence.
+ ///
+ /// @see gtc_bitfield
+ GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w);
+
+ /// @}
+} //namespace glm
+
+#include "bitfield.inl"
diff --git a/3rdparty/glm/source/glm/gtc/bitfield.inl b/3rdparty/glm/source/glm/gtc/bitfield.inl
new file mode 100644
index 0000000..06cf188
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/bitfield.inl
@@ -0,0 +1,626 @@
+/// @ref gtc_bitfield
+
+#include "../simd/integer.h"
+
+namespace glm{
+namespace detail
+{
+ template<typename PARAM, typename RET>
+ GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y);
+
+ template<typename PARAM, typename RET>
+ GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z);
+
+ template<typename PARAM, typename RET>
+ GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w);
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave(glm::uint8 x, glm::uint8 y)
+ {
+ glm::uint16 REG1(x);
+ glm::uint16 REG2(y);
+
+ REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint16>(0x0F0F);
+ REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint16>(0x0F0F);
+
+ REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint16>(0x3333);
+ REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint16>(0x3333);
+
+ REG1 = ((REG1 << 1) | REG1) & static_cast<glm::uint16>(0x5555);
+ REG2 = ((REG2 << 1) | REG2) & static_cast<glm::uint16>(0x5555);
+
+ return REG1 | static_cast<glm::uint16>(REG2 << 1);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint16 x, glm::uint16 y)
+ {
+ glm::uint32 REG1(x);
+ glm::uint32 REG2(y);
+
+ REG1 = ((REG1 << 8) | REG1) & static_cast<glm::uint32>(0x00FF00FF);
+ REG2 = ((REG2 << 8) | REG2) & static_cast<glm::uint32>(0x00FF00FF);
+
+ REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint32>(0x0F0F0F0F);
+ REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint32>(0x0F0F0F0F);
+
+ REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint32>(0x33333333);
+ REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint32>(0x33333333);
+
+ REG1 = ((REG1 << 1) | REG1) & static_cast<glm::uint32>(0x55555555);
+ REG2 = ((REG2 << 1) | REG2) & static_cast<glm::uint32>(0x55555555);
+
+ return REG1 | (REG2 << 1);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y)
+ {
+ glm::uint64 REG1(x);
+ glm::uint64 REG2(y);
+
+ REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+ REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+
+ REG1 = ((REG1 << 8) | REG1) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+ REG2 = ((REG2 << 8) | REG2) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+
+ REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+ REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+
+ REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint64>(0x3333333333333333ull);
+ REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint64>(0x3333333333333333ull);
+
+ REG1 = ((REG1 << 1) | REG1) & static_cast<glm::uint64>(0x5555555555555555ull);
+ REG2 = ((REG2 << 1) | REG2) & static_cast<glm::uint64>(0x5555555555555555ull);
+
+ return REG1 | (REG2 << 1);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z)
+ {
+ glm::uint32 REG1(x);
+ glm::uint32 REG2(y);
+ glm::uint32 REG3(z);
+
+ REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint32>(0xFF0000FFu);
+ REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint32>(0xFF0000FFu);
+ REG3 = ((REG3 << 16) | REG3) & static_cast<glm::uint32>(0xFF0000FFu);
+
+ REG1 = ((REG1 << 8) | REG1) & static_cast<glm::uint32>(0x0F00F00Fu);
+ REG2 = ((REG2 << 8) | REG2) & static_cast<glm::uint32>(0x0F00F00Fu);
+ REG3 = ((REG3 << 8) | REG3) & static_cast<glm::uint32>(0x0F00F00Fu);
+
+ REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint32>(0xC30C30C3u);
+ REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint32>(0xC30C30C3u);
+ REG3 = ((REG3 << 4) | REG3) & static_cast<glm::uint32>(0xC30C30C3u);
+
+ REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint32>(0x49249249u);
+ REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint32>(0x49249249u);
+ REG3 = ((REG3 << 2) | REG3) & static_cast<glm::uint32>(0x49249249u);
+
+ return REG1 | (REG2 << 1) | (REG3 << 2);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z)
+ {
+ glm::uint64 REG1(x);
+ glm::uint64 REG2(y);
+ glm::uint64 REG3(z);
+
+ REG1 = ((REG1 << 32) | REG1) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+ REG2 = ((REG2 << 32) | REG2) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+ REG3 = ((REG3 << 32) | REG3) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+
+ REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+ REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+ REG3 = ((REG3 << 16) | REG3) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+
+ REG1 = ((REG1 << 8) | REG1) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+ REG2 = ((REG2 << 8) | REG2) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+ REG3 = ((REG3 << 8) | REG3) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+
+ REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+ REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+ REG3 = ((REG3 << 4) | REG3) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+
+ REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint64>(0x9249249249249249ull);
+ REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint64>(0x9249249249249249ull);
+ REG3 = ((REG3 << 2) | REG3) & static_cast<glm::uint64>(0x9249249249249249ull);
+
+ return REG1 | (REG2 << 1) | (REG3 << 2);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y, glm::uint32 z)
+ {
+ glm::uint64 REG1(x);
+ glm::uint64 REG2(y);
+ glm::uint64 REG3(z);
+
+ REG1 = ((REG1 << 32) | REG1) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+ REG2 = ((REG2 << 32) | REG2) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+ REG3 = ((REG3 << 32) | REG3) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+
+ REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+ REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+ REG3 = ((REG3 << 16) | REG3) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+
+ REG1 = ((REG1 << 8) | REG1) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+ REG2 = ((REG2 << 8) | REG2) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+ REG3 = ((REG3 << 8) | REG3) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+
+ REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+ REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+ REG3 = ((REG3 << 4) | REG3) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+
+ REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint64>(0x9249249249249249ull);
+ REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint64>(0x9249249249249249ull);
+ REG3 = ((REG3 << 2) | REG3) & static_cast<glm::uint64>(0x9249249249249249ull);
+
+ return REG1 | (REG2 << 1) | (REG3 << 2);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w)
+ {
+ glm::uint32 REG1(x);
+ glm::uint32 REG2(y);
+ glm::uint32 REG3(z);
+ glm::uint32 REG4(w);
+
+ REG1 = ((REG1 << 12) | REG1) & static_cast<glm::uint32>(0x000F000Fu);
+ REG2 = ((REG2 << 12) | REG2) & static_cast<glm::uint32>(0x000F000Fu);
+ REG3 = ((REG3 << 12) | REG3) & static_cast<glm::uint32>(0x000F000Fu);
+ REG4 = ((REG4 << 12) | REG4) & static_cast<glm::uint32>(0x000F000Fu);
+
+ REG1 = ((REG1 << 6) | REG1) & static_cast<glm::uint32>(0x03030303u);
+ REG2 = ((REG2 << 6) | REG2) & static_cast<glm::uint32>(0x03030303u);
+ REG3 = ((REG3 << 6) | REG3) & static_cast<glm::uint32>(0x03030303u);
+ REG4 = ((REG4 << 6) | REG4) & static_cast<glm::uint32>(0x03030303u);
+
+ REG1 = ((REG1 << 3) | REG1) & static_cast<glm::uint32>(0x11111111u);
+ REG2 = ((REG2 << 3) | REG2) & static_cast<glm::uint32>(0x11111111u);
+ REG3 = ((REG3 << 3) | REG3) & static_cast<glm::uint32>(0x11111111u);
+ REG4 = ((REG4 << 3) | REG4) & static_cast<glm::uint32>(0x11111111u);
+
+ return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3);
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z, glm::uint16 w)
+ {
+ glm::uint64 REG1(x);
+ glm::uint64 REG2(y);
+ glm::uint64 REG3(z);
+ glm::uint64 REG4(w);
+
+ REG1 = ((REG1 << 24) | REG1) & static_cast<glm::uint64>(0x000000FF000000FFull);
+ REG2 = ((REG2 << 24) | REG2) & static_cast<glm::uint64>(0x000000FF000000FFull);
+ REG3 = ((REG3 << 24) | REG3) & static_cast<glm::uint64>(0x000000FF000000FFull);
+ REG4 = ((REG4 << 24) | REG4) & static_cast<glm::uint64>(0x000000FF000000FFull);
+
+ REG1 = ((REG1 << 12) | REG1) & static_cast<glm::uint64>(0x000F000F000F000Full);
+ REG2 = ((REG2 << 12) | REG2) & static_cast<glm::uint64>(0x000F000F000F000Full);
+ REG3 = ((REG3 << 12) | REG3) & static_cast<glm::uint64>(0x000F000F000F000Full);
+ REG4 = ((REG4 << 12) | REG4) & static_cast<glm::uint64>(0x000F000F000F000Full);
+
+ REG1 = ((REG1 << 6) | REG1) & static_cast<glm::uint64>(0x0303030303030303ull);
+ REG2 = ((REG2 << 6) | REG2) & static_cast<glm::uint64>(0x0303030303030303ull);
+ REG3 = ((REG3 << 6) | REG3) & static_cast<glm::uint64>(0x0303030303030303ull);
+ REG4 = ((REG4 << 6) | REG4) & static_cast<glm::uint64>(0x0303030303030303ull);
+
+ REG1 = ((REG1 << 3) | REG1) & static_cast<glm::uint64>(0x1111111111111111ull);
+ REG2 = ((REG2 << 3) | REG2) & static_cast<glm::uint64>(0x1111111111111111ull);
+ REG3 = ((REG3 << 3) | REG3) & static_cast<glm::uint64>(0x1111111111111111ull);
+ REG4 = ((REG4 << 3) | REG4) & static_cast<glm::uint64>(0x1111111111111111ull);
+
+ return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3);
+ }
+}//namespace detail
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType mask(genIUType Bits)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'mask' accepts only integer values");
+
+ return Bits >= sizeof(genIUType) * 8 ? ~static_cast<genIUType>(0) : (static_cast<genIUType>(1) << Bits) - static_cast<genIUType>(1);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> mask(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'mask' accepts only integer values");
+
+ return detail::functor1<vec, L, T, T, Q>::call(mask, v);
+ }
+
+ template<typename genIType>
+ GLM_FUNC_QUALIFIER genIType bitfieldRotateRight(genIType In, int Shift)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIType>::is_integer, "'bitfieldRotateRight' accepts only integer values");
+
+ int const BitSize = static_cast<genIType>(sizeof(genIType) * 8);
+ return (In << static_cast<genIType>(Shift)) | (In >> static_cast<genIType>(BitSize - Shift));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldRotateRight(vec<L, T, Q> const& In, int Shift)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldRotateRight' accepts only integer values");
+
+ int const BitSize = static_cast<int>(sizeof(T) * 8);
+ return (In << static_cast<T>(Shift)) | (In >> static_cast<T>(BitSize - Shift));
+ }
+
+ template<typename genIType>
+ GLM_FUNC_QUALIFIER genIType bitfieldRotateLeft(genIType In, int Shift)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIType>::is_integer, "'bitfieldRotateLeft' accepts only integer values");
+
+ int const BitSize = static_cast<genIType>(sizeof(genIType) * 8);
+ return (In >> static_cast<genIType>(Shift)) | (In << static_cast<genIType>(BitSize - Shift));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldRotateLeft(vec<L, T, Q> const& In, int Shift)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldRotateLeft' accepts only integer values");
+
+ int const BitSize = static_cast<int>(sizeof(T) * 8);
+ return (In >> static_cast<T>(Shift)) | (In << static_cast<T>(BitSize - Shift));
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount)
+ {
+ return Value | static_cast<genIUType>(mask(BitCount) << FirstBit);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldFillOne(vec<L, T, Q> const& Value, int FirstBit, int BitCount)
+ {
+ return Value | static_cast<T>(mask(BitCount) << FirstBit);
+ }
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount)
+ {
+ return Value & static_cast<genIUType>(~(mask(BitCount) << FirstBit));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldFillZero(vec<L, T, Q> const& Value, int FirstBit, int BitCount)
+ {
+ return Value & static_cast<T>(~(mask(BitCount) << FirstBit));
+ }
+
+ GLM_FUNC_QUALIFIER int16 bitfieldInterleave(int8 x, int8 y)
+ {
+ union sign8
+ {
+ int8 i;
+ uint8 u;
+ } sign_x, sign_y;
+
+ union sign16
+ {
+ int16 i;
+ uint16 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(uint8 x, uint8 y)
+ {
+ return detail::bitfieldInterleave<uint8, uint16>(x, y);
+ }
+
+ GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(u8vec2 const& v)
+ {
+ return detail::bitfieldInterleave<uint8, uint16>(v.x, v.y);
+ }
+
+ GLM_FUNC_QUALIFIER u8vec2 bitfieldDeinterleave(glm::uint16 x)
+ {
+ uint16 REG1(x);
+ uint16 REG2(x >>= 1);
+
+ REG1 = REG1 & static_cast<uint16>(0x5555);
+ REG2 = REG2 & static_cast<uint16>(0x5555);
+
+ REG1 = ((REG1 >> 1) | REG1) & static_cast<uint16>(0x3333);
+ REG2 = ((REG2 >> 1) | REG2) & static_cast<uint16>(0x3333);
+
+ REG1 = ((REG1 >> 2) | REG1) & static_cast<uint16>(0x0F0F);
+ REG2 = ((REG2 >> 2) | REG2) & static_cast<uint16>(0x0F0F);
+
+ REG1 = ((REG1 >> 4) | REG1) & static_cast<uint16>(0x00FF);
+ REG2 = ((REG2 >> 4) | REG2) & static_cast<uint16>(0x00FF);
+
+ REG1 = ((REG1 >> 8) | REG1) & static_cast<uint16>(0xFFFF);
+ REG2 = ((REG2 >> 8) | REG2) & static_cast<uint16>(0xFFFF);
+
+ return glm::u8vec2(REG1, REG2);
+ }
+
+ GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int16 x, int16 y)
+ {
+ union sign16
+ {
+ int16 i;
+ uint16 u;
+ } sign_x, sign_y;
+
+ union sign32
+ {
+ int32 i;
+ uint32 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint16 x, uint16 y)
+ {
+ return detail::bitfieldInterleave<uint16, uint32>(x, y);
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(u16vec2 const& v)
+ {
+ return detail::bitfieldInterleave<uint16, uint32>(v.x, v.y);
+ }
+
+ GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave(glm::uint32 x)
+ {
+ glm::uint32 REG1(x);
+ glm::uint32 REG2(x >>= 1);
+
+ REG1 = REG1 & static_cast<glm::uint32>(0x55555555);
+ REG2 = REG2 & static_cast<glm::uint32>(0x55555555);
+
+ REG1 = ((REG1 >> 1) | REG1) & static_cast<glm::uint32>(0x33333333);
+ REG2 = ((REG2 >> 1) | REG2) & static_cast<glm::uint32>(0x33333333);
+
+ REG1 = ((REG1 >> 2) | REG1) & static_cast<glm::uint32>(0x0F0F0F0F);
+ REG2 = ((REG2 >> 2) | REG2) & static_cast<glm::uint32>(0x0F0F0F0F);
+
+ REG1 = ((REG1 >> 4) | REG1) & static_cast<glm::uint32>(0x00FF00FF);
+ REG2 = ((REG2 >> 4) | REG2) & static_cast<glm::uint32>(0x00FF00FF);
+
+ REG1 = ((REG1 >> 8) | REG1) & static_cast<glm::uint32>(0x0000FFFF);
+ REG2 = ((REG2 >> 8) | REG2) & static_cast<glm::uint32>(0x0000FFFF);
+
+ return glm::u16vec2(REG1, REG2);
+ }
+
+ GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y)
+ {
+ union sign32
+ {
+ int32 i;
+ uint32 u;
+ } sign_x, sign_y;
+
+ union sign64
+ {
+ int64 i;
+ uint64 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y)
+ {
+ return detail::bitfieldInterleave<uint32, uint64>(x, y);
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(u32vec2 const& v)
+ {
+ return detail::bitfieldInterleave<uint32, uint64>(v.x, v.y);
+ }
+
+ GLM_FUNC_QUALIFIER glm::u32vec2 bitfieldDeinterleave(glm::uint64 x)
+ {
+ glm::uint64 REG1(x);
+ glm::uint64 REG2(x >>= 1);
+
+ REG1 = REG1 & static_cast<glm::uint64>(0x5555555555555555ull);
+ REG2 = REG2 & static_cast<glm::uint64>(0x5555555555555555ull);
+
+ REG1 = ((REG1 >> 1) | REG1) & static_cast<glm::uint64>(0x3333333333333333ull);
+ REG2 = ((REG2 >> 1) | REG2) & static_cast<glm::uint64>(0x3333333333333333ull);
+
+ REG1 = ((REG1 >> 2) | REG1) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+ REG2 = ((REG2 >> 2) | REG2) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+
+ REG1 = ((REG1 >> 4) | REG1) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+ REG2 = ((REG2 >> 4) | REG2) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+
+ REG1 = ((REG1 >> 8) | REG1) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+ REG2 = ((REG2 >> 8) | REG2) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+
+ REG1 = ((REG1 >> 16) | REG1) & static_cast<glm::uint64>(0x00000000FFFFFFFFull);
+ REG2 = ((REG2 >> 16) | REG2) & static_cast<glm::uint64>(0x00000000FFFFFFFFull);
+
+ return glm::u32vec2(REG1, REG2);
+ }
+
+ GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z)
+ {
+ union sign8
+ {
+ int8 i;
+ uint8 u;
+ } sign_x, sign_y, sign_z;
+
+ union sign32
+ {
+ int32 i;
+ uint32 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ sign_z.i = z;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z)
+ {
+ return detail::bitfieldInterleave<uint8, uint32>(x, y, z);
+ }
+
+ GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec3 const& v)
+ {
+ return detail::bitfieldInterleave<uint8, uint32>(v.x, v.y, v.z);
+ }
+
+ GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z)
+ {
+ union sign16
+ {
+ int16 i;
+ uint16 u;
+ } sign_x, sign_y, sign_z;
+
+ union sign64
+ {
+ int64 i;
+ uint64 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ sign_z.i = z;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z)
+ {
+ return detail::bitfieldInterleave<uint32, uint64>(x, y, z);
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec3 const& v)
+ {
+ return detail::bitfieldInterleave<uint32, uint64>(v.x, v.y, v.z);
+ }
+
+ GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y, int32 z)
+ {
+ union sign16
+ {
+ int32 i;
+ uint32 u;
+ } sign_x, sign_y, sign_z;
+
+ union sign64
+ {
+ int64 i;
+ uint64 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ sign_z.i = z;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z)
+ {
+ return detail::bitfieldInterleave<uint32, uint64>(x, y, z);
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u32vec3 const& v)
+ {
+ return detail::bitfieldInterleave<uint32, uint64>(v.x, v.y, v.z);
+ }
+
+ GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w)
+ {
+ union sign8
+ {
+ int8 i;
+ uint8 u;
+ } sign_x, sign_y, sign_z, sign_w;
+
+ union sign32
+ {
+ int32 i;
+ uint32 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ sign_z.i = z;
+ sign_w.i = w;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w)
+ {
+ return detail::bitfieldInterleave<uint8, uint32>(x, y, z, w);
+ }
+
+ GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec4 const& v)
+ {
+ return detail::bitfieldInterleave<uint8, uint32>(v.x, v.y, v.z, v.w);
+ }
+
+ GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w)
+ {
+ union sign16
+ {
+ int16 i;
+ uint16 u;
+ } sign_x, sign_y, sign_z, sign_w;
+
+ union sign64
+ {
+ int64 i;
+ uint64 u;
+ } result;
+
+ sign_x.i = x;
+ sign_y.i = y;
+ sign_z.i = z;
+ sign_w.i = w;
+ result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u);
+
+ return result.i;
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w)
+ {
+ return detail::bitfieldInterleave<uint16, uint64>(x, y, z, w);
+ }
+
+ GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec4 const& v)
+ {
+ return detail::bitfieldInterleave<uint16, uint64>(v.x, v.y, v.z, v.w);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/color_space.hpp b/3rdparty/glm/source/glm/gtc/color_space.hpp
new file mode 100644
index 0000000..cffd9f0
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/color_space.hpp
@@ -0,0 +1,56 @@
+/// @ref gtc_color_space
+/// @file glm/gtc/color_space.hpp
+///
+/// @see core (dependence)
+/// @see gtc_color_space (dependence)
+///
+/// @defgroup gtc_color_space GLM_GTC_color_space
+/// @ingroup gtc
+///
+/// Include <glm/gtc/color_space.hpp> to use the features of this extension.
+///
+/// Allow to perform conversions between linear RGB and sRGB color spaces.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../exponential.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_color_space extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_color_space
+ /// @{
+
+ /// Convert a linear color to sRGB color using a standard gamma correction.
+ /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear);
+
+ /// Convert a linear color to sRGB color using a custom gamma correction.
+ /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear, T Gamma);
+
+ /// Convert a sRGB color to linear color using a standard gamma correction.
+ /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB);
+
+ /// Convert a sRGB color to linear color using a custom gamma correction.
+	/// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB, T Gamma);
+
+ /// @}
+} //namespace glm
+
+#include "color_space.inl"
diff --git a/3rdparty/glm/source/glm/gtc/color_space.inl b/3rdparty/glm/source/glm/gtc/color_space.inl
new file mode 100644
index 0000000..2a90004
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/color_space.inl
@@ -0,0 +1,84 @@
+/// @ref gtc_color_space
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q>
+ struct compute_rgbToSrgb
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& ColorRGB, T GammaCorrection)
+ {
+ vec<L, T, Q> const ClampedColor(clamp(ColorRGB, static_cast<T>(0), static_cast<T>(1)));
+
+ return mix(
+ pow(ClampedColor, vec<L, T, Q>(GammaCorrection)) * static_cast<T>(1.055) - static_cast<T>(0.055),
+ ClampedColor * static_cast<T>(12.92),
+ lessThan(ClampedColor, vec<L, T, Q>(static_cast<T>(0.0031308))));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_rgbToSrgb<4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorRGB, T GammaCorrection)
+ {
+ return vec<4, T, Q>(compute_rgbToSrgb<3, T, Q>::call(vec<3, T, Q>(ColorRGB), GammaCorrection), ColorRGB.w);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_srgbToRgb
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& ColorSRGB, T Gamma)
+ {
+ return mix(
+ pow((ColorSRGB + static_cast<T>(0.055)) * static_cast<T>(0.94786729857819905213270142180095), vec<L, T, Q>(Gamma)),
+ ColorSRGB * static_cast<T>(0.07739938080495356037151702786378),
+ lessThanEqual(ColorSRGB, vec<L, T, Q>(static_cast<T>(0.04045))));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_srgbToRgb<4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorSRGB, T Gamma)
+ {
+ return vec<4, T, Q>(compute_srgbToRgb<3, T, Q>::call(vec<3, T, Q>(ColorSRGB), Gamma), ColorSRGB.w);
+ }
+ };
+}//namespace detail
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear)
+ {
+ return detail::compute_rgbToSrgb<L, T, Q>::call(ColorLinear, static_cast<T>(0.41666));
+ }
+
+ // Based on Ian Taylor http://chilliant.blogspot.fr/2012/08/srgb-approximations-for-hlsl.html
+ template<>
+ GLM_FUNC_QUALIFIER vec<3, float, lowp> convertLinearToSRGB(vec<3, float, lowp> const& ColorLinear)
+ {
+ vec<3, float, lowp> S1 = sqrt(ColorLinear);
+ vec<3, float, lowp> S2 = sqrt(S1);
+ vec<3, float, lowp> S3 = sqrt(S2);
+ return 0.662002687f * S1 + 0.684122060f * S2 - 0.323583601f * S3 - 0.0225411470f * ColorLinear;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear, T Gamma)
+ {
+ return detail::compute_rgbToSrgb<L, T, Q>::call(ColorLinear, static_cast<T>(1) / Gamma);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB)
+ {
+ return detail::compute_srgbToRgb<L, T, Q>::call(ColorSRGB, static_cast<T>(2.4));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB, T Gamma)
+ {
+ return detail::compute_srgbToRgb<L, T, Q>::call(ColorSRGB, Gamma);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/constants.hpp b/3rdparty/glm/source/glm/gtc/constants.hpp
new file mode 100644
index 0000000..99f2128
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/constants.hpp
@@ -0,0 +1,165 @@
+/// @ref gtc_constants
+/// @file glm/gtc/constants.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_constants GLM_GTC_constants
+/// @ingroup gtc
+///
+/// Include <glm/gtc/constants.hpp> to use the features of this extension.
+///
+/// Provide a list of constants and precomputed useful values.
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_constants.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_constants extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_constants
+ /// @{
+
+ /// Return 0.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType zero();
+
+ /// Return 1.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType one();
+
+ /// Return pi * 2.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType two_pi();
+
+ /// Return square root of pi.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_pi();
+
+ /// Return pi / 2.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType half_pi();
+
+ /// Return pi / 2 * 3.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType three_over_two_pi();
+
+ /// Return pi / 4.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType quarter_pi();
+
+ /// Return 1 / pi.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_pi();
+
+ /// Return 1 / (pi * 2).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_two_pi();
+
+ /// Return 2 / pi.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_pi();
+
+ /// Return 4 / pi.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType four_over_pi();
+
+ /// Return 2 / sqrt(pi).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_root_pi();
+
+ /// Return 1 / sqrt(2).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_root_two();
+
+ /// Return sqrt(pi / 2).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_half_pi();
+
+ /// Return sqrt(2 * pi).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_two_pi();
+
+ /// Return sqrt(ln(4)).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_ln_four();
+
+ /// Return e constant.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType e();
+
+ /// Return Euler's constant.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType euler();
+
+ /// Return sqrt(2).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_two();
+
+ /// Return sqrt(3).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_three();
+
+ /// Return sqrt(5).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType root_five();
+
+ /// Return ln(2).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType ln_two();
+
+ /// Return ln(10).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ten();
+
+ /// Return ln(ln(2)).
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ln_two();
+
+ /// Return 1 / 3.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType third();
+
+ /// Return 2 / 3.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType two_thirds();
+
+ /// Return the golden ratio constant.
+ /// @see gtc_constants
+ template<typename genType>
+ GLM_FUNC_DECL GLM_CONSTEXPR genType golden_ratio();
+
+ /// @}
+} //namespace glm
+
+#include "constants.inl"
diff --git a/3rdparty/glm/source/glm/gtc/constants.inl b/3rdparty/glm/source/glm/gtc/constants.inl
new file mode 100644
index 0000000..bb98c6b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/constants.inl
@@ -0,0 +1,167 @@
+/// @ref gtc_constants
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType zero()
+ {
+ return genType(0);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one()
+ {
+ return genType(1);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_pi()
+ {
+ return genType(6.28318530717958647692528676655900576);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_pi()
+ {
+ return genType(1.772453850905516027);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType half_pi()
+ {
+ return genType(1.57079632679489661923132169163975144);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType three_over_two_pi()
+ {
+ return genType(4.71238898038468985769396507491925432);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType quarter_pi()
+ {
+ return genType(0.785398163397448309615660845819875721);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_pi()
+ {
+ return genType(0.318309886183790671537767526745028724);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_two_pi()
+ {
+ return genType(0.159154943091895335768883763372514362);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_pi()
+ {
+ return genType(0.636619772367581343075535053490057448);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType four_over_pi()
+ {
+ return genType(1.273239544735162686151070106980114898);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_root_pi()
+ {
+ return genType(1.12837916709551257389615890312154517);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_root_two()
+ {
+ return genType(0.707106781186547524400844362104849039);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_half_pi()
+ {
+ return genType(1.253314137315500251);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two_pi()
+ {
+ return genType(2.506628274631000502);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_ln_four()
+ {
+ return genType(1.17741002251547469);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType e()
+ {
+ return genType(2.71828182845904523536);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType euler()
+ {
+ return genType(0.577215664901532860606);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two()
+ {
+ return genType(1.41421356237309504880168872420969808);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_three()
+ {
+ return genType(1.73205080756887729352744634150587236);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_five()
+ {
+ return genType(2.23606797749978969640917366873127623);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_two()
+ {
+ return genType(0.693147180559945309417232121458176568);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ten()
+ {
+ return genType(2.30258509299404568401799145468436421);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ln_two()
+ {
+ return genType(-0.3665129205816643);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType third()
+ {
+ return genType(0.3333333333333333333333333333333333333333);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_thirds()
+ {
+ return genType(0.666666666666666666666666666666666666667);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType golden_ratio()
+ {
+ return genType(1.61803398874989484820458683436563811);
+ }
+
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/epsilon.hpp b/3rdparty/glm/source/glm/gtc/epsilon.hpp
new file mode 100644
index 0000000..640439b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/epsilon.hpp
@@ -0,0 +1,60 @@
+/// @ref gtc_epsilon
+/// @file glm/gtc/epsilon.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_epsilon GLM_GTC_epsilon
+/// @ingroup gtc
+///
+/// Include <glm/gtc/epsilon.hpp> to use the features of this extension.
+///
+/// Comparison functions for a user defined epsilon values.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_epsilon extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_epsilon
+ /// @{
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @see gtc_epsilon
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is satisfied.
+ ///
+ /// @see gtc_epsilon
+ template<typename genType>
+ GLM_FUNC_DECL bool epsilonEqual(genType const& x, genType const& y, genType const& epsilon);
+
+ /// Returns the component-wise comparison of |x - y| < epsilon.
+ /// True if this expression is not satisfied.
+ ///
+ /// @see gtc_epsilon
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
+
+ /// Returns the component-wise comparison of |x - y| >= epsilon.
+ /// True if this expression is not satisfied.
+ ///
+ /// @see gtc_epsilon
+ template<typename genType>
+ GLM_FUNC_DECL bool epsilonNotEqual(genType const& x, genType const& y, genType const& epsilon);
+
+ /// @}
+}//namespace glm
+
+#include "epsilon.inl"
diff --git a/3rdparty/glm/source/glm/gtc/epsilon.inl b/3rdparty/glm/source/glm/gtc/epsilon.inl
new file mode 100644
index 0000000..508b9f8
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/epsilon.inl
@@ -0,0 +1,80 @@
+/// @ref gtc_epsilon
+
+// Dependency:
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+
+namespace glm
+{
+ template<>
+ GLM_FUNC_QUALIFIER bool epsilonEqual
+ (
+ float const& x,
+ float const& y,
+ float const& epsilon
+ )
+ {
+ return abs(x - y) < epsilon;
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER bool epsilonEqual
+ (
+ double const& x,
+ double const& y,
+ double const& epsilon
+ )
+ {
+ return abs(x - y) < epsilon;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon)
+ {
+ return lessThan(abs(x - y), vec<L, T, Q>(epsilon));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon)
+ {
+ return lessThan(abs(x - y), vec<L, T, Q>(epsilon));
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER bool epsilonNotEqual(float const& x, float const& y, float const& epsilon)
+ {
+ return abs(x - y) >= epsilon;
+ }
+
+ template<>
+ GLM_FUNC_QUALIFIER bool epsilonNotEqual(double const& x, double const& y, double const& epsilon)
+ {
+ return abs(x - y) >= epsilon;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon)
+ {
+ return greaterThanEqual(abs(x - y), vec<L, T, Q>(epsilon));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon)
+ {
+ return greaterThanEqual(abs(x - y), vec<L, T, Q>(epsilon));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonEqual(qua<T, Q> const& x, qua<T, Q> const& y, T const& epsilon)
+ {
+ vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
+ return lessThan(abs(v), vec<4, T, Q>(epsilon));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonNotEqual(qua<T, Q> const& x, qua<T, Q> const& y, T const& epsilon)
+ {
+ vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
+ return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/integer.hpp b/3rdparty/glm/source/glm/gtc/integer.hpp
new file mode 100644
index 0000000..a2a5bf6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/integer.hpp
@@ -0,0 +1,43 @@
+/// @ref gtc_integer
+/// @file glm/gtc/integer.hpp
+///
+/// @see core (dependence)
+/// @see gtc_integer (dependence)
+///
+/// @defgroup gtc_integer GLM_GTC_integer
+/// @ingroup gtc
+///
+/// Include <glm/gtc/integer.hpp> to use the features of this extension.
+///
+/// @brief Allow to perform bit operations on integer values
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../common.hpp"
+#include "../integer.hpp"
+#include "../exponential.hpp"
+#include "../ext/scalar_common.hpp"
+#include "../ext/vector_common.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_integer extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_integer
+ /// @{
+
+ /// Returns the log2 of x for integer values. Usefull to compute mipmap count from the texture size.
+ /// @see gtc_integer
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType log2(genIUType x);
+
+ /// @}
+} //namespace glm
+
+#include "integer.inl"
diff --git a/3rdparty/glm/source/glm/gtc/integer.inl b/3rdparty/glm/source/glm/gtc/integer.inl
new file mode 100644
index 0000000..5f66dfe
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/integer.inl
@@ -0,0 +1,33 @@
+/// @ref gtc_integer
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_log2<L, T, Q, false, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v)
+ {
+ //Equivalent to return findMSB(vec); but save one function call in ASM with VC
+ //return findMSB(vec);
+ return vec<L, T, Q>(detail::compute_findMSB_vec<L, T, Q, sizeof(T) * 8>::call(v));
+ }
+ };
+
+# if GLM_HAS_BITSCAN_WINDOWS
+ template<qualifier Q, bool Aligned>
+ struct compute_log2<4, int, Q, false, Aligned>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v)
+ {
+ vec<4, int, Q> Result;
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result.x), v.x);
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result.y), v.y);
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result.z), v.z);
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result.w), v.w);
+ return Result;
+ }
+ };
+# endif//GLM_HAS_BITSCAN_WINDOWS
+}//namespace detail
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/matrix_access.hpp b/3rdparty/glm/source/glm/gtc/matrix_access.hpp
new file mode 100644
index 0000000..4935ba7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_access.hpp
@@ -0,0 +1,60 @@
+/// @ref gtc_matrix_access
+/// @file glm/gtc/matrix_access.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_matrix_access GLM_GTC_matrix_access
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_access.hpp> to use the features of this extension.
+///
+/// Defines functions to access rows or columns of a matrix easily.
+
+#pragma once
+
+// Dependency:
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_matrix_access extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_matrix_access
+ /// @{
+
+ /// Get a specific row of a matrix.
+ /// @see gtc_matrix_access
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::row_type row(
+ genType const& m,
+ length_t index);
+
+ /// Set a specific row to a matrix.
+ /// @see gtc_matrix_access
+ template<typename genType>
+ GLM_FUNC_DECL genType row(
+ genType const& m,
+ length_t index,
+ typename genType::row_type const& x);
+
+ /// Get a specific column of a matrix.
+ /// @see gtc_matrix_access
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::col_type column(
+ genType const& m,
+ length_t index);
+
+ /// Set a specific column to a matrix.
+ /// @see gtc_matrix_access
+ template<typename genType>
+ GLM_FUNC_DECL genType column(
+ genType const& m,
+ length_t index,
+ typename genType::col_type const& x);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_access.inl"
diff --git a/3rdparty/glm/source/glm/gtc/matrix_access.inl b/3rdparty/glm/source/glm/gtc/matrix_access.inl
new file mode 100644
index 0000000..09fcc10
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_access.inl
@@ -0,0 +1,62 @@
+/// @ref gtc_matrix_access
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType row
+ (
+ genType const& m,
+ length_t index,
+ typename genType::row_type const& x
+ )
+ {
+ assert(index >= 0 && index < m[0].length());
+
+ genType Result = m;
+ for(length_t i = 0; i < m.length(); ++i)
+ Result[i][index] = x[i];
+ return Result;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER typename genType::row_type row
+ (
+ genType const& m,
+ length_t index
+ )
+ {
+ assert(index >= 0 && index < m[0].length());
+
+ typename genType::row_type Result(0);
+ for(length_t i = 0; i < m.length(); ++i)
+ Result[i] = m[i][index];
+ return Result;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType column
+ (
+ genType const& m,
+ length_t index,
+ typename genType::col_type const& x
+ )
+ {
+ assert(index >= 0 && index < m.length());
+
+ genType Result = m;
+ Result[index] = x;
+ return Result;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER typename genType::col_type column
+ (
+ genType const& m,
+ length_t index
+ )
+ {
+ assert(index >= 0 && index < m.length());
+
+ return m[index];
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/matrix_integer.hpp b/3rdparty/glm/source/glm/gtc/matrix_integer.hpp
new file mode 100644
index 0000000..d7ebdc7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_integer.hpp
@@ -0,0 +1,433 @@
+/// @ref gtc_matrix_integer
+/// @file glm/gtc/matrix_integer.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_matrix_integer GLM_GTC_matrix_integer
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_integer.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x2.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x4.hpp"
+#include "../mat3x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x4.hpp"
+#include "../mat4x2.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_matrix_integer extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_matrix_integer
+ /// @{
+
+ /// High-qualifier signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, highp> highp_imat2;
+
+ /// High-qualifier signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, highp> highp_imat3;
+
+ /// High-qualifier signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, highp> highp_imat4;
+
+ /// High-qualifier signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, highp> highp_imat2x2;
+
+ /// High-qualifier signed integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, int, highp> highp_imat2x3;
+
+ /// High-qualifier signed integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, int, highp> highp_imat2x4;
+
+ /// High-qualifier signed integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, int, highp> highp_imat3x2;
+
+ /// High-qualifier signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, highp> highp_imat3x3;
+
+ /// High-qualifier signed integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, int, highp> highp_imat3x4;
+
+ /// High-qualifier signed integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, int, highp> highp_imat4x2;
+
+ /// High-qualifier signed integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, int, highp> highp_imat4x3;
+
+ /// High-qualifier signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, highp> highp_imat4x4;
+
+
+ /// Medium-qualifier signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, mediump> mediump_imat2;
+
+ /// Medium-qualifier signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, mediump> mediump_imat3;
+
+ /// Medium-qualifier signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, mediump> mediump_imat4;
+
+
+ /// Medium-qualifier signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, mediump> mediump_imat2x2;
+
+ /// Medium-qualifier signed integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, int, mediump> mediump_imat2x3;
+
+ /// Medium-qualifier signed integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, int, mediump> mediump_imat2x4;
+
+ /// Medium-qualifier signed integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, int, mediump> mediump_imat3x2;
+
+ /// Medium-qualifier signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, mediump> mediump_imat3x3;
+
+ /// Medium-qualifier signed integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, int, mediump> mediump_imat3x4;
+
+ /// Medium-qualifier signed integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, int, mediump> mediump_imat4x2;
+
+ /// Medium-qualifier signed integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, int, mediump> mediump_imat4x3;
+
+ /// Medium-qualifier signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, mediump> mediump_imat4x4;
+
+
+ /// Low-qualifier signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, lowp> lowp_imat2;
+
+ /// Low-qualifier signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, lowp> lowp_imat3;
+
+ /// Low-qualifier signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, lowp> lowp_imat4;
+
+
+ /// Low-qualifier signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, lowp> lowp_imat2x2;
+
+ /// Low-qualifier signed integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, int, lowp> lowp_imat2x3;
+
+ /// Low-qualifier signed integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, int, lowp> lowp_imat2x4;
+
+ /// Low-qualifier signed integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, int, lowp> lowp_imat3x2;
+
+ /// Low-qualifier signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, lowp> lowp_imat3x3;
+
+ /// Low-qualifier signed integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, int, lowp> lowp_imat3x4;
+
+ /// Low-qualifier signed integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, int, lowp> lowp_imat4x2;
+
+ /// Low-qualifier signed integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, int, lowp> lowp_imat4x3;
+
+ /// Low-qualifier signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, lowp> lowp_imat4x4;
+
+
+ /// High-qualifier unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, highp> highp_umat2;
+
+ /// High-qualifier unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, highp> highp_umat3;
+
+ /// High-qualifier unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, highp> highp_umat4;
+
+ /// High-qualifier unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, highp> highp_umat2x2;
+
+ /// High-qualifier unsigned integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, uint, highp> highp_umat2x3;
+
+ /// High-qualifier unsigned integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, uint, highp> highp_umat2x4;
+
+ /// High-qualifier unsigned integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, uint, highp> highp_umat3x2;
+
+ /// High-qualifier unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, highp> highp_umat3x3;
+
+ /// High-qualifier unsigned integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, uint, highp> highp_umat3x4;
+
+ /// High-qualifier unsigned integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, uint, highp> highp_umat4x2;
+
+ /// High-qualifier unsigned integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, uint, highp> highp_umat4x3;
+
+ /// High-qualifier unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, highp> highp_umat4x4;
+
+
+ /// Medium-qualifier unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, mediump> mediump_umat2;
+
+ /// Medium-qualifier unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, mediump> mediump_umat3;
+
+ /// Medium-qualifier unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, mediump> mediump_umat4;
+
+
+ /// Medium-qualifier unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, mediump> mediump_umat2x2;
+
+ /// Medium-qualifier unsigned integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, uint, mediump> mediump_umat2x3;
+
+ /// Medium-qualifier unsigned integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, uint, mediump> mediump_umat2x4;
+
+ /// Medium-qualifier unsigned integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, uint, mediump> mediump_umat3x2;
+
+ /// Medium-qualifier unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, mediump> mediump_umat3x3;
+
+ /// Medium-qualifier unsigned integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, uint, mediump> mediump_umat3x4;
+
+ /// Medium-qualifier unsigned integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, uint, mediump> mediump_umat4x2;
+
+ /// Medium-qualifier unsigned integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, uint, mediump> mediump_umat4x3;
+
+ /// Medium-qualifier unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, mediump> mediump_umat4x4;
+
+
+ /// Low-qualifier unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, lowp> lowp_umat2;
+
+ /// Low-qualifier unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, lowp> lowp_umat3;
+
+ /// Low-qualifier unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, lowp> lowp_umat4;
+
+
+ /// Low-qualifier unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, lowp> lowp_umat2x2;
+
+ /// Low-qualifier unsigned integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, uint, lowp> lowp_umat2x3;
+
+ /// Low-qualifier unsigned integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, uint, lowp> lowp_umat2x4;
+
+ /// Low-qualifier unsigned integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, uint, lowp> lowp_umat3x2;
+
+ /// Low-qualifier unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, lowp> lowp_umat3x3;
+
+ /// Low-qualifier unsigned integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, uint, lowp> lowp_umat3x4;
+
+ /// Low-qualifier unsigned integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, uint, lowp> lowp_umat4x2;
+
+ /// Low-qualifier unsigned integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, uint, lowp> lowp_umat4x3;
+
+ /// Low-qualifier unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, lowp> lowp_umat4x4;
+
+
+
+ /// Signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, defaultp> imat2;
+
+ /// Signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, defaultp> imat3;
+
+ /// Signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, defaultp> imat4;
+
+ /// Signed integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, int, defaultp> imat2x2;
+
+ /// Signed integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, int, defaultp> imat2x3;
+
+ /// Signed integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, int, defaultp> imat2x4;
+
+ /// Signed integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, int, defaultp> imat3x2;
+
+ /// Signed integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, int, defaultp> imat3x3;
+
+ /// Signed integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, int, defaultp> imat3x4;
+
+ /// Signed integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, int, defaultp> imat4x2;
+
+ /// Signed integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, int, defaultp> imat4x3;
+
+ /// Signed integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, int, defaultp> imat4x4;
+
+
+
+ /// Unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, defaultp> umat2;
+
+ /// Unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, defaultp> umat3;
+
+ /// Unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, defaultp> umat4;
+
+ /// Unsigned integer 2x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 2, uint, defaultp> umat2x2;
+
+ /// Unsigned integer 2x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 3, uint, defaultp> umat2x3;
+
+ /// Unsigned integer 2x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<2, 4, uint, defaultp> umat2x4;
+
+ /// Unsigned integer 3x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 2, uint, defaultp> umat3x2;
+
+ /// Unsigned integer 3x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 3, uint, defaultp> umat3x3;
+
+ /// Unsigned integer 3x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<3, 4, uint, defaultp> umat3x4;
+
+ /// Unsigned integer 4x2 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 2, uint, defaultp> umat4x2;
+
+ /// Unsigned integer 4x3 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 3, uint, defaultp> umat4x3;
+
+ /// Unsigned integer 4x4 matrix.
+ /// @see gtc_matrix_integer
+ typedef mat<4, 4, uint, defaultp> umat4x4;
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/matrix_inverse.hpp b/3rdparty/glm/source/glm/gtc/matrix_inverse.hpp
new file mode 100644
index 0000000..8f77860
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_inverse.hpp
@@ -0,0 +1,50 @@
+/// @ref gtc_matrix_inverse
+/// @file glm/gtc/matrix_inverse.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_matrix_inverse GLM_GTC_matrix_inverse
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_inverse.hpp> to use the features of this extension.
+///
+/// Defines additional matrix inverting functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../matrix.hpp"
+#include "../mat2x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_matrix_inverse extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_matrix_inverse
+ /// @{
+
+ /// Fast matrix inverse for affine matrix.
+ ///
+ /// @param m Input matrix to invert.
+ /// @tparam genType Squared floating-point matrix: half, float or double. Inverse of matrix based of half-qualifier floating point value is highly innacurate.
+ /// @see gtc_matrix_inverse
+ template<typename genType>
+ GLM_FUNC_DECL genType affineInverse(genType const& m);
+
+ /// Compute the inverse transpose of a matrix.
+ ///
+ /// @param m Input matrix to invert transpose.
+ /// @tparam genType Squared floating-point matrix: half, float or double. Inverse of matrix based of half-qualifier floating point value is highly innacurate.
+ /// @see gtc_matrix_inverse
+ template<typename genType>
+ GLM_FUNC_DECL genType inverseTranspose(genType const& m);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_inverse.inl"
diff --git a/3rdparty/glm/source/glm/gtc/matrix_inverse.inl b/3rdparty/glm/source/glm/gtc/matrix_inverse.inl
new file mode 100644
index 0000000..c004b9e
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_inverse.inl
@@ -0,0 +1,118 @@
+/// @ref gtc_matrix_inverse
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> affineInverse(mat<3, 3, T, Q> const& m)
+ {
+ mat<2, 2, T, Q> const Inv(inverse(mat<2, 2, T, Q>(m)));
+
+ return mat<3, 3, T, Q>(
+ vec<3, T, Q>(Inv[0], static_cast<T>(0)),
+ vec<3, T, Q>(Inv[1], static_cast<T>(0)),
+ vec<3, T, Q>(-Inv * vec<2, T, Q>(m[2]), static_cast<T>(1)));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> affineInverse(mat<4, 4, T, Q> const& m)
+ {
+ mat<3, 3, T, Q> const Inv(inverse(mat<3, 3, T, Q>(m)));
+
+ return mat<4, 4, T, Q>(
+ vec<4, T, Q>(Inv[0], static_cast<T>(0)),
+ vec<4, T, Q>(Inv[1], static_cast<T>(0)),
+ vec<4, T, Q>(Inv[2], static_cast<T>(0)),
+ vec<4, T, Q>(-Inv * vec<3, T, Q>(m[3]), static_cast<T>(1)));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> inverseTranspose(mat<2, 2, T, Q> const& m)
+ {
+ T Determinant = m[0][0] * m[1][1] - m[1][0] * m[0][1];
+
+ mat<2, 2, T, Q> Inverse(
+ + m[1][1] / Determinant,
+ - m[0][1] / Determinant,
+ - m[1][0] / Determinant,
+ + m[0][0] / Determinant);
+
+ return Inverse;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> inverseTranspose(mat<3, 3, T, Q> const& m)
+ {
+ T Determinant =
+ + m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
+ - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
+ + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);
+
+ mat<3, 3, T, Q> Inverse;
+ Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]);
+ Inverse[0][1] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]);
+ Inverse[0][2] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]);
+ Inverse[1][0] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]);
+ Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]);
+ Inverse[1][2] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]);
+ Inverse[2][0] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]);
+ Inverse[2][1] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]);
+ Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]);
+ Inverse /= Determinant;
+
+ return Inverse;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> inverseTranspose(mat<4, 4, T, Q> const& m)
+ {
+ T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ T SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ T SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ T SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ T SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ T SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ T SubFactor11 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ T SubFactor12 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+ T SubFactor13 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+ T SubFactor14 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+ T SubFactor15 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+ T SubFactor16 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+ T SubFactor17 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ mat<4, 4, T, Q> Inverse;
+ Inverse[0][0] = + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02);
+ Inverse[0][1] = - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04);
+ Inverse[0][2] = + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05);
+ Inverse[0][3] = - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05);
+
+ Inverse[1][0] = - (m[0][1] * SubFactor00 - m[0][2] * SubFactor01 + m[0][3] * SubFactor02);
+ Inverse[1][1] = + (m[0][0] * SubFactor00 - m[0][2] * SubFactor03 + m[0][3] * SubFactor04);
+ Inverse[1][2] = - (m[0][0] * SubFactor01 - m[0][1] * SubFactor03 + m[0][3] * SubFactor05);
+ Inverse[1][3] = + (m[0][0] * SubFactor02 - m[0][1] * SubFactor04 + m[0][2] * SubFactor05);
+
+ Inverse[2][0] = + (m[0][1] * SubFactor06 - m[0][2] * SubFactor07 + m[0][3] * SubFactor08);
+ Inverse[2][1] = - (m[0][0] * SubFactor06 - m[0][2] * SubFactor09 + m[0][3] * SubFactor10);
+ Inverse[2][2] = + (m[0][0] * SubFactor07 - m[0][1] * SubFactor09 + m[0][3] * SubFactor11);
+ Inverse[2][3] = - (m[0][0] * SubFactor08 - m[0][1] * SubFactor10 + m[0][2] * SubFactor11);
+
+ Inverse[3][0] = - (m[0][1] * SubFactor12 - m[0][2] * SubFactor13 + m[0][3] * SubFactor14);
+ Inverse[3][1] = + (m[0][0] * SubFactor12 - m[0][2] * SubFactor15 + m[0][3] * SubFactor16);
+ Inverse[3][2] = - (m[0][0] * SubFactor13 - m[0][1] * SubFactor15 + m[0][3] * SubFactor17);
+ Inverse[3][3] = + (m[0][0] * SubFactor14 - m[0][1] * SubFactor16 + m[0][2] * SubFactor17);
+
+ T Determinant =
+ + m[0][0] * Inverse[0][0]
+ + m[0][1] * Inverse[0][1]
+ + m[0][2] * Inverse[0][2]
+ + m[0][3] * Inverse[0][3];
+
+ Inverse /= Determinant;
+
+ return Inverse;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/matrix_transform.hpp b/3rdparty/glm/source/glm/gtc/matrix_transform.hpp
new file mode 100644
index 0000000..612418f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_transform.hpp
@@ -0,0 +1,36 @@
+/// @ref gtc_matrix_transform
+/// @file glm/gtc/matrix_transform.hpp
+///
+/// @see core (dependence)
+/// @see gtx_transform
+/// @see gtx_transform2
+///
+/// @defgroup gtc_matrix_transform GLM_GTC_matrix_transform
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_transform.hpp> to use the features of this extension.
+///
+/// Defines functions that generate common transformation matrices.
+///
+/// The matrices generated by this extension use standard OpenGL fixed-function
+/// conventions. For example, the lookAt function generates a transform from world
+/// space into the specific eye space that the projective matrix functions
+/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility
+/// specifications defines the particular layout of this eye space.
+
+#pragma once
+
+// Dependencies
+#include "../mat4x4.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../ext/matrix_projection.hpp"
+#include "../ext/matrix_clip_space.hpp"
+#include "../ext/matrix_transform.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_matrix_transform extension included")
+#endif
+
+#include "matrix_transform.inl"
diff --git a/3rdparty/glm/source/glm/gtc/matrix_transform.inl b/3rdparty/glm/source/glm/gtc/matrix_transform.inl
new file mode 100644
index 0000000..15b46bc
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/matrix_transform.inl
@@ -0,0 +1,3 @@
+#include "../geometric.hpp"
+#include "../trigonometric.hpp"
+#include "../matrix.hpp"
diff --git a/3rdparty/glm/source/glm/gtc/noise.hpp b/3rdparty/glm/source/glm/gtc/noise.hpp
new file mode 100644
index 0000000..ab1772e
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/noise.hpp
@@ -0,0 +1,61 @@
+/// @ref gtc_noise
+/// @file glm/gtc/noise.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_noise GLM_GTC_noise
+/// @ingroup gtc
+///
+/// Include <glm/gtc/noise.hpp> to use the features of this extension.
+///
+/// Defines 2D, 3D and 4D procedural noise functions
+/// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise":
+/// https://github.com/ashima/webgl-noise
+/// Following Stefan Gustavson's paper "Simplex noise demystified":
+/// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_noise.hpp"
+#include "../geometric.hpp"
+#include "../common.hpp"
+#include "../vector_relational.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_noise extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_noise
+ /// @{
+
+ /// Classic perlin noise.
+ /// @see gtc_noise
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T perlin(
+ vec<L, T, Q> const& p);
+
+ /// Periodic perlin noise.
+ /// @see gtc_noise
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T perlin(
+ vec<L, T, Q> const& p,
+ vec<L, T, Q> const& rep);
+
+ /// Simplex noise.
+ /// @see gtc_noise
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T simplex(
+ vec<L, T, Q> const& p);
+
+ /// @}
+}//namespace glm
+
+#include "noise.inl"
diff --git a/3rdparty/glm/source/glm/gtc/noise.inl b/3rdparty/glm/source/glm/gtc/noise.inl
new file mode 100644
index 0000000..30d0b27
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/noise.inl
@@ -0,0 +1,807 @@
+/// @ref gtc_noise
+///
+// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise":
+// https://github.com/ashima/webgl-noise
+// Following Stefan Gustavson's paper "Simplex noise demystified":
+// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf
+
+namespace glm{
+namespace gtc
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> grad4(T const& j, vec<4, T, Q> const& ip)
+ {
+ vec<3, T, Q> pXYZ = floor(fract(vec<3, T, Q>(j) * vec<3, T, Q>(ip)) * T(7)) * ip[2] - T(1);
+ T pW = static_cast<T>(1.5) - dot(abs(pXYZ), vec<3, T, Q>(1));
+ vec<4, T, Q> s = vec<4, T, Q>(lessThan(vec<4, T, Q>(pXYZ, pW), vec<4, T, Q>(0.0)));
+ pXYZ = pXYZ + (vec<3, T, Q>(s) * T(2) - T(1)) * s.w;
+ return vec<4, T, Q>(pXYZ, pW);
+ }
+}//namespace gtc
+
+ // Classic Perlin noise
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position)
+ {
+ vec<4, T, Q> Pi = glm::floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0);
+ vec<4, T, Q> Pf = glm::fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0);
+ Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation
+ vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z);
+ vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w);
+ vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z);
+ vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w);
+
+ vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy);
+
+ vec<4, T, Q> gx = static_cast<T>(2) * glm::fract(i / T(41)) - T(1);
+ vec<4, T, Q> gy = glm::abs(gx) - T(0.5);
+ vec<4, T, Q> tx = glm::floor(gx + T(0.5));
+ gx = gx - tx;
+
+ vec<2, T, Q> g00(gx.x, gy.x);
+ vec<2, T, Q> g10(gx.y, gy.y);
+ vec<2, T, Q> g01(gx.z, gy.z);
+ vec<2, T, Q> g11(gx.w, gy.w);
+
+ vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11)));
+ g00 *= norm.x;
+ g01 *= norm.y;
+ g10 *= norm.z;
+ g11 *= norm.w;
+
+ T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x));
+ T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y));
+ T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z));
+ T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w));
+
+ vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y));
+ vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x);
+ T n_xy = mix(n_x.x, n_x.y, fade_xy.y);
+ return T(2.3) * n_xy;
+ }
+
+ // Classic Perlin noise
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position)
+ {
+ vec<3, T, Q> Pi0 = floor(Position); // Integer part for indexing
+ vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1
+ Pi0 = detail::mod289(Pi0);
+ Pi1 = detail::mod289(Pi1);
+ vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation
+ vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0
+ vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
+ vec<4, T, Q> iy = vec<4, T, Q>(vec<2, T, Q>(Pi0.y), vec<2, T, Q>(Pi1.y));
+ vec<4, T, Q> iz0(Pi0.z);
+ vec<4, T, Q> iz1(Pi1.z);
+
+ vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy);
+ vec<4, T, Q> ixy0 = detail::permute(ixy + iz0);
+ vec<4, T, Q> ixy1 = detail::permute(ixy + iz1);
+
+ vec<4, T, Q> gx0 = ixy0 * T(1.0 / 7.0);
+ vec<4, T, Q> gy0 = fract(floor(gx0) * T(1.0 / 7.0)) - T(0.5);
+ gx0 = fract(gx0);
+ vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0);
+ vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0));
+ gx0 -= sz0 * (step(T(0), gx0) - T(0.5));
+ gy0 -= sz0 * (step(T(0), gy0) - T(0.5));
+
+ vec<4, T, Q> gx1 = ixy1 * T(1.0 / 7.0);
+ vec<4, T, Q> gy1 = fract(floor(gx1) * T(1.0 / 7.0)) - T(0.5);
+ gx1 = fract(gx1);
+ vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1);
+ vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0));
+ gx1 -= sz1 * (step(T(0), gx1) - T(0.5));
+ gy1 -= sz1 * (step(T(0), gy1) - T(0.5));
+
+ vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x);
+ vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y);
+ vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z);
+ vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w);
+ vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x);
+ vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y);
+ vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z);
+ vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w);
+
+ vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
+ g000 *= norm0.x;
+ g010 *= norm0.y;
+ g100 *= norm0.z;
+ g110 *= norm0.w;
+ vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
+ g001 *= norm1.x;
+ g011 *= norm1.y;
+ g101 *= norm1.z;
+ g111 *= norm1.w;
+
+ T n000 = dot(g000, Pf0);
+ T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z));
+ T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z));
+ T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z));
+ T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z));
+ T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z));
+ T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z));
+ T n111 = dot(g111, Pf1);
+
+ vec<3, T, Q> fade_xyz = detail::fade(Pf0);
+ vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z);
+ vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y);
+ T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
+ return T(2.2) * n_xyz;
+ }
+ /*
+ // Classic Perlin noise
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& P)
+ {
+ vec<3, T, Q> Pi0 = floor(P); // Integer part for indexing
+ vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1
+ Pi0 = mod(Pi0, T(289));
+ Pi1 = mod(Pi1, T(289));
+ vec<3, T, Q> Pf0 = fract(P); // Fractional part for interpolation
+ vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0
+ vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
+ vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y);
+ vec<4, T, Q> iz0(Pi0.z);
+ vec<4, T, Q> iz1(Pi1.z);
+
+ vec<4, T, Q> ixy = permute(permute(ix) + iy);
+ vec<4, T, Q> ixy0 = permute(ixy + iz0);
+ vec<4, T, Q> ixy1 = permute(ixy + iz1);
+
+ vec<4, T, Q> gx0 = ixy0 / T(7);
+ vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5);
+ gx0 = fract(gx0);
+ vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0);
+ vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0));
+ gx0 -= sz0 * (step(0.0, gx0) - T(0.5));
+ gy0 -= sz0 * (step(0.0, gy0) - T(0.5));
+
+ vec<4, T, Q> gx1 = ixy1 / T(7);
+ vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5);
+ gx1 = fract(gx1);
+ vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1);
+ vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0));
+ gx1 -= sz1 * (step(T(0), gx1) - T(0.5));
+ gy1 -= sz1 * (step(T(0), gy1) - T(0.5));
+
+ vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x);
+ vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y);
+ vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z);
+ vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w);
+ vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x);
+ vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y);
+ vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z);
+ vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w);
+
+ vec<4, T, Q> norm0 = taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
+ g000 *= norm0.x;
+ g010 *= norm0.y;
+ g100 *= norm0.z;
+ g110 *= norm0.w;
+ vec<4, T, Q> norm1 = taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
+ g001 *= norm1.x;
+ g011 *= norm1.y;
+ g101 *= norm1.z;
+ g111 *= norm1.w;
+
+ T n000 = dot(g000, Pf0);
+ T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z));
+ T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z));
+ T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z));
+ T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z));
+ T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z));
+ T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z));
+ T n111 = dot(g111, Pf1);
+
+ vec<3, T, Q> fade_xyz = fade(Pf0);
+ vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z);
+ vec<2, T, Q> n_yz = mix(
+ vec<2, T, Q>(n_z.x, n_z.y),
+ vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y);
+ T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
+ return T(2.2) * n_xyz;
+ }
+ */
+ // Classic Perlin noise
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position)
+ {
+ vec<4, T, Q> Pi0 = floor(Position); // Integer part for indexing
+ vec<4, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1
+ Pi0 = mod(Pi0, vec<4, T, Q>(289));
+ Pi1 = mod(Pi1, vec<4, T, Q>(289));
+ vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation
+ vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0
+ vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
+ vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y);
+ vec<4, T, Q> iz0(Pi0.z);
+ vec<4, T, Q> iz1(Pi1.z);
+ vec<4, T, Q> iw0(Pi0.w);
+ vec<4, T, Q> iw1(Pi1.w);
+
+ vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy);
+ vec<4, T, Q> ixy0 = detail::permute(ixy + iz0);
+ vec<4, T, Q> ixy1 = detail::permute(ixy + iz1);
+ vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0);
+ vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1);
+ vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0);
+ vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1);
+
+ vec<4, T, Q> gx00 = ixy00 / T(7);
+ vec<4, T, Q> gy00 = floor(gx00) / T(7);
+ vec<4, T, Q> gz00 = floor(gy00) / T(6);
+ gx00 = fract(gx00) - T(0.5);
+ gy00 = fract(gy00) - T(0.5);
+ gz00 = fract(gz00) - T(0.5);
+ vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00);
+ vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0.0));
+ gx00 -= sw00 * (step(T(0), gx00) - T(0.5));
+ gy00 -= sw00 * (step(T(0), gy00) - T(0.5));
+
+ vec<4, T, Q> gx01 = ixy01 / T(7);
+ vec<4, T, Q> gy01 = floor(gx01) / T(7);
+ vec<4, T, Q> gz01 = floor(gy01) / T(6);
+ gx01 = fract(gx01) - T(0.5);
+ gy01 = fract(gy01) - T(0.5);
+ gz01 = fract(gz01) - T(0.5);
+ vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01);
+ vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0));
+ gx01 -= sw01 * (step(T(0), gx01) - T(0.5));
+ gy01 -= sw01 * (step(T(0), gy01) - T(0.5));
+
+ vec<4, T, Q> gx10 = ixy10 / T(7);
+ vec<4, T, Q> gy10 = floor(gx10) / T(7);
+ vec<4, T, Q> gz10 = floor(gy10) / T(6);
+ gx10 = fract(gx10) - T(0.5);
+ gy10 = fract(gy10) - T(0.5);
+ gz10 = fract(gz10) - T(0.5);
+ vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10);
+ vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0));
+ gx10 -= sw10 * (step(T(0), gx10) - T(0.5));
+ gy10 -= sw10 * (step(T(0), gy10) - T(0.5));
+
+ vec<4, T, Q> gx11 = ixy11 / T(7);
+ vec<4, T, Q> gy11 = floor(gx11) / T(7);
+ vec<4, T, Q> gz11 = floor(gy11) / T(6);
+ gx11 = fract(gx11) - T(0.5);
+ gy11 = fract(gy11) - T(0.5);
+ gz11 = fract(gz11) - T(0.5);
+ vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11);
+ vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(0.0));
+ gx11 -= sw11 * (step(T(0), gx11) - T(0.5));
+ gy11 -= sw11 * (step(T(0), gy11) - T(0.5));
+
+ vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x);
+ vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y);
+ vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z);
+ vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w);
+ vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x);
+ vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y);
+ vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z);
+ vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w);
+ vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x);
+ vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y);
+ vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z);
+ vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w);
+ vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x);
+ vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y);
+ vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z);
+ vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w);
+
+ vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100)));
+ g0000 *= norm00.x;
+ g0100 *= norm00.y;
+ g1000 *= norm00.z;
+ g1100 *= norm00.w;
+
+ vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101)));
+ g0001 *= norm01.x;
+ g0101 *= norm01.y;
+ g1001 *= norm01.z;
+ g1101 *= norm01.w;
+
+ vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110)));
+ g0010 *= norm10.x;
+ g0110 *= norm10.y;
+ g1010 *= norm10.z;
+ g1110 *= norm10.w;
+
+ vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111)));
+ g0011 *= norm11.x;
+ g0111 *= norm11.y;
+ g1011 *= norm11.z;
+ g1111 *= norm11.w;
+
+ T n0000 = dot(g0000, Pf0);
+ T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w));
+ T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w));
+ T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w));
+ T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w));
+ T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w));
+ T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w));
+ T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w));
+ T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w));
+ T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w));
+ T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w));
+ T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w));
+ T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w));
+ T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w));
+ T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w));
+ T n1111 = dot(g1111, Pf1);
+
+ vec<4, T, Q> fade_xyzw = detail::fade(Pf0);
+ vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w);
+ vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w);
+ vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z);
+ vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y);
+ T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x);
+ return T(2.2) * n_xyzw;
+ }
+
+ // Classic Perlin noise, periodic variant
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position, vec<2, T, Q> const& rep)
+ {
+ vec<4, T, Q> Pi = floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0);
+ vec<4, T, Q> Pf = fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0);
+ Pi = mod(Pi, vec<4, T, Q>(rep.x, rep.y, rep.x, rep.y)); // To create noise with explicit period
+ Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation
+ vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z);
+ vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w);
+ vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z);
+ vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w);
+
+ vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy);
+
+ vec<4, T, Q> gx = static_cast<T>(2) * fract(i / T(41)) - T(1);
+ vec<4, T, Q> gy = abs(gx) - T(0.5);
+ vec<4, T, Q> tx = floor(gx + T(0.5));
+ gx = gx - tx;
+
+ vec<2, T, Q> g00(gx.x, gy.x);
+ vec<2, T, Q> g10(gx.y, gy.y);
+ vec<2, T, Q> g01(gx.z, gy.z);
+ vec<2, T, Q> g11(gx.w, gy.w);
+
+ vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11)));
+ g00 *= norm.x;
+ g01 *= norm.y;
+ g10 *= norm.z;
+ g11 *= norm.w;
+
+ T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x));
+ T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y));
+ T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z));
+ T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w));
+
+ vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y));
+ vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x);
+ T n_xy = mix(n_x.x, n_x.y, fade_xy.y);
+ return T(2.3) * n_xy;
+ }
+
+ // Classic Perlin noise, periodic variant
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position, vec<3, T, Q> const& rep)
+ {
+ vec<3, T, Q> Pi0 = mod(floor(Position), rep); // Integer part, modulo period
+ vec<3, T, Q> Pi1 = mod(Pi0 + vec<3, T, Q>(T(1)), rep); // Integer part + 1, mod period
+ Pi0 = mod(Pi0, vec<3, T, Q>(289));
+ Pi1 = mod(Pi1, vec<3, T, Q>(289));
+ vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation
+ vec<3, T, Q> Pf1 = Pf0 - vec<3, T, Q>(T(1)); // Fractional part - 1.0
+ vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
+ vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y);
+ vec<4, T, Q> iz0(Pi0.z);
+ vec<4, T, Q> iz1(Pi1.z);
+
+ vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy);
+ vec<4, T, Q> ixy0 = detail::permute(ixy + iz0);
+ vec<4, T, Q> ixy1 = detail::permute(ixy + iz1);
+
+ vec<4, T, Q> gx0 = ixy0 / T(7);
+ vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5);
+ gx0 = fract(gx0);
+ vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0);
+ vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0));
+ gx0 -= sz0 * (step(T(0), gx0) - T(0.5));
+ gy0 -= sz0 * (step(T(0), gy0) - T(0.5));
+
+ vec<4, T, Q> gx1 = ixy1 / T(7);
+ vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5);
+ gx1 = fract(gx1);
+ vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1);
+ vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(T(0)));
+ gx1 -= sz1 * (step(T(0), gx1) - T(0.5));
+ gy1 -= sz1 * (step(T(0), gy1) - T(0.5));
+
+ vec<3, T, Q> g000 = vec<3, T, Q>(gx0.x, gy0.x, gz0.x);
+ vec<3, T, Q> g100 = vec<3, T, Q>(gx0.y, gy0.y, gz0.y);
+ vec<3, T, Q> g010 = vec<3, T, Q>(gx0.z, gy0.z, gz0.z);
+ vec<3, T, Q> g110 = vec<3, T, Q>(gx0.w, gy0.w, gz0.w);
+ vec<3, T, Q> g001 = vec<3, T, Q>(gx1.x, gy1.x, gz1.x);
+ vec<3, T, Q> g101 = vec<3, T, Q>(gx1.y, gy1.y, gz1.y);
+ vec<3, T, Q> g011 = vec<3, T, Q>(gx1.z, gy1.z, gz1.z);
+ vec<3, T, Q> g111 = vec<3, T, Q>(gx1.w, gy1.w, gz1.w);
+
+ vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
+ g000 *= norm0.x;
+ g010 *= norm0.y;
+ g100 *= norm0.z;
+ g110 *= norm0.w;
+ vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
+ g001 *= norm1.x;
+ g011 *= norm1.y;
+ g101 *= norm1.z;
+ g111 *= norm1.w;
+
+ T n000 = dot(g000, Pf0);
+ T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z));
+ T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z));
+ T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z));
+ T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z));
+ T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z));
+ T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z));
+ T n111 = dot(g111, Pf1);
+
+ vec<3, T, Q> fade_xyz = detail::fade(Pf0);
+ vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z);
+ vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y);
+ T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
+ return T(2.2) * n_xyz;
+ }
+
+ // Classic Perlin noise, periodic version
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position, vec<4, T, Q> const& rep)
+ {
+ vec<4, T, Q> Pi0 = mod(floor(Position), rep); // Integer part modulo rep
+ vec<4, T, Q> Pi1 = mod(Pi0 + T(1), rep); // Integer part + 1 mod rep
+ vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation
+ vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0
+ vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
+ vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y);
+ vec<4, T, Q> iz0(Pi0.z);
+ vec<4, T, Q> iz1(Pi1.z);
+ vec<4, T, Q> iw0(Pi0.w);
+ vec<4, T, Q> iw1(Pi1.w);
+
+ vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy);
+ vec<4, T, Q> ixy0 = detail::permute(ixy + iz0);
+ vec<4, T, Q> ixy1 = detail::permute(ixy + iz1);
+ vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0);
+ vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1);
+ vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0);
+ vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1);
+
+ vec<4, T, Q> gx00 = ixy00 / T(7);
+ vec<4, T, Q> gy00 = floor(gx00) / T(7);
+ vec<4, T, Q> gz00 = floor(gy00) / T(6);
+ gx00 = fract(gx00) - T(0.5);
+ gy00 = fract(gy00) - T(0.5);
+ gz00 = fract(gz00) - T(0.5);
+ vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00);
+ vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0));
+ gx00 -= sw00 * (step(T(0), gx00) - T(0.5));
+ gy00 -= sw00 * (step(T(0), gy00) - T(0.5));
+
+ vec<4, T, Q> gx01 = ixy01 / T(7);
+ vec<4, T, Q> gy01 = floor(gx01) / T(7);
+ vec<4, T, Q> gz01 = floor(gy01) / T(6);
+ gx01 = fract(gx01) - T(0.5);
+ gy01 = fract(gy01) - T(0.5);
+ gz01 = fract(gz01) - T(0.5);
+ vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01);
+ vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0));
+ gx01 -= sw01 * (step(T(0), gx01) - T(0.5));
+ gy01 -= sw01 * (step(T(0), gy01) - T(0.5));
+
+ vec<4, T, Q> gx10 = ixy10 / T(7);
+ vec<4, T, Q> gy10 = floor(gx10) / T(7);
+ vec<4, T, Q> gz10 = floor(gy10) / T(6);
+ gx10 = fract(gx10) - T(0.5);
+ gy10 = fract(gy10) - T(0.5);
+ gz10 = fract(gz10) - T(0.5);
+ vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10);
+ vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0.0));
+ gx10 -= sw10 * (step(T(0), gx10) - T(0.5));
+ gy10 -= sw10 * (step(T(0), gy10) - T(0.5));
+
+ vec<4, T, Q> gx11 = ixy11 / T(7);
+ vec<4, T, Q> gy11 = floor(gx11) / T(7);
+ vec<4, T, Q> gz11 = floor(gy11) / T(6);
+ gx11 = fract(gx11) - T(0.5);
+ gy11 = fract(gy11) - T(0.5);
+ gz11 = fract(gz11) - T(0.5);
+ vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11);
+ vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(T(0)));
+ gx11 -= sw11 * (step(T(0), gx11) - T(0.5));
+ gy11 -= sw11 * (step(T(0), gy11) - T(0.5));
+
+ vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x);
+ vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y);
+ vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z);
+ vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w);
+ vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x);
+ vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y);
+ vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z);
+ vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w);
+ vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x);
+ vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y);
+ vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z);
+ vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w);
+ vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x);
+ vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y);
+ vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z);
+ vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w);
+
+ vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100)));
+ g0000 *= norm00.x;
+ g0100 *= norm00.y;
+ g1000 *= norm00.z;
+ g1100 *= norm00.w;
+
+ vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101)));
+ g0001 *= norm01.x;
+ g0101 *= norm01.y;
+ g1001 *= norm01.z;
+ g1101 *= norm01.w;
+
+ vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110)));
+ g0010 *= norm10.x;
+ g0110 *= norm10.y;
+ g1010 *= norm10.z;
+ g1110 *= norm10.w;
+
+ vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111)));
+ g0011 *= norm11.x;
+ g0111 *= norm11.y;
+ g1011 *= norm11.z;
+ g1111 *= norm11.w;
+
+ T n0000 = dot(g0000, Pf0);
+ T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w));
+ T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w));
+ T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w));
+ T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w));
+ T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w));
+ T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w));
+ T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w));
+ T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w));
+ T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w));
+ T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w));
+ T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w));
+ T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w));
+ T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w));
+ T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w));
+ T n1111 = dot(g1111, Pf1);
+
+ vec<4, T, Q> fade_xyzw = detail::fade(Pf0);
+ vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w);
+ vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w);
+ vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z);
+ vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y);
+ T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x);
+ return T(2.2) * n_xyzw;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T simplex(glm::vec<2, T, Q> const& v)
+ {
+ vec<4, T, Q> const C = vec<4, T, Q>(
+ T( 0.211324865405187), // (3.0 - sqrt(3.0)) / 6.0
+ T( 0.366025403784439), // 0.5 * (sqrt(3.0) - 1.0)
+ T(-0.577350269189626), // -1.0 + 2.0 * C.x
+ T( 0.024390243902439)); // 1.0 / 41.0
+
+ // First corner
+ vec<2, T, Q> i = floor(v + dot(v, vec<2, T, Q>(C[1])));
+ vec<2, T, Q> x0 = v - i + dot(i, vec<2, T, Q>(C[0]));
+
+ // Other corners
+ //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0
+ //i1.y = 1.0 - i1.x;
+ vec<2, T, Q> i1 = (x0.x > x0.y) ? vec<2, T, Q>(1, 0) : vec<2, T, Q>(0, 1);
+ // x0 = x0 - 0.0 + 0.0 * C.xx ;
+ // x1 = x0 - i1 + 1.0 * C.xx ;
+ // x2 = x0 - 1.0 + 2.0 * C.xx ;
+ vec<4, T, Q> x12 = vec<4, T, Q>(x0.x, x0.y, x0.x, x0.y) + vec<4, T, Q>(C.x, C.x, C.z, C.z);
+ x12 = vec<4, T, Q>(vec<2, T, Q>(x12) - i1, x12.z, x12.w);
+
+ // Permutations
+ i = mod(i, vec<2, T, Q>(289)); // Avoid truncation effects in permutation
+ vec<3, T, Q> p = detail::permute(
+ detail::permute(i.y + vec<3, T, Q>(T(0), i1.y, T(1)))
+ + i.x + vec<3, T, Q>(T(0), i1.x, T(1)));
+
+ vec<3, T, Q> m = max(vec<3, T, Q>(0.5) - vec<3, T, Q>(
+ dot(x0, x0),
+ dot(vec<2, T, Q>(x12.x, x12.y), vec<2, T, Q>(x12.x, x12.y)),
+ dot(vec<2, T, Q>(x12.z, x12.w), vec<2, T, Q>(x12.z, x12.w))), vec<3, T, Q>(0));
+ m = m * m ;
+ m = m * m ;
+
+ // Gradients: 41 points uniformly over a line, mapped onto a diamond.
+ // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287)
+
+ vec<3, T, Q> x = static_cast<T>(2) * fract(p * C.w) - T(1);
+ vec<3, T, Q> h = abs(x) - T(0.5);
+ vec<3, T, Q> ox = floor(x + T(0.5));
+ vec<3, T, Q> a0 = x - ox;
+
+ // Normalise gradients implicitly by scaling m
+ // Inlined for speed: m *= taylorInvSqrt( a0*a0 + h*h );
+ m *= static_cast<T>(1.79284291400159) - T(0.85373472095314) * (a0 * a0 + h * h);
+
+ // Compute final noise value at P
+ vec<3, T, Q> g;
+ g.x = a0.x * x0.x + h.x * x0.y;
+ //g.yz = a0.yz * x12.xz + h.yz * x12.yw;
+ g.y = a0.y * x12.x + h.y * x12.y;
+ g.z = a0.z * x12.z + h.z * x12.w;
+ return T(130) * dot(m, g);
+ }
+
+	// 3D simplex noise: computes a continuous pseudo-random scalar for a
+	// 3D position. NOTE(review): this reads as a port of the Stefan
+	// Gustavson / Ashima Arts GLSL "webgl-noise" simplex implementation --
+	// confirm against the file header attribution.
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T simplex(vec<3, T, Q> const& v)
+	{
+		// Skew/unskew constants: C.x = 1/6 (G3), C.y = 1/3 (F3)
+		vec<2, T, Q> const C(1.0 / 6.0, 1.0 / 3.0);
+		vec<4, T, Q> const D(0.0, 0.5, 1.0, 2.0);
+
+		// First corner: i = skewed simplex-cell origin,
+		// x0 = offset of v from that origin in unskewed space
+		vec<3, T, Q> i(floor(v + dot(v, vec<3, T, Q>(C.y))));
+		vec<3, T, Q> x0(v - i + dot(i, vec<3, T, Q>(C.x)));
+
+		// Other corners: component-wise ranking of x0 picks the corner
+		// traversal order (i1, i2 = second and third corner offsets)
+		vec<3, T, Q> g(step(vec<3, T, Q>(x0.y, x0.z, x0.x), x0));
+		vec<3, T, Q> l(T(1) - g);
+		vec<3, T, Q> i1(min(g, vec<3, T, Q>(l.z, l.x, l.y)));
+		vec<3, T, Q> i2(max(g, vec<3, T, Q>(l.z, l.x, l.y)));
+
+		// x0 = x0 - 0.0 + 0.0 * C.xxx;
+		// x1 = x0 - i1 + 1.0 * C.xxx;
+		// x2 = x0 - i2 + 2.0 * C.xxx;
+		// x3 = x0 - 1.0 + 3.0 * C.xxx;
+		vec<3, T, Q> x1(x0 - i1 + C.x);
+		vec<3, T, Q> x2(x0 - i2 + C.y); // 2.0*C.x = 1/3 = C.y
+		vec<3, T, Q> x3(x0 - D.y); // -1.0+3.0*C.x = -0.5 = -D.y
+
+		// Permutations: hash the four corner lattice indices (mod 289)
+		i = detail::mod289(i);
+		vec<4, T, Q> p(detail::permute(detail::permute(detail::permute(
+			i.z + vec<4, T, Q>(T(0), i1.z, i2.z, T(1))) +
+			i.y + vec<4, T, Q>(T(0), i1.y, i2.y, T(1))) +
+			i.x + vec<4, T, Q>(T(0), i1.x, i2.x, T(1))));
+
+		// Gradients: 7x7 points over a square, mapped onto an octahedron.
+		// The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294)
+		T n_ = static_cast<T>(0.142857142857); // 1.0/7.0
+		vec<3, T, Q> ns(n_ * vec<3, T, Q>(D.w, D.y, D.z) - vec<3, T, Q>(D.x, D.z, D.x));
+
+		vec<4, T, Q> j(p - T(49) * floor(p * ns.z * ns.z)); // mod(p,7*7)
+
+		// Split each hash into (x_, y_) coordinates on the 7x7 grid
+		vec<4, T, Q> x_(floor(j * ns.z));
+		vec<4, T, Q> y_(floor(j - T(7) * x_)); // mod(j,N)
+
+		vec<4, T, Q> x(x_ * ns.x + ns.y);
+		vec<4, T, Q> y(y_ * ns.x + ns.y);
+		vec<4, T, Q> h(T(1) - abs(x) - abs(y));
+
+		vec<4, T, Q> b0(x.x, x.y, y.x, y.y);
+		vec<4, T, Q> b1(x.z, x.w, y.z, y.w);
+
+		// vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0;
+		// vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0;
+		vec<4, T, Q> s0(floor(b0) * T(2) + T(1));
+		vec<4, T, Q> s1(floor(b1) * T(2) + T(1));
+		vec<4, T, Q> sh(-step(h, vec<4, T, Q>(0.0)));
+
+		// Sign fix-up: fold grid points onto the octahedron surface
+		vec<4, T, Q> a0 = vec<4, T, Q>(b0.x, b0.z, b0.y, b0.w) + vec<4, T, Q>(s0.x, s0.z, s0.y, s0.w) * vec<4, T, Q>(sh.x, sh.x, sh.y, sh.y);
+		vec<4, T, Q> a1 = vec<4, T, Q>(b1.x, b1.z, b1.y, b1.w) + vec<4, T, Q>(s1.x, s1.z, s1.y, s1.w) * vec<4, T, Q>(sh.z, sh.z, sh.w, sh.w);
+
+		// One pseudo-random gradient per simplex corner
+		vec<3, T, Q> p0(a0.x, a0.y, h.x);
+		vec<3, T, Q> p1(a0.z, a0.w, h.y);
+		vec<3, T, Q> p2(a1.x, a1.y, h.z);
+		vec<3, T, Q> p3(a1.z, a1.w, h.w);
+
+		// Normalise gradients
+		vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3)));
+		p0 *= norm.x;
+		p1 *= norm.y;
+		p2 *= norm.z;
+		p3 *= norm.w;
+
+		// Mix final noise value: radial falloff weight per corner;
+		// T(42) is the output scale factor (presumably chosen to map the
+		// result near [-1, 1] -- not derived here)
+		vec<4, T, Q> m = max(T(0.6) - vec<4, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), vec<4, T, Q>(0));
+		m = m * m;
+		return T(42) * dot(m * m, vec<4, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2), dot(p3, x3)));
+	}
+
+	// 4D simplex noise: computes a continuous pseudo-random scalar for a
+	// 4D position (same scheme as the 3D overload, with five corners).
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T simplex(vec<4, T, Q> const& v)
+	{
+		// Unskew constants: multiples of G4 = (5 - sqrt(5))/20
+		vec<4, T, Q> const C(
+			0.138196601125011, // (5 - sqrt(5))/20 G4
+			0.276393202250021, // 2 * G4
+			0.414589803375032, // 3 * G4
+			-0.447213595499958); // -1 + 4 * G4
+
+		// (sqrt(5) - 1)/4 = F4, used once below
+		T const F4 = static_cast<T>(0.309016994374947451);
+
+		// First corner: i = skewed simplex-cell origin, x0 = offset from it
+		vec<4, T, Q> i = floor(v + dot(v, vec<4, T, Q>(F4)));
+		vec<4, T, Q> x0 = v - i + dot(i, vec<4, T, Q>(C.x));
+
+		// Other corners
+
+		// Rank sorting originally contributed by Bill Licea-Kane, AMD (formerly ATI)
+		vec<4, T, Q> i0;
+		vec<3, T, Q> isX = step(vec<3, T, Q>(x0.y, x0.z, x0.w), vec<3, T, Q>(x0.x));
+		vec<3, T, Q> isYZ = step(vec<3, T, Q>(x0.z, x0.w, x0.w), vec<3, T, Q>(x0.y, x0.y, x0.z));
+		// i0.x = dot(isX, vec3(1.0));
+		//i0.x = isX.x + isX.y + isX.z;
+		//i0.yzw = static_cast<T>(1) - isX;
+		i0 = vec<4, T, Q>(isX.x + isX.y + isX.z, T(1) - isX);
+		// i0.y += dot(isYZ.xy, vec2(1.0));
+		i0.y += isYZ.x + isYZ.y;
+		//i0.zw += 1.0 - vec<2, T, Q>(isYZ.x, isYZ.y);
+		i0.z += static_cast<T>(1) - isYZ.x;
+		i0.w += static_cast<T>(1) - isYZ.y;
+		i0.z += isYZ.z;
+		i0.w += static_cast<T>(1) - isYZ.z;
+
+		// i0 now contains the unique values 0,1,2,3 in each channel;
+		// thresholding the ranks yields the three intermediate corner offsets
+		vec<4, T, Q> i3 = clamp(i0, T(0), T(1));
+		vec<4, T, Q> i2 = clamp(i0 - T(1), T(0), T(1));
+		vec<4, T, Q> i1 = clamp(i0 - T(2), T(0), T(1));
+
+		// x0 = x0 - 0.0 + 0.0 * C.xxxx
+		// x1 = x0 - i1 + 1.0 * C.xxxx
+		// x2 = x0 - i2 + 2.0 * C.xxxx
+		// x3 = x0 - i3 + 3.0 * C.xxxx
+		// x4 = x0 - 1.0 + 4.0 * C.xxxx
+		vec<4, T, Q> x1 = x0 - i1 + C.x;
+		vec<4, T, Q> x2 = x0 - i2 + C.y;
+		vec<4, T, Q> x3 = x0 - i3 + C.z;
+		vec<4, T, Q> x4 = x0 + C.w;
+
+		// Permutations: hash the five corner lattice indices (mod 289)
+		i = mod(i, vec<4, T, Q>(289));
+		T j0 = detail::permute(detail::permute(detail::permute(detail::permute(i.w) + i.z) + i.y) + i.x);
+		vec<4, T, Q> j1 = detail::permute(detail::permute(detail::permute(detail::permute(
+			i.w + vec<4, T, Q>(i1.w, i2.w, i3.w, T(1))) +
+			i.z + vec<4, T, Q>(i1.z, i2.z, i3.z, T(1))) +
+			i.y + vec<4, T, Q>(i1.y, i2.y, i3.y, T(1))) +
+			i.x + vec<4, T, Q>(i1.x, i2.x, i3.x, T(1)));
+
+		// Gradients: 7x7x6 points over a cube, mapped onto a 4-cross polytope
+		// 7*7*6 = 294, which is close to the ring size 17*17 = 289.
+		vec<4, T, Q> ip = vec<4, T, Q>(T(1) / T(294), T(1) / T(49), T(1) / T(7), T(0));
+
+		// One pseudo-random gradient per simplex corner
+		vec<4, T, Q> p0 = gtc::grad4(j0, ip);
+		vec<4, T, Q> p1 = gtc::grad4(j1.x, ip);
+		vec<4, T, Q> p2 = gtc::grad4(j1.y, ip);
+		vec<4, T, Q> p3 = gtc::grad4(j1.z, ip);
+		vec<4, T, Q> p4 = gtc::grad4(j1.w, ip);
+
+		// Normalise gradients (p4 handled separately: taylorInvSqrt's
+		// vector overload only covers four values at a time)
+		vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3)));
+		p0 *= norm.x;
+		p1 *= norm.y;
+		p2 *= norm.z;
+		p3 *= norm.w;
+		p4 *= detail::taylorInvSqrt(dot(p4, p4));
+
+		// Mix contributions from the five corners; T(49) is the output
+		// scale factor (presumably chosen to map the result near [-1, 1])
+		vec<3, T, Q> m0 = max(T(0.6) - vec<3, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2)), vec<3, T, Q>(0));
+		vec<2, T, Q> m1 = max(T(0.6) - vec<2, T, Q>(dot(x3, x3), dot(x4, x4) ), vec<2, T, Q>(0));
+		m0 = m0 * m0;
+		m1 = m1 * m1;
+		return T(49) *
+			(dot(m0 * m0, vec<3, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2))) +
+			dot(m1 * m1, vec<2, T, Q>(dot(p3, x3), dot(p4, x4))));
+	}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/packing.hpp b/3rdparty/glm/source/glm/gtc/packing.hpp
new file mode 100644
index 0000000..8e416b3
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/packing.hpp
@@ -0,0 +1,728 @@
+/// @ref gtc_packing
+/// @file glm/gtc/packing.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_packing GLM_GTC_packing
+/// @ingroup gtc
+///
+/// Include <glm/gtc/packing.hpp> to use the features of this extension.
+///
+/// This extension provides a set of functions to convert vectors to packed
+/// formats.
+
+#pragma once
+
+// Dependency:
+#include "type_precision.hpp"
+#include "../ext/vector_packing.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_packing extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_packing
+ /// @{
+
+ /// First, converts the normalized floating-point value v into a 8-bit integer value.
+ /// Then, the results are packed into the returned 8-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm1x8: round(clamp(c, 0, +1) * 255.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm2x8(vec2 const& v)
+ /// @see uint32 packUnorm4x8(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packUnorm4x8.xml">GLSL packUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint8 packUnorm1x8(float v);
+
+ /// Convert a single 8-bit integer to a normalized floating-point value.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm4x8: f / 255.0
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackUnorm2x8(uint16 p)
+ /// @see vec4 unpackUnorm4x8(uint32 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackUnorm4x8.xml">GLSL unpackUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL float unpackUnorm1x8(uint8 p);
+
+ /// First, converts each component of the normalized floating-point value v into 8-bit integer values.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm2x8: round(clamp(c, 0, +1) * 255.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint8 packUnorm1x8(float const& v)
+ /// @see uint32 packUnorm4x8(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packUnorm4x8.xml">GLSL packUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint16 packUnorm2x8(vec2 const& v);
+
+ /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit unsigned integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm4x8: f / 255.0
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackUnorm1x8(uint8 v)
+ /// @see vec4 unpackUnorm4x8(uint32 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackUnorm4x8.xml">GLSL unpackUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec2 unpackUnorm2x8(uint16 p);
+
+ /// First, converts the normalized floating-point value v into 8-bit integer value.
+ /// Then, the results are packed into the returned 8-bit unsigned integer.
+ ///
+ /// The conversion to fixed point is done as follows:
+ /// packSnorm1x8: round(clamp(s, -1, +1) * 127.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packSnorm2x8(vec2 const& v)
+ /// @see uint32 packSnorm4x8(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packSnorm4x8.xml">GLSL packSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint8 packSnorm1x8(float s);
+
+	/// First, unpacks a single 8-bit unsigned integer p into a single 8-bit signed integer.
+ /// Then, the value is converted to a normalized floating-point value to generate the returned scalar.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm1x8: clamp(f / 127.0, -1, +1)
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackSnorm2x8(uint16 p)
+ /// @see vec4 unpackSnorm4x8(uint32 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackSnorm4x8.xml">GLSL unpackSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL float unpackSnorm1x8(uint8 p);
+
+ /// First, converts each component of the normalized floating-point value v into 8-bit integer values.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packSnorm2x8: round(clamp(c, -1, +1) * 127.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint8 packSnorm1x8(float const& v)
+ /// @see uint32 packSnorm4x8(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packSnorm4x8.xml">GLSL packSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint16 packSnorm2x8(vec2 const& v);
+
+ /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm2x8: clamp(f / 127.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackSnorm1x8(uint8 p)
+ /// @see vec4 unpackSnorm4x8(uint32 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackSnorm4x8.xml">GLSL unpackSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec2 unpackSnorm2x8(uint16 p);
+
+ /// First, converts the normalized floating-point value v into a 16-bit integer value.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm1x16: round(clamp(c, 0, +1) * 65535.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packSnorm1x16(float const& v)
+ /// @see uint64 packSnorm4x16(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packUnorm4x8.xml">GLSL packUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint16 packUnorm1x16(float v);
+
+	/// First, unpacks a single 16-bit unsigned integer p into a single 16-bit unsigned integer.
+ /// Then, the value is converted to a normalized floating-point value to generate the returned scalar.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm1x16: f / 65535.0
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackUnorm2x16(uint32 p)
+ /// @see vec4 unpackUnorm4x16(uint64 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackUnorm2x16.xml">GLSL unpackUnorm2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL float unpackUnorm1x16(uint16 p);
+
+ /// First, converts each component of the normalized floating-point value v into 16-bit integer values.
+ /// Then, the results are packed into the returned 64-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm4x16: round(clamp(c, 0, +1) * 65535.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm1x16(float const& v)
+ /// @see uint32 packUnorm2x16(vec2 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packUnorm4x8.xml">GLSL packUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint64 packUnorm4x16(vec4 const& v);
+
+ /// First, unpacks a single 64-bit unsigned integer p into four 16-bit unsigned integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+	/// unpackUnorm4x16: f / 65535.0
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackUnorm1x16(uint16 p)
+ /// @see vec2 unpackUnorm2x16(uint32 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackUnorm2x16.xml">GLSL unpackUnorm2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec4 unpackUnorm4x16(uint64 p);
+
+ /// First, converts the normalized floating-point value v into 16-bit integer value.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion to fixed point is done as follows:
+	/// packSnorm1x16: round(clamp(s, -1, +1) * 32767.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packSnorm2x16(vec2 const& v)
+ /// @see uint64 packSnorm4x16(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packSnorm4x8.xml">GLSL packSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint16 packSnorm1x16(float v);
+
+	/// First, unpacks a single 16-bit unsigned integer p into a single 16-bit signed integer.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned scalar.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm1x16: clamp(f / 32767.0, -1, +1)
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackSnorm2x16(uint32 p)
+ /// @see vec4 unpackSnorm4x16(uint64 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackSnorm1x16.xml">GLSL unpackSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL float unpackSnorm1x16(uint16 p);
+
+ /// First, converts each component of the normalized floating-point value v into 16-bit integer values.
+ /// Then, the results are packed into the returned 64-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+	/// packSnorm4x16: round(clamp(c, -1, +1) * 32767.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packSnorm1x16(float const& v)
+ /// @see uint32 packSnorm2x16(vec2 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packSnorm4x8.xml">GLSL packSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint64 packSnorm4x16(vec4 const& v);
+
+ /// First, unpacks a single 64-bit unsigned integer p into four 16-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm4x16: clamp(f / 32767.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackSnorm1x16(uint16 p)
+ /// @see vec2 unpackSnorm2x16(uint32 p)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackSnorm2x16.xml">GLSL unpackSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec4 unpackSnorm4x16(uint64 p);
+
+ /// Returns an unsigned integer obtained by converting the components of a floating-point scalar
+ /// to the 16-bit floating-point representation found in the OpenGL Specification,
+ /// and then packing this 16-bit value into a 16-bit unsigned integer.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packHalf2x16(vec2 const& v)
+ /// @see uint64 packHalf4x16(vec4 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packHalf2x16.xml">GLSL packHalf2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint16 packHalf1x16(float v);
+
+ /// Returns a floating-point scalar with components obtained by unpacking a 16-bit unsigned integer into a 16-bit value,
+ /// interpreted as a 16-bit floating-point number according to the OpenGL Specification,
+ /// and converting it to 32-bit floating-point values.
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackHalf2x16(uint32 const& v)
+ /// @see vec4 unpackHalf4x16(uint64 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackHalf2x16.xml">GLSL unpackHalf2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL float unpackHalf1x16(uint16 v);
+
+ /// Returns an unsigned integer obtained by converting the components of a four-component floating-point vector
+ /// to the 16-bit floating-point representation found in the OpenGL Specification,
+ /// and then packing these four 16-bit values into a 64-bit unsigned integer.
+ /// The first vector component specifies the 16 least-significant bits of the result;
+ /// the forth component specifies the 16 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packHalf1x16(float const& v)
+ /// @see uint32 packHalf2x16(vec2 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packHalf2x16.xml">GLSL packHalf2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint64 packHalf4x16(vec4 const& v);
+
+ /// Returns a four-component floating-point vector with components obtained by unpacking a 64-bit unsigned integer into four 16-bit values,
+ /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification,
+ /// and converting them to 32-bit floating-point values.
+ /// The first component of the vector is obtained from the 16 least-significant bits of v;
+ /// the forth component is obtained from the 16 most-significant bits of v.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackHalf1x16(uint16 const& v)
+ /// @see vec2 unpackHalf2x16(uint32 const& v)
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackHalf2x16.xml">GLSL unpackHalf2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec4 unpackHalf4x16(uint64 p);
+
+ /// Returns an unsigned integer obtained by converting the components of a four-component signed integer vector
+ /// to the 10-10-10-2-bit signed integer representation found in the OpenGL Specification,
+ /// and then packing these four values into a 32-bit unsigned integer.
+ /// The first vector component specifies the 10 least-significant bits of the result;
+ /// the forth component specifies the 2 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packI3x10_1x2(uvec4 const& v)
+ /// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+ /// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+ /// @see ivec4 unpackI3x10_1x2(uint32 const& p)
+ GLM_FUNC_DECL uint32 packI3x10_1x2(ivec4 const& v);
+
+ /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers.
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packU3x10_1x2(uvec4 const& v)
+ /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p);
+ /// @see uvec4 unpackI3x10_1x2(uint32 const& p);
+ GLM_FUNC_DECL ivec4 unpackI3x10_1x2(uint32 p);
+
+ /// Returns an unsigned integer obtained by converting the components of a four-component unsigned integer vector
+ /// to the 10-10-10-2-bit unsigned integer representation found in the OpenGL Specification,
+ /// and then packing these four values into a 32-bit unsigned integer.
+ /// The first vector component specifies the 10 least-significant bits of the result;
+ /// the forth component specifies the 2 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packI3x10_1x2(ivec4 const& v)
+ /// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+ /// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+ /// @see ivec4 unpackU3x10_1x2(uint32 const& p)
+ GLM_FUNC_DECL uint32 packU3x10_1x2(uvec4 const& v);
+
+ /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers.
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packU3x10_1x2(uvec4 const& v)
+ /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p);
+ /// @see uvec4 unpackI3x10_1x2(uint32 const& p);
+ GLM_FUNC_DECL uvec4 unpackU3x10_1x2(uint32 p);
+
+ /// First, converts the first three components of the normalized floating-point value v into 10-bit signed integer values.
+ /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed integer values.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packSnorm3x10_1x2(xyz): round(clamp(c, -1, +1) * 511.0)
+ /// packSnorm3x10_1x2(w): round(clamp(c, -1, +1) * 1.0)
+ ///
+ /// The first vector component specifies the 10 least-significant bits of the result;
+ /// the forth component specifies the 2 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p)
+ /// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+ /// @see uint32 packU3x10_1x2(uvec4 const& v)
+ /// @see uint32 packI3x10_1x2(ivec4 const& v)
+ GLM_FUNC_DECL uint32 packSnorm3x10_1x2(vec4 const& v);
+
+	/// First, unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm3x10_1x2(xyz): clamp(f / 511.0, -1, +1)
+ /// unpackSnorm3x10_1x2(w): clamp(f / 511.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+ /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p))
+ /// @see uvec4 unpackI3x10_1x2(uint32 const& p)
+ /// @see uvec4 unpackU3x10_1x2(uint32 const& p)
+ GLM_FUNC_DECL vec4 unpackSnorm3x10_1x2(uint32 p);
+
+ /// First, converts the first three components of the normalized floating-point value v into 10-bit unsigned integer values.
+	/// Then, converts the fourth component of the normalized floating-point value v into 2-bit unsigned integer values.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm3x10_1x2(xyz): round(clamp(c, 0, +1) * 1023.0)
+ /// packUnorm3x10_1x2(w): round(clamp(c, 0, +1) * 3.0)
+ ///
+ /// The first vector component specifies the 10 least-significant bits of the result;
+ /// the forth component specifies the 2 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)
+ /// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+ /// @see uint32 packU3x10_1x2(uvec4 const& v)
+ /// @see uint32 packI3x10_1x2(ivec4 const& v)
+ GLM_FUNC_DECL uint32 packUnorm3x10_1x2(vec4 const& v);
+
+	/// First, unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+	/// unpackUnorm3x10_1x2(xyz): clamp(f / 1023.0, 0, +1)
+	/// unpackUnorm3x10_1x2(w): clamp(f / 3.0, 0, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+	/// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)
+ /// @see uvec4 unpackI3x10_1x2(uint32 const& p)
+ /// @see uvec4 unpackU3x10_1x2(uint32 const& p)
+ GLM_FUNC_DECL vec4 unpackUnorm3x10_1x2(uint32 p);
+
+ /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values.
+ /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The first vector component specifies the 11 least-significant bits of the result;
+ /// the last component specifies the 10 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see vec3 unpackF2x11_1x10(uint32 const& p)
+ GLM_FUNC_DECL uint32 packF2x11_1x10(vec3 const& v);
+
+ /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value .
+ /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector.
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packF2x11_1x10(vec3 const& v)
+ GLM_FUNC_DECL vec3 unpackF2x11_1x10(uint32 p);
+
+
+ /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values.
+ /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The first vector component specifies the 11 least-significant bits of the result;
+ /// the last component specifies the 10 most-significant bits.
+ ///
+ /// packF3x9_E1x5 allows encoding into RGBE / RGB9E5 format
+ ///
+ /// @see gtc_packing
+ /// @see vec3 unpackF3x9_E1x5(uint32 const& p)
+ GLM_FUNC_DECL uint32 packF3x9_E1x5(vec3 const& v);
+
+ /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value .
+ /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector.
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// unpackF3x9_E1x5 allows decoding RGBE / RGB9E5 data
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packF3x9_E1x5(vec3 const& v)
+ GLM_FUNC_DECL vec3 unpackF3x9_E1x5(uint32 p);
+
+ /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector
+ /// to the 16-bit floating-point representation found in the OpenGL Specification.
+ /// The first vector component specifies the 16 least-significant bits of the result;
+ /// the forth component specifies the 16 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& p)
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb);
+
+ /// Returns the RGB color reconstructed from an RGBM-encoded vector by
+ /// multiplying its first three components by the shared multiplier stored
+ /// in the fourth component.
+ ///
+ /// @see gtc_packing
+ /// @see vec<4, T, Q> packRGBM(vec<3, float, Q> const& v)
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm);
+
+ /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector
+ /// to the 16-bit floating-point representation found in the OpenGL Specification.
+ /// The first vector component specifies the 16 least-significant bits of the result;
+ /// the fourth component specifies the 16 most-significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see vec<L, float, Q> unpackHalf(vec<L, uint16, Q> const& p)
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, uint16, Q> packHalf(vec<L, float, Q> const& v);
+
+ /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values.
+ /// The first component of the vector is obtained from the 16 least-significant bits of v;
+ /// the fourth component is obtained from the 16 most-significant bits of v.
+ ///
+ /// @see gtc_packing
+ /// @see vec<L, uint16, Q> packHalf(vec<L, float, Q> const& v)
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, float, Q> unpackHalf(vec<L, uint16, Q> const& p);
+
+ /// Convert each component of the normalized floating-point vector into unsigned integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec<L, floatType, Q> unpackUnorm(vec<L, uintType, Q> const& p)
+ template<typename uintType, length_t L, typename floatType, qualifier Q>
+ GLM_FUNC_DECL vec<L, uintType, Q> packUnorm(vec<L, floatType, Q> const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see vec<L, uintType, Q> packUnorm(vec<L, floatType, Q> const& v)
+ template<typename floatType, length_t L, typename uintType, qualifier Q>
+ GLM_FUNC_DECL vec<L, floatType, Q> unpackUnorm(vec<L, uintType, Q> const& v);
+
+ /// Convert each component of the normalized floating-point vector into signed integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec<L, floatType, Q> unpackSnorm(vec<L, intType, Q> const& p);
+ template<typename intType, length_t L, typename floatType, qualifier Q>
+ GLM_FUNC_DECL vec<L, intType, Q> packSnorm(vec<L, floatType, Q> const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see vec<L, intType, Q> packSnorm(vec<L, floatType, Q> const& v)
+ template<typename floatType, length_t L, typename intType, qualifier Q>
+ GLM_FUNC_DECL vec<L, floatType, Q> unpackSnorm(vec<L, intType, Q> const& v);
+
+ /// Convert each component of the normalized floating-point vector into unsigned integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackUnorm2x4(uint8 p)
+ GLM_FUNC_DECL uint8 packUnorm2x4(vec2 const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint8 packUnorm2x4(vec2 const& v)
+ GLM_FUNC_DECL vec2 unpackUnorm2x4(uint8 p);
+
+ /// Convert each component of the normalized floating-point vector into unsigned integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec4 unpackUnorm4x4(uint16 p)
+ GLM_FUNC_DECL uint16 packUnorm4x4(vec4 const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm4x4(vec4 const& v)
+ GLM_FUNC_DECL vec4 unpackUnorm4x4(uint16 p);
+
+ /// Convert each component of the normalized floating-point vector into unsigned integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec3 unpackUnorm1x5_1x6_1x5(uint16 p)
+ GLM_FUNC_DECL uint16 packUnorm1x5_1x6_1x5(vec3 const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm1x5_1x6_1x5(vec3 const& v)
+ GLM_FUNC_DECL vec3 unpackUnorm1x5_1x6_1x5(uint16 p);
+
+ /// Convert each component of the normalized floating-point vector into unsigned integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec4 unpackUnorm3x5_1x1(uint16 p)
+ GLM_FUNC_DECL uint16 packUnorm3x5_1x1(vec4 const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm3x5_1x1(vec4 const& v)
+ GLM_FUNC_DECL vec4 unpackUnorm3x5_1x1(uint16 p);
+
+ /// Convert each component of the normalized floating-point vector into unsigned integer values.
+ ///
+ /// @see gtc_packing
+ /// @see vec3 unpackUnorm2x3_1x2(uint8 p)
+ GLM_FUNC_DECL uint8 packUnorm2x3_1x2(vec3 const& v);
+
+ /// Convert a packed integer to a normalized floating-point vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint8 packUnorm2x3_1x2(vec3 const& v)
+ GLM_FUNC_DECL vec3 unpackUnorm2x3_1x2(uint8 p);
+
+
+
+ /// Convert each component from an integer vector into a packed integer.
+ ///
+ /// @see gtc_packing
+ /// @see i8vec2 unpackInt2x8(int16 p)
+ GLM_FUNC_DECL int16 packInt2x8(i8vec2 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see int16 packInt2x8(i8vec2 const& v)
+ GLM_FUNC_DECL i8vec2 unpackInt2x8(int16 p);
+
+ /// Convert each component from an integer vector into a packed unsigned integer.
+ ///
+ /// @see gtc_packing
+ /// @see u8vec2 unpackUint2x8(uint16 p)
+ GLM_FUNC_DECL uint16 packUint2x8(u8vec2 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUint2x8(u8vec2 const& v)
+ GLM_FUNC_DECL u8vec2 unpackUint2x8(uint16 p);
+
+ /// Convert each component from an integer vector into a packed integer.
+ ///
+ /// @see gtc_packing
+ /// @see i8vec4 unpackInt4x8(int32 p)
+ GLM_FUNC_DECL int32 packInt4x8(i8vec4 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see int32 packInt4x8(i8vec4 const& v)
+ GLM_FUNC_DECL i8vec4 unpackInt4x8(int32 p);
+
+ /// Convert each component from an integer vector into a packed unsigned integer.
+ ///
+ /// @see gtc_packing
+ /// @see u8vec4 unpackUint4x8(uint32 p)
+ GLM_FUNC_DECL uint32 packUint4x8(u8vec4 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packUint4x8(u8vec4 const& v)
+ GLM_FUNC_DECL u8vec4 unpackUint4x8(uint32 p);
+
+ /// Convert each component from an integer vector into a packed integer.
+ ///
+ /// @see gtc_packing
+ /// @see i16vec2 unpackInt2x16(int p)
+ GLM_FUNC_DECL int packInt2x16(i16vec2 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see int packInt2x16(i16vec2 const& v)
+ GLM_FUNC_DECL i16vec2 unpackInt2x16(int p);
+
+ /// Convert each component from an integer vector into a packed integer.
+ ///
+ /// @see gtc_packing
+ /// @see i16vec4 unpackInt4x16(int64 p)
+ GLM_FUNC_DECL int64 packInt4x16(i16vec4 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see int64 packInt4x16(i16vec4 const& v)
+ GLM_FUNC_DECL i16vec4 unpackInt4x16(int64 p);
+
+ /// Convert each component from an integer vector into a packed unsigned integer.
+ ///
+ /// @see gtc_packing
+ /// @see u16vec2 unpackUint2x16(uint p)
+ GLM_FUNC_DECL uint packUint2x16(u16vec2 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint packUint2x16(u16vec2 const& v)
+ GLM_FUNC_DECL u16vec2 unpackUint2x16(uint p);
+
+ /// Convert each component from an integer vector into a packed unsigned integer.
+ ///
+ /// @see gtc_packing
+ /// @see u16vec4 unpackUint4x16(uint64 p)
+ GLM_FUNC_DECL uint64 packUint4x16(u16vec4 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint64 packUint4x16(u16vec4 const& v)
+ GLM_FUNC_DECL u16vec4 unpackUint4x16(uint64 p);
+
+ /// Convert each component from an integer vector into a packed integer.
+ ///
+ /// @see gtc_packing
+ /// @see i32vec2 unpackInt2x32(int64 p)
+ GLM_FUNC_DECL int64 packInt2x32(i32vec2 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see int64 packInt2x32(i32vec2 const& v)
+ GLM_FUNC_DECL i32vec2 unpackInt2x32(int64 p);
+
+ /// Convert each component from an integer vector into a packed unsigned integer.
+ ///
+ /// @see gtc_packing
+ /// @see u32vec2 unpackUint2x32(uint64 p)
+ GLM_FUNC_DECL uint64 packUint2x32(u32vec2 const& v);
+
+ /// Convert a packed integer into an integer vector.
+ ///
+ /// @see gtc_packing
+ /// @see uint64 packUint2x32(u32vec2 const& v)
+ GLM_FUNC_DECL u32vec2 unpackUint2x32(uint64 p);
+
+ /// @}
+}// namespace glm
+
+#include "packing.inl"
diff --git a/3rdparty/glm/source/glm/gtc/packing.inl b/3rdparty/glm/source/glm/gtc/packing.inl
new file mode 100644
index 0000000..84ad60c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/packing.inl
@@ -0,0 +1,938 @@
+/// @ref gtc_packing
+
+#include "../ext/scalar_relational.hpp"
+#include "../ext/vector_relational.hpp"
+#include "../common.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../detail/type_half.hpp"
+#include <cstring>
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+ // Converts a 32-bit float bit pattern into a 16-bit half-float bit pattern.
+ // The exponent is rebiased from 127 to 15 by subtracting 0x38000000
+ // (i.e. 112 << 23) and the mantissa is truncated from 23 to 10 bits.
+ // NOTE(review): NaN, Inf, denormals and exponent overflow receive no
+ // special handling here -- confirm callers only pass in-range values.
+ GLM_FUNC_QUALIFIER glm::uint16 float2half(glm::uint32 f)
+ {
+ // 10 bits => EE EEEFFFFF
+ // 11 bits => EEE EEFFFFFF
+ // Half bits => SEEEEEFF FFFFFFFF
+ // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF
+
+ // 0x00007c00 => 00000000 00000000 01111100 00000000
+ // 0x000003ff => 00000000 00000000 00000011 11111111
+ // 0x38000000 => 00111000 00000000 00000000 00000000
+ // 0x7f800000 => 01111111 10000000 00000000 00000000
+ // 0x00008000 => 00000000 00000000 10000000 00000000
+ return
+ ((f >> 16) & 0x8000) | // sign
+ ((((f & 0x7f800000) - 0x38000000) >> 13) & 0x7c00) | // exponential
+ ((f >> 13) & 0x03ff); // Mantissa
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 float2packed11(glm::uint32 f)
+ {
+ // 10 bits => EE EEEFFFFF
+ // 11 bits => EEE EEFFFFFF
+ // Half bits => SEEEEEFF FFFFFFFF
+ // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF
+
+ // 0x000007c0 => 00000000 00000000 00000111 11000000
+ // 0x00007c00 => 00000000 00000000 01111100 00000000
+ // 0x000003ff => 00000000 00000000 00000011 11111111
+ // 0x38000000 => 00111000 00000000 00000000 00000000
+ // 0x7f800000 => 01111111 10000000 00000000 00000000
+ // 0x00008000 => 00000000 00000000 10000000 00000000
+ return
+ ((((f & 0x7f800000) - 0x38000000) >> 17) & 0x07c0) | // exponential
+ ((f >> 17) & 0x003f); // Mantissa
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 packed11ToFloat(glm::uint32 p)
+ {
+ // 10 bits => EE EEEFFFFF
+ // 11 bits => EEE EEFFFFFF
+ // Half bits => SEEEEEFF FFFFFFFF
+ // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF
+
+ // 0x000007c0 => 00000000 00000000 00000111 11000000
+ // 0x00007c00 => 00000000 00000000 01111100 00000000
+ // 0x000003ff => 00000000 00000000 00000011 11111111
+ // 0x38000000 => 00111000 00000000 00000000 00000000
+ // 0x7f800000 => 01111111 10000000 00000000 00000000
+ // 0x00008000 => 00000000 00000000 10000000 00000000
+ return
+ ((((p & 0x07c0) << 17) + 0x38000000) & 0x7f800000) | // exponential
+ ((p & 0x003f) << 17); // Mantissa
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 float2packed10(glm::uint32 f)
+ {
+ // 10 bits => EE EEEFFFFF
+ // 11 bits => EEE EEFFFFFF
+ // Half bits => SEEEEEFF FFFFFFFF
+ // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF
+
+ // 0x0000001F => 00000000 00000000 00000000 00011111
+ // 0x0000003F => 00000000 00000000 00000000 00111111
+ // 0x000003E0 => 00000000 00000000 00000011 11100000
+ // 0x000007C0 => 00000000 00000000 00000111 11000000
+ // 0x00007C00 => 00000000 00000000 01111100 00000000
+ // 0x000003FF => 00000000 00000000 00000011 11111111
+ // 0x38000000 => 00111000 00000000 00000000 00000000
+ // 0x7f800000 => 01111111 10000000 00000000 00000000
+ // 0x00008000 => 00000000 00000000 10000000 00000000
+ return
+ ((((f & 0x7f800000) - 0x38000000) >> 18) & 0x03E0) | // exponential
+ ((f >> 18) & 0x001f); // Mantissa
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 packed10ToFloat(glm::uint32 p)
+ {
+ // 10 bits => EE EEEFFFFF
+ // 11 bits => EEE EEFFFFFF
+ // Half bits => SEEEEEFF FFFFFFFF
+ // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF
+
+ // 0x0000001F => 00000000 00000000 00000000 00011111
+ // 0x0000003F => 00000000 00000000 00000000 00111111
+ // 0x000003E0 => 00000000 00000000 00000011 11100000
+ // 0x000007C0 => 00000000 00000000 00000111 11000000
+ // 0x00007C00 => 00000000 00000000 01111100 00000000
+ // 0x000003FF => 00000000 00000000 00000011 11111111
+ // 0x38000000 => 00111000 00000000 00000000 00000000
+ // 0x7f800000 => 01111111 10000000 00000000 00000000
+ // 0x00008000 => 00000000 00000000 10000000 00000000
+ return
+ ((((p & 0x03E0) << 18) + 0x38000000) & 0x7f800000) | // exponential
+ ((p & 0x001f) << 18); // Mantissa
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint half2float(glm::uint h)
+ {
+ return ((h & 0x8000) << 16) | ((( h & 0x7c00) + 0x1C000) << 13) | ((h & 0x03FF) << 13);
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint floatTo11bit(float x)
+ {
+ if(x == 0.0f)
+ return 0u;
+ else if(glm::isnan(x))
+ return ~0u;
+ else if(glm::isinf(x))
+ return 0x1Fu << 6u;
+
+ uint Pack = 0u;
+ memcpy(&Pack, &x, sizeof(Pack));
+ return float2packed11(Pack);
+ }
+
+ // Decodes an unsigned 11-bit packed float (5-bit exponent, 6-bit mantissa,
+ // no sign) into a 32-bit float. Only the low 11 bits of x are considered.
+ // NOTE(review): for the NaN/Inf bit patterns this returns `~0`, i.e. the
+ // int value -1 converted to float (-1.0f), not an actual NaN or infinity;
+ // worth confirming this is the intended sentinel.
+ GLM_FUNC_QUALIFIER float packed11bitToFloat(glm::uint x)
+ {
+ if(x == 0)
+ return 0.0f;
+ else if(x == ((1 << 11) - 1))
+ return ~0;//NaN
+ else if(x == (0x1f << 6))
+ return ~0;//Inf
+
+ uint Result = packed11ToFloat(x);
+
+ // Reinterpret the assembled bit pattern as a float; memcpy avoids
+ // strict-aliasing undefined behavior.
+ float Temp = 0;
+ memcpy(&Temp, &Result, sizeof(Temp));
+ return Temp;
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint floatTo10bit(float x)
+ {
+ if(x == 0.0f)
+ return 0u;
+ else if(glm::isnan(x))
+ return ~0u;
+ else if(glm::isinf(x))
+ return 0x1Fu << 5u;
+
+ uint Pack = 0;
+ memcpy(&Pack, &x, sizeof(Pack));
+ return float2packed10(Pack);
+ }
+
+ // Decodes an unsigned 10-bit packed float (5-bit exponent, 5-bit mantissa,
+ // no sign) into a 32-bit float. Only the low 10 bits of x are considered.
+ // NOTE(review): as with packed11bitToFloat, NaN/Inf patterns return `~0`
+ // converted to float (-1.0f) rather than a real NaN or infinity.
+ GLM_FUNC_QUALIFIER float packed10bitToFloat(glm::uint x)
+ {
+ if(x == 0)
+ return 0.0f;
+ else if(x == ((1 << 10) - 1))
+ return ~0;//NaN
+ else if(x == (0x1f << 5))
+ return ~0;//Inf
+
+ uint Result = packed10ToFloat(x);
+
+ // Reinterpret the assembled bit pattern as a float; memcpy avoids
+ // strict-aliasing undefined behavior.
+ float Temp = 0;
+ memcpy(&Temp, &Result, sizeof(Temp));
+ return Temp;
+ }
+
+// GLM_FUNC_QUALIFIER glm::uint f11_f11_f10(float x, float y, float z)
+// {
+// return ((floatTo11bit(x) & ((1 << 11) - 1)) << 0) | ((floatTo11bit(y) & ((1 << 11) - 1)) << 11) | ((floatTo10bit(z) & ((1 << 10) - 1)) << 22);
+// }
+
+ union u3u3u2
+ {
+ struct
+ {
+ uint x : 3;
+ uint y : 3;
+ uint z : 2;
+ } data;
+ uint8 pack;
+ };
+
+ union u4u4
+ {
+ struct
+ {
+ uint x : 4;
+ uint y : 4;
+ } data;
+ uint8 pack;
+ };
+
+ union u4u4u4u4
+ {
+ struct
+ {
+ uint x : 4;
+ uint y : 4;
+ uint z : 4;
+ uint w : 4;
+ } data;
+ uint16 pack;
+ };
+
+ union u5u6u5
+ {
+ struct
+ {
+ uint x : 5;
+ uint y : 6;
+ uint z : 5;
+ } data;
+ uint16 pack;
+ };
+
+ union u5u5u5u1
+ {
+ struct
+ {
+ uint x : 5;
+ uint y : 5;
+ uint z : 5;
+ uint w : 1;
+ } data;
+ uint16 pack;
+ };
+
+ union u10u10u10u2
+ {
+ struct
+ {
+ uint x : 10;
+ uint y : 10;
+ uint z : 10;
+ uint w : 2;
+ } data;
+ uint32 pack;
+ };
+
+ union i10i10i10i2
+ {
+ struct
+ {
+ int x : 10;
+ int y : 10;
+ int z : 10;
+ int w : 2;
+ } data;
+ uint32 pack;
+ };
+
+ // Bit layout for the RGB9E5 shared-exponent format: three 9-bit unsigned
+ // mantissas plus one 5-bit shared exponent packed into 32 bits.
+ // NOTE(review): bit-field allocation order is implementation-defined in
+ // C++; this relies on the compiler placing x in the least-significant
+ // bits, which holds on the common ABIs GLM targets but is not guaranteed.
+ union u9u9u9e5
+ {
+ struct
+ {
+ uint x : 9;
+ uint y : 9;
+ uint z : 9;
+ uint w : 5;
+ } data;
+ uint32 pack;
+ };
+
+ template<length_t L, qualifier Q>
+ struct compute_half
+ {};
+
+ // Half-precision pack/unpack specialization for 1-component vectors.
+ template<qualifier Q>
+ struct compute_half<1, Q>
+ {
+ // Converts v.x to a 16-bit half and returns it in unsigned storage;
+ // memcpy reinterprets the signed half bits without aliasing UB.
+ GLM_FUNC_QUALIFIER static vec<1, uint16, Q> pack(vec<1, float, Q> const& v)
+ {
+ int16 const Unpack(detail::toFloat16(v.x));
+ u16vec1 Packed;
+ memcpy(&Packed, &Unpack, sizeof(Packed));
+ return Packed;
+ }
+
+ // Converts the stored 16-bit half back to a 32-bit float.
+ // NOTE(review): `Unpack` is filled by memcpy but never used -- the raw
+ // uint16 `v.x` is passed to toFloat32 via implicit conversion instead;
+ // confirm this is intended (the 2/3/4-component siblings do use Unpack).
+ GLM_FUNC_QUALIFIER static vec<1, float, Q> unpack(vec<1, uint16, Q> const& v)
+ {
+ i16vec1 Unpack;
+ memcpy(&Unpack, &v, sizeof(Unpack));
+ return vec<1, float, Q>(detail::toFloat32(v.x));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_half<2, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<2, uint16, Q> pack(vec<2, float, Q> const& v)
+ {
+ vec<2, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y));
+ u16vec2 Packed;
+ memcpy(&Packed, &Unpack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER static vec<2, float, Q> unpack(vec<2, uint16, Q> const& v)
+ {
+ i16vec2 Unpack;
+ memcpy(&Unpack, &v, sizeof(Unpack));
+ return vec<2, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_half<3, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<3, uint16, Q> pack(vec<3, float, Q> const& v)
+ {
+ vec<3, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z));
+ u16vec3 Packed;
+ memcpy(&Packed, &Unpack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER static vec<3, float, Q> unpack(vec<3, uint16, Q> const& v)
+ {
+ i16vec3 Unpack;
+ memcpy(&Unpack, &v, sizeof(Unpack));
+ return vec<3, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z));
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_half<4, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, uint16, Q> pack(vec<4, float, Q> const& v)
+ {
+ vec<4, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z), detail::toFloat16(v.w));
+ u16vec4 Packed;
+ memcpy(&Packed, &Unpack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER static vec<4, float, Q> unpack(vec<4, uint16, Q> const& v)
+ {
+ i16vec4 Unpack;
+ memcpy(&Unpack, &v, sizeof(Unpack));
+ return vec<4, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z), detail::toFloat32(v.w));
+ }
+ };
+}//namespace detail
+
+ GLM_FUNC_QUALIFIER uint8 packUnorm1x8(float v)
+ {
+ return static_cast<uint8>(round(clamp(v, 0.0f, 1.0f) * 255.0f));
+ }
+
+ GLM_FUNC_QUALIFIER float unpackUnorm1x8(uint8 p)
+ {
+ float const Unpack(p);
+ return Unpack * static_cast<float>(0.0039215686274509803921568627451); // 1 / 255
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packUnorm2x8(vec2 const& v)
+ {
+ u8vec2 const Topack(round(clamp(v, 0.0f, 1.0f) * 255.0f));
+
+ uint16 Unpack = 0;
+ memcpy(&Unpack, &Topack, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER vec2 unpackUnorm2x8(uint16 p)
+ {
+ u8vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return vec2(Unpack) * float(0.0039215686274509803921568627451); // 1 / 255
+ }
+
+ GLM_FUNC_QUALIFIER uint8 packSnorm1x8(float v)
+ {
+ int8 const Topack(static_cast<int8>(round(clamp(v ,-1.0f, 1.0f) * 127.0f)));
+ uint8 Packed = 0;
+ memcpy(&Packed, &Topack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER float unpackSnorm1x8(uint8 p)
+ {
+ int8 Unpack = 0;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return clamp(
+ static_cast<float>(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f
+ -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packSnorm2x8(vec2 const& v)
+ {
+ i8vec2 const Topack(round(clamp(v, -1.0f, 1.0f) * 127.0f));
+ uint16 Packed = 0;
+ memcpy(&Packed, &Topack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER vec2 unpackSnorm2x8(uint16 p)
+ {
+ i8vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return clamp(
+ vec2(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f
+ -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packUnorm1x16(float s)
+ {
+ return static_cast<uint16>(round(clamp(s, 0.0f, 1.0f) * 65535.0f));
+ }
+
+ GLM_FUNC_QUALIFIER float unpackUnorm1x16(uint16 p)
+ {
+ float const Unpack(p);
+ return Unpack * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0
+ }
+
+ GLM_FUNC_QUALIFIER uint64 packUnorm4x16(vec4 const& v)
+ {
+ u16vec4 const Topack(round(clamp(v , 0.0f, 1.0f) * 65535.0f));
+ uint64 Packed = 0;
+ memcpy(&Packed, &Topack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackUnorm4x16(uint64 p)
+ {
+ u16vec4 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return vec4(Unpack) * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packSnorm1x16(float v)
+ {
+ int16 const Topack = static_cast<int16>(round(clamp(v ,-1.0f, 1.0f) * 32767.0f));
+ uint16 Packed = 0;
+ memcpy(&Packed, &Topack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER float unpackSnorm1x16(uint16 p)
+ {
+ int16 Unpack = 0;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return clamp(
+ static_cast<float>(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f,
+ -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER uint64 packSnorm4x16(vec4 const& v)
+ {
+ i16vec4 const Topack(round(clamp(v ,-1.0f, 1.0f) * 32767.0f));
+ uint64 Packed = 0;
+ memcpy(&Packed, &Topack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackSnorm4x16(uint64 p)
+ {
+ i16vec4 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return clamp(
+ vec4(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f,
+ -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packHalf1x16(float v)
+ {
+ int16 const Topack(detail::toFloat16(v));
+ uint16 Packed = 0;
+ memcpy(&Packed, &Topack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER float unpackHalf1x16(uint16 v)
+ {
+ int16 Unpack = 0;
+ memcpy(&Unpack, &v, sizeof(Unpack));
+ return detail::toFloat32(Unpack);
+ }
+
+ GLM_FUNC_QUALIFIER uint64 packHalf4x16(glm::vec4 const& v)
+ {
+ i16vec4 const Unpack(
+ detail::toFloat16(v.x),
+ detail::toFloat16(v.y),
+ detail::toFloat16(v.z),
+ detail::toFloat16(v.w));
+ uint64 Packed = 0;
+ memcpy(&Packed, &Unpack, sizeof(Packed));
+ return Packed;
+ }
+
+ GLM_FUNC_QUALIFIER glm::vec4 unpackHalf4x16(uint64 v)
+ {
+ i16vec4 Unpack;
+ memcpy(&Unpack, &v, sizeof(Unpack));
+ return vec4(
+ detail::toFloat32(Unpack.x),
+ detail::toFloat32(Unpack.y),
+ detail::toFloat32(Unpack.z),
+ detail::toFloat32(Unpack.w));
+ }
+
+ GLM_FUNC_QUALIFIER uint32 packI3x10_1x2(ivec4 const& v)
+ {
+ detail::i10i10i10i2 Result;
+ Result.data.x = v.x;
+ Result.data.y = v.y;
+ Result.data.z = v.z;
+ Result.data.w = v.w;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER ivec4 unpackI3x10_1x2(uint32 v)
+ {
+ detail::i10i10i10i2 Unpack;
+ Unpack.pack = v;
+ return ivec4(
+ Unpack.data.x,
+ Unpack.data.y,
+ Unpack.data.z,
+ Unpack.data.w);
+ }
+
+ GLM_FUNC_QUALIFIER uint32 packU3x10_1x2(uvec4 const& v)
+ {
+ detail::u10u10u10u2 Result;
+ Result.data.x = v.x;
+ Result.data.y = v.y;
+ Result.data.z = v.z;
+ Result.data.w = v.w;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER uvec4 unpackU3x10_1x2(uint32 v)
+ {
+ detail::u10u10u10u2 Unpack;
+ Unpack.pack = v;
+ return uvec4(
+ Unpack.data.x,
+ Unpack.data.y,
+ Unpack.data.z,
+ Unpack.data.w);
+ }
+
+ GLM_FUNC_QUALIFIER uint32 packSnorm3x10_1x2(vec4 const& v)
+ {
+ ivec4 const Pack(round(clamp(v,-1.0f, 1.0f) * vec4(511.f, 511.f, 511.f, 1.f)));
+
+ detail::i10i10i10i2 Result;
+ Result.data.x = Pack.x;
+ Result.data.y = Pack.y;
+ Result.data.z = Pack.z;
+ Result.data.w = Pack.w;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackSnorm3x10_1x2(uint32 v)
+ {
+ detail::i10i10i10i2 Unpack;
+ Unpack.pack = v;
+
+ vec4 const Result(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w);
+
+ return clamp(Result * vec4(1.f / 511.f, 1.f / 511.f, 1.f / 511.f, 1.f), -1.0f, 1.0f);
+ }
+
+ GLM_FUNC_QUALIFIER uint32 packUnorm3x10_1x2(vec4 const& v)
+ {
+ uvec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(1023.f, 1023.f, 1023.f, 3.f)));
+
+ detail::u10u10u10u2 Result;
+ Result.data.x = Unpack.x;
+ Result.data.y = Unpack.y;
+ Result.data.z = Unpack.z;
+ Result.data.w = Unpack.w;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackUnorm3x10_1x2(uint32 v)
+ {
+ vec4 const ScaleFactors(1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 3.f);
+
+ detail::u10u10u10u2 Unpack;
+ Unpack.pack = v;
+ return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactors;
+ }
+
+ GLM_FUNC_QUALIFIER uint32 packF2x11_1x10(vec3 const& v)
+ {
+ return
+ ((detail::floatTo11bit(v.x) & ((1 << 11) - 1)) << 0) |
+ ((detail::floatTo11bit(v.y) & ((1 << 11) - 1)) << 11) |
+ ((detail::floatTo10bit(v.z) & ((1 << 10) - 1)) << 22);
+ }
+
+ GLM_FUNC_QUALIFIER vec3 unpackF2x11_1x10(uint32 v)
+ {
+ return vec3(
+ detail::packed11bitToFloat(v >> 0),
+ detail::packed11bitToFloat(v >> 11),
+ detail::packed10bitToFloat(v >> 22));
+ }
+
+ // Encodes an RGB color into the RGB9E5 shared-exponent format: three
+ // 9-bit mantissas sharing one 5-bit exponent with bias 15.
+ GLM_FUNC_QUALIFIER uint32 packF3x9_E1x5(vec3 const& v)
+ {
+ // Clamp ceiling for encodable values.
+ // NOTE(review): this evaluates to (2^8 / 2^9) * 2^16 = 32768, whereas
+ // the EXT_texture_shared_exponent spec uses (2^9 - 1) / 2^9 * 2^16;
+ // confirm the reduced range is intentional.
+ float const SharedExpMax = (pow(2.0f, 9.0f - 1.0f) / pow(2.0f, 9.0f)) * pow(2.0f, 31.f - 15.f);
+ vec3 const Color = clamp(v, 0.0f, SharedExpMax);
+ float const MaxColor = max(Color.x, max(Color.y, Color.z));
+
+ // Preliminary shared exponent from the largest component (biased by 15).
+ float const ExpSharedP = max(-15.f - 1.f, floor(log2(MaxColor))) + 1.0f + 15.f;
+ float const MaxShared = floor(MaxColor / pow(2.0f, (ExpSharedP - 15.f - 9.f)) + 0.5f);
+ // If rounding the max mantissa overflows 9 bits, bump the exponent.
+ float const ExpShared = equal(MaxShared, pow(2.0f, 9.0f), epsilon<float>()) ? ExpSharedP + 1.0f : ExpSharedP;
+
+ // Quantize each channel against the final shared exponent.
+ uvec3 const ColorComp(floor(Color / pow(2.f, (ExpShared - 15.f - 9.f)) + 0.5f));
+
+ detail::u9u9u9e5 Unpack;
+ Unpack.data.x = ColorComp.x;
+ Unpack.data.y = ColorComp.y;
+ Unpack.data.z = ColorComp.z;
+ Unpack.data.w = uint(ExpShared);
+ return Unpack.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec3 unpackF3x9_E1x5(uint32 v)
+ {
+ detail::u9u9u9e5 Unpack;
+ Unpack.pack = v;
+
+ return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * pow(2.0f, Unpack.data.w - 15.f - 9.f);
+ }
+
+ // Based on Brian Karis http://graphicrants.blogspot.fr/2009/04/rgbm-color-encoding.html
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb)
+ {
+ vec<3, T, Q> const Color(rgb * static_cast<T>(1.0 / 6.0));
+ T Alpha = clamp(max(max(Color.x, Color.y), max(Color.z, static_cast<T>(1e-6))), static_cast<T>(0), static_cast<T>(1));
+ Alpha = ceil(Alpha * static_cast<T>(255.0)) / static_cast<T>(255.0);
+ return vec<4, T, Q>(Color / Alpha, Alpha);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm)
+ {
+ return vec<3, T, Q>(rgbm.x, rgbm.y, rgbm.z) * rgbm.w * static_cast<T>(6);
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, uint16, Q> packHalf(vec<L, float, Q> const& v)
+ {
+ return detail::compute_half<L, Q>::pack(v);
+ }
+
+ template<length_t L, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, float, Q> unpackHalf(vec<L, uint16, Q> const& v)
+ {
+ return detail::compute_half<L, Q>::unpack(v);
+ }
+
+ template<typename uintType, length_t L, typename floatType, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, uintType, Q> packUnorm(vec<L, floatType, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<uintType>::is_integer, "uintType must be an integer type");
+ GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "floatType must be a floating point type");
+
+ return vec<L, uintType, Q>(round(clamp(v, static_cast<floatType>(0), static_cast<floatType>(1)) * static_cast<floatType>(std::numeric_limits<uintType>::max())));
+ }
+
+ template<typename floatType, length_t L, typename uintType, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, floatType, Q> unpackUnorm(vec<L, uintType, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<uintType>::is_integer, "uintType must be an integer type");
+ GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "floatType must be a floating point type");
+
+ return vec<L, floatType, Q>(v) * (static_cast<floatType>(1) / static_cast<floatType>(std::numeric_limits<uintType>::max()));
+ }
+
+ template<typename intType, length_t L, typename floatType, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, intType, Q> packSnorm(vec<L, floatType, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<intType>::is_integer, "uintType must be an integer type");
+ GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "floatType must be a floating point type");
+
+ return vec<L, intType, Q>(round(clamp(v , static_cast<floatType>(-1), static_cast<floatType>(1)) * static_cast<floatType>(std::numeric_limits<intType>::max())));
+ }
+
+ template<typename floatType, length_t L, typename intType, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, floatType, Q> unpackSnorm(vec<L, intType, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<intType>::is_integer, "uintType must be an integer type");
+ GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "floatType must be a floating point type");
+
+ return clamp(vec<L, floatType, Q>(v) * (static_cast<floatType>(1) / static_cast<floatType>(std::numeric_limits<intType>::max())), static_cast<floatType>(-1), static_cast<floatType>(1));
+ }
+
+ GLM_FUNC_QUALIFIER uint8 packUnorm2x4(vec2 const& v)
+ {
+ u32vec2 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f));
+ detail::u4u4 Result;
+ Result.data.x = Unpack.x;
+ Result.data.y = Unpack.y;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec2 unpackUnorm2x4(uint8 v)
+ {
+ float const ScaleFactor(1.f / 15.f);
+ detail::u4u4 Unpack;
+ Unpack.pack = v;
+ return vec2(Unpack.data.x, Unpack.data.y) * ScaleFactor;
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packUnorm4x4(vec4 const& v)
+ {
+ u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f));
+ detail::u4u4u4u4 Result;
+ Result.data.x = Unpack.x;
+ Result.data.y = Unpack.y;
+ Result.data.z = Unpack.z;
+ Result.data.w = Unpack.w;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackUnorm4x4(uint16 v)
+ {
+ float const ScaleFactor(1.f / 15.f);
+ detail::u4u4u4u4 Unpack;
+ Unpack.pack = v;
+ return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor;
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packUnorm1x5_1x6_1x5(vec3 const& v)
+ {
+ u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(31.f, 63.f, 31.f)));
+ detail::u5u6u5 Result;
+ Result.data.x = Unpack.x;
+ Result.data.y = Unpack.y;
+ Result.data.z = Unpack.z;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec3 unpackUnorm1x5_1x6_1x5(uint16 v)
+ {
+ vec3 const ScaleFactor(1.f / 31.f, 1.f / 63.f, 1.f / 31.f);
+ detail::u5u6u5 Unpack;
+ Unpack.pack = v;
+ return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor;
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packUnorm3x5_1x1(vec4 const& v)
+ {
+ u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(31.f, 31.f, 31.f, 1.f)));
+ detail::u5u5u5u1 Result;
+ Result.data.x = Unpack.x;
+ Result.data.y = Unpack.y;
+ Result.data.z = Unpack.z;
+ Result.data.w = Unpack.w;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec4 unpackUnorm3x5_1x1(uint16 v)
+ {
+ vec4 const ScaleFactor(1.f / 31.f, 1.f / 31.f, 1.f / 31.f, 1.f);
+ detail::u5u5u5u1 Unpack;
+ Unpack.pack = v;
+ return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor;
+ }
+
+ GLM_FUNC_QUALIFIER uint8 packUnorm2x3_1x2(vec3 const& v)
+ {
+ u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(7.f, 7.f, 3.f)));
+ detail::u3u3u2 Result;
+ Result.data.x = Unpack.x;
+ Result.data.y = Unpack.y;
+ Result.data.z = Unpack.z;
+ return Result.pack;
+ }
+
+ GLM_FUNC_QUALIFIER vec3 unpackUnorm2x3_1x2(uint8 v)
+ {
+ vec3 const ScaleFactor(1.f / 7.f, 1.f / 7.f, 1.f / 3.f);
+ detail::u3u3u2 Unpack;
+ Unpack.pack = v;
+ return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor;
+ }
+
+ GLM_FUNC_QUALIFIER int16 packInt2x8(i8vec2 const& v)
+ {
+ int16 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER i8vec2 unpackInt2x8(int16 p)
+ {
+ i8vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER uint16 packUint2x8(u8vec2 const& v)
+ {
+ uint16 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER u8vec2 unpackUint2x8(uint16 p)
+ {
+ u8vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER int32 packInt4x8(i8vec4 const& v)
+ {
+ int32 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER i8vec4 unpackInt4x8(int32 p)
+ {
+ i8vec4 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER uint32 packUint4x8(u8vec4 const& v)
+ {
+ uint32 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER u8vec4 unpackUint4x8(uint32 p)
+ {
+ u8vec4 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER int packInt2x16(i16vec2 const& v)
+ {
+ int Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER i16vec2 unpackInt2x16(int p)
+ {
+ i16vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER int64 packInt4x16(i16vec4 const& v)
+ {
+ int64 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER i16vec4 unpackInt4x16(int64 p)
+ {
+ i16vec4 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER uint packUint2x16(u16vec2 const& v)
+ {
+ uint Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER u16vec2 unpackUint2x16(uint p)
+ {
+ u16vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER uint64 packUint4x16(u16vec4 const& v)
+ {
+ uint64 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER u16vec4 unpackUint4x16(uint64 p)
+ {
+ u16vec4 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER int64 packInt2x32(i32vec2 const& v)
+ {
+ int64 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER i32vec2 unpackInt2x32(int64 p)
+ {
+ i32vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+
+ GLM_FUNC_QUALIFIER uint64 packUint2x32(u32vec2 const& v)
+ {
+ uint64 Pack = 0;
+ memcpy(&Pack, &v, sizeof(Pack));
+ return Pack;
+ }
+
+ GLM_FUNC_QUALIFIER u32vec2 unpackUint2x32(uint64 p)
+ {
+ u32vec2 Unpack;
+ memcpy(&Unpack, &p, sizeof(Unpack));
+ return Unpack;
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtc/quaternion.hpp b/3rdparty/glm/source/glm/gtc/quaternion.hpp
new file mode 100644
index 0000000..359e072
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/quaternion.hpp
@@ -0,0 +1,173 @@
+/// @ref gtc_quaternion
+/// @file glm/gtc/quaternion.hpp
+///
+/// @see core (dependence)
+/// @see gtc_constants (dependence)
+///
+/// @defgroup gtc_quaternion GLM_GTC_quaternion
+/// @ingroup gtc
+///
+/// Include <glm/gtc/quaternion.hpp> to use the features of this extension.
+///
+/// Defines a templated quaternion type and several quaternion operations.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/constants.hpp"
+#include "../gtc/matrix_transform.hpp"
+#include "../ext/vector_relational.hpp"
+#include "../ext/quaternion_common.hpp"
+#include "../ext/quaternion_float.hpp"
+#include "../ext/quaternion_float_precision.hpp"
+#include "../ext/quaternion_double.hpp"
+#include "../ext/quaternion_double_precision.hpp"
+#include "../ext/quaternion_relational.hpp"
+#include "../ext/quaternion_geometric.hpp"
+#include "../ext/quaternion_trigonometric.hpp"
+#include "../ext/quaternion_transform.hpp"
+#include "../detail/type_mat3x3.hpp"
+#include "../detail/type_mat4x4.hpp"
+#include "../detail/type_vec3.hpp"
+#include "../detail/type_vec4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_quaternion extension included")
+#endif
+
namespace glm
{
	/// @addtogroup gtc_quaternion
	/// @{

	/// Returns Euler angles, pitch as x, yaw as y, roll as z.
	/// The result is expressed in radians.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL vec<3, T, Q> eulerAngles(qua<T, Q> const& x);

	/// Returns the roll value of Euler angles expressed in radians.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL T roll(qua<T, Q> const& x);

	/// Returns the pitch value of Euler angles expressed in radians.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL T pitch(qua<T, Q> const& x);

	/// Returns the yaw value of Euler angles expressed in radians.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL T yaw(qua<T, Q> const& x);

	/// Converts a quaternion to a 3 * 3 matrix.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL mat<3, 3, T, Q> mat3_cast(qua<T, Q> const& x);

	/// Converts a quaternion to a 4 * 4 matrix.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL mat<4, 4, T, Q> mat4_cast(qua<T, Q> const& x);

	/// Converts a pure rotation 3 * 3 matrix to a quaternion.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL qua<T, Q> quat_cast(mat<3, 3, T, Q> const& x);

	/// Converts a pure rotation 4 * 4 matrix to a quaternion.
	///
	/// @tparam T Floating-point scalar types.
	///
	/// @see gtc_quaternion
	template<typename T, qualifier Q>
	GLM_FUNC_DECL qua<T, Q> quat_cast(mat<4, 4, T, Q> const& x);

	/// Returns the component-wise comparison result of x < y.
	///
	/// @tparam T Floating-point scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see ext_quaternion_relational
	template<typename T, qualifier Q>
	GLM_FUNC_DECL vec<4, bool, Q> lessThan(qua<T, Q> const& x, qua<T, Q> const& y);

	/// Returns the component-wise comparison result of x <= y.
	///
	/// @tparam T Floating-point scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see ext_quaternion_relational
	template<typename T, qualifier Q>
	GLM_FUNC_DECL vec<4, bool, Q> lessThanEqual(qua<T, Q> const& x, qua<T, Q> const& y);

	/// Returns the component-wise comparison result of x > y.
	///
	/// @tparam T Floating-point scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see ext_quaternion_relational
	template<typename T, qualifier Q>
	GLM_FUNC_DECL vec<4, bool, Q> greaterThan(qua<T, Q> const& x, qua<T, Q> const& y);

	/// Returns the component-wise comparison result of x >= y.
	///
	/// @tparam T Floating-point scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see ext_quaternion_relational
	template<typename T, qualifier Q>
	GLM_FUNC_DECL vec<4, bool, Q> greaterThanEqual(qua<T, Q> const& x, qua<T, Q> const& y);

	/// Build a look at quaternion based on the default handedness.
	///
	/// @param direction Desired forward direction. Needs to be normalized.
	/// @param up Up vector, how the camera is oriented. Typically (0, 1, 0).
	template<typename T, qualifier Q>
	GLM_FUNC_DECL qua<T, Q> quatLookAt(
		vec<3, T, Q> const& direction,
		vec<3, T, Q> const& up);

	/// Build a right-handed look at quaternion.
	///
	/// @param direction Desired forward direction onto which the -z-axis gets mapped. Needs to be normalized.
	/// @param up Up vector, how the camera is oriented. Typically (0, 1, 0).
	template<typename T, qualifier Q>
	GLM_FUNC_DECL qua<T, Q> quatLookAtRH(
		vec<3, T, Q> const& direction,
		vec<3, T, Q> const& up);

	/// Build a left-handed look at quaternion.
	///
	/// @param direction Desired forward direction onto which the +z-axis gets mapped. Needs to be normalized.
	/// @param up Up vector, how the camera is oriented. Typically (0, 1, 0).
	template<typename T, qualifier Q>
	GLM_FUNC_DECL qua<T, Q> quatLookAtLH(
		vec<3, T, Q> const& direction,
		vec<3, T, Q> const& up);
	/// @}
} //namespace glm
+
+#include "quaternion.inl"
diff --git a/3rdparty/glm/source/glm/gtc/quaternion.inl b/3rdparty/glm/source/glm/gtc/quaternion.inl
new file mode 100644
index 0000000..e1ef032
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/quaternion.inl
@@ -0,0 +1,208 @@
+#include "../trigonometric.hpp"
+#include "../geometric.hpp"
+#include "../exponential.hpp"
+#include "epsilon.hpp"
+#include <limits>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> eulerAngles(qua<T, Q> const& x)
+ {
+ return vec<3, T, Q>(pitch(x), yaw(x), roll(x));
+ }
+
	template<typename T, qualifier Q>
	GLM_FUNC_QUALIFIER T roll(qua<T, Q> const& q)
	{
		// Rotation about the z axis: atan2(2(xy + wz), w^2 + x^2 - y^2 - z^2).
		T const y = static_cast<T>(2) * (q.x * q.y + q.w * q.z);
		T const x = q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z;

		if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon<T>()))) //avoid atan2(0,0) - handle singularity - Matiis
			return static_cast<T>(0);

		return static_cast<T>(atan(y, x)); // glm::atan(y, x) is the atan2 overload
	}
+
	template<typename T, qualifier Q>
	GLM_FUNC_QUALIFIER T pitch(qua<T, Q> const& q)
	{
		// Rotation about the x axis: atan2(2(yz + wx), w^2 - x^2 - y^2 + z^2).
		//return T(atan(T(2) * (q.y * q.z + q.w * q.x), q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z));
		T const y = static_cast<T>(2) * (q.y * q.z + q.w * q.x);
		T const x = q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z;

		if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon<T>()))) //avoid atan2(0,0) - handle singularity - Matiis
			return static_cast<T>(static_cast<T>(2) * atan(q.x, q.w)); // degenerate case: recover the angle from (x, w) directly

		return static_cast<T>(atan(y, x)); // glm::atan(y, x) is the atan2 overload
	}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T yaw(qua<T, Q> const& q)
+ {
+ return asin(clamp(static_cast<T>(-2) * (q.x * q.z - q.w * q.y), static_cast<T>(-1), static_cast<T>(1)));
+ }
+
	template<typename T, qualifier Q>
	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat3_cast(qua<T, Q> const& q)
	{
		// Standard quaternion -> rotation-matrix conversion in GLM's
		// column-major layout (Result[column][row]). Assumes q is a unit
		// quaternion — no normalization is performed here.
		mat<3, 3, T, Q> Result(T(1));
		// Pairwise component products reused by several matrix entries.
		T qxx(q.x * q.x);
		T qyy(q.y * q.y);
		T qzz(q.z * q.z);
		T qxz(q.x * q.z);
		T qxy(q.x * q.y);
		T qyz(q.y * q.z);
		T qwx(q.w * q.x);
		T qwy(q.w * q.y);
		T qwz(q.w * q.z);

		// Column 0
		Result[0][0] = T(1) - T(2) * (qyy + qzz);
		Result[0][1] = T(2) * (qxy + qwz);
		Result[0][2] = T(2) * (qxz - qwy);

		// Column 1
		Result[1][0] = T(2) * (qxy - qwz);
		Result[1][1] = T(1) - T(2) * (qxx + qzz);
		Result[1][2] = T(2) * (qyz + qwx);

		// Column 2
		Result[2][0] = T(2) * (qxz + qwy);
		Result[2][1] = T(2) * (qyz - qwx);
		Result[2][2] = T(1) - T(2) * (qxx + qyy);
		return Result;
	}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat4_cast(qua<T, Q> const& q)
+ {
+ return mat<4, 4, T, Q>(mat3_cast(q));
+ }
+
	template<typename T, qualifier Q>
	GLM_FUNC_QUALIFIER qua<T, Q> quat_cast(mat<3, 3, T, Q> const& m)
	{
		// Rotation-matrix -> quaternion conversion. The matrix is assumed to
		// be a pure rotation (orthonormal, determinant +1).
		// 4|c|^2 - 1 for each candidate component c in {x, y, z, w}, derived
		// from the matrix trace and diagonal.
		T fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2];
		T fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2];
		T fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1];
		T fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2];

		// Pick the component with the largest magnitude so the sqrt argument
		// stays large and the division by 'mult' below remains well-conditioned.
		int biggestIndex = 0;
		T fourBiggestSquaredMinus1 = fourWSquaredMinus1;
		if(fourXSquaredMinus1 > fourBiggestSquaredMinus1)
		{
			fourBiggestSquaredMinus1 = fourXSquaredMinus1;
			biggestIndex = 1;
		}
		if(fourYSquaredMinus1 > fourBiggestSquaredMinus1)
		{
			fourBiggestSquaredMinus1 = fourYSquaredMinus1;
			biggestIndex = 2;
		}
		if(fourZSquaredMinus1 > fourBiggestSquaredMinus1)
		{
			fourBiggestSquaredMinus1 = fourZSquaredMinus1;
			biggestIndex = 3;
		}

		T biggestVal = sqrt(fourBiggestSquaredMinus1 + static_cast<T>(1)) * static_cast<T>(0.5);
		T mult = static_cast<T>(0.25) / biggestVal;

		// Recover the remaining components from the off-diagonal entries;
		// which formula applies depends on which component was largest.
		switch(biggestIndex)
		{
		case 0:
			return qua<T, Q>(biggestVal, (m[1][2] - m[2][1]) * mult, (m[2][0] - m[0][2]) * mult, (m[0][1] - m[1][0]) * mult);
		case 1:
			return qua<T, Q>((m[1][2] - m[2][1]) * mult, biggestVal, (m[0][1] + m[1][0]) * mult, (m[2][0] + m[0][2]) * mult);
		case 2:
			return qua<T, Q>((m[2][0] - m[0][2]) * mult, (m[0][1] + m[1][0]) * mult, biggestVal, (m[1][2] + m[2][1]) * mult);
		case 3:
			return qua<T, Q>((m[0][1] - m[1][0]) * mult, (m[2][0] + m[0][2]) * mult, (m[1][2] + m[2][1]) * mult, biggestVal);
		default: // Silence a -Wswitch-default warning in GCC. Should never actually get here. Assert is just for sanity.
			assert(false);
			return qua<T, Q>(1, 0, 0, 0);
		}
	}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> quat_cast(mat<4, 4, T, Q> const& m4)
+ {
+ return quat_cast(mat<3, 3, T, Q>(m4));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> lessThan(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ vec<4, bool, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = x[i] < y[i];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> lessThanEqual(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ vec<4, bool, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = x[i] <= y[i];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> greaterThan(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ vec<4, bool, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = x[i] > y[i];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> greaterThanEqual(qua<T, Q> const& x, qua<T, Q> const& y)
+ {
+ vec<4, bool, Q> Result;
+ for(length_t i = 0; i < x.length(); ++i)
+ Result[i] = x[i] >= y[i];
+ return Result;
+ }
+
+
	template<typename T, qualifier Q>
	GLM_FUNC_QUALIFIER qua<T, Q> quatLookAt(vec<3, T, Q> const& direction, vec<3, T, Q> const& up)
	{
		// Compile-time dispatch on GLM's configured clip-control handedness:
		// left-handed builds map +z to 'direction', right-handed builds map -z.
#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
		return quatLookAtLH(direction, up);
#	else
		return quatLookAtRH(direction, up);
#	endif
	}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> quatLookAtRH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up)
+ {
+ mat<3, 3, T, Q> Result;
+
+ Result[2] = -direction;
+ vec<3, T, Q> const& Right = cross(up, Result[2]);
+ Result[0] = Right * inversesqrt(max(static_cast<T>(0.00001), dot(Right, Right)));
+ Result[1] = cross(Result[2], Result[0]);
+
+ return quat_cast(Result);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> quatLookAtLH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up)
+ {
+ mat<3, 3, T, Q> Result;
+
+ Result[2] = direction;
+ vec<3, T, Q> const& Right = cross(up, Result[2]);
+ Result[0] = Right * inversesqrt(max(static_cast<T>(0.00001), dot(Right, Right)));
+ Result[1] = cross(Result[2], Result[0]);
+
+ return quat_cast(Result);
+ }
+}//namespace glm
+
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+# include "quaternion_simd.inl"
+#endif
+
diff --git a/3rdparty/glm/source/glm/gtc/quaternion_simd.inl b/3rdparty/glm/source/glm/gtc/quaternion_simd.inl
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/quaternion_simd.inl
diff --git a/3rdparty/glm/source/glm/gtc/random.hpp b/3rdparty/glm/source/glm/gtc/random.hpp
new file mode 100644
index 0000000..9a85958
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/random.hpp
@@ -0,0 +1,82 @@
+/// @ref gtc_random
+/// @file glm/gtc/random.hpp
+///
+/// @see core (dependence)
+/// @see gtx_random (extended)
+///
+/// @defgroup gtc_random GLM_GTC_random
+/// @ingroup gtc
+///
+/// Include <glm/gtc/random.hpp> to use the features of this extension.
+///
+/// Generate random number from various distribution methods.
+
+#pragma once
+
+// Dependency:
+#include "../ext/scalar_int_sized.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_random extension included")
+#endif
+
namespace glm
{
	/// @addtogroup gtc_random
	/// @{

	/// Generate random numbers in the interval [Min, Max], according to a linear distribution
	///
	/// @param Min Minimum value included in the sampling
	/// @param Max Maximum value included in the sampling
	/// @tparam genType Value type. Currently supported: float or double scalars.
	/// @see gtc_random
	template<typename genType>
	GLM_FUNC_DECL genType linearRand(genType Min, genType Max);

	/// Generate random numbers in the interval [Min, Max], according to a linear distribution
	///
	/// @param Min Minimum value included in the sampling
	/// @param Max Maximum value included in the sampling
	/// @tparam T Value type. Currently supported: float or double.
	///
	/// @see gtc_random
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> linearRand(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);

	/// Generate random numbers in the interval [Min, Max], according to a gaussian distribution
	///
	/// @see gtc_random
	template<typename genType>
	GLM_FUNC_DECL genType gaussRand(genType Mean, genType Deviation);

	/// Generate a random 2D vector whose coordinates are regularly distributed on a circle of a given radius
	///
	/// @see gtc_random
	template<typename T>
	GLM_FUNC_DECL vec<2, T, defaultp> circularRand(T Radius);

	/// Generate a random 3D vector whose coordinates are regularly distributed on a sphere of a given radius
	///
	/// @see gtc_random
	template<typename T>
	GLM_FUNC_DECL vec<3, T, defaultp> sphericalRand(T Radius);

	/// Generate a random 2D vector whose coordinates are regularly distributed within the area of a disk of a given radius
	///
	/// @see gtc_random
	template<typename T>
	GLM_FUNC_DECL vec<2, T, defaultp> diskRand(T Radius);

	/// Generate a random 3D vector whose coordinates are regularly distributed within the volume of a ball of a given radius
	///
	/// @see gtc_random
	template<typename T>
	GLM_FUNC_DECL vec<3, T, defaultp> ballRand(T Radius);

	/// @}
}//namespace glm
+
+#include "random.inl"
diff --git a/3rdparty/glm/source/glm/gtc/random.inl b/3rdparty/glm/source/glm/gtc/random.inl
new file mode 100644
index 0000000..7048509
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/random.inl
@@ -0,0 +1,303 @@
+#include "../geometric.hpp"
+#include "../exponential.hpp"
+#include "../trigonometric.hpp"
+#include "../detail/type_vec1.hpp"
+#include <cstdlib>
+#include <ctime>
+#include <cassert>
+#include <cmath>
+
+namespace glm{
+namespace detail
+{
	// Raw entropy source: builds a vector of L pseudo-random components of
	// type T from std::rand(). Quality and seeding follow the C runtime's
	// rand()/srand(); only the specializations below are defined.
	template <length_t L, typename T, qualifier Q>
	struct compute_rand
	{
		GLM_FUNC_QUALIFIER static vec<L, T, Q> call();
	};

	// uint8: std::rand() reduced modulo numeric_limits<uint8>::max() (255),
	// so each component lies in [0, 254] — the value 255 is never produced.
	template <qualifier P>
	struct compute_rand<1, uint8, P>
	{
		GLM_FUNC_QUALIFIER static vec<1, uint8, P> call()
		{
			return vec<1, uint8, P>(
				std::rand() % std::numeric_limits<uint8>::max());
		}
	};

	template <qualifier P>
	struct compute_rand<2, uint8, P>
	{
		GLM_FUNC_QUALIFIER static vec<2, uint8, P> call()
		{
			return vec<2, uint8, P>(
				std::rand() % std::numeric_limits<uint8>::max(),
				std::rand() % std::numeric_limits<uint8>::max());
		}
	};

	template <qualifier P>
	struct compute_rand<3, uint8, P>
	{
		GLM_FUNC_QUALIFIER static vec<3, uint8, P> call()
		{
			return vec<3, uint8, P>(
				std::rand() % std::numeric_limits<uint8>::max(),
				std::rand() % std::numeric_limits<uint8>::max(),
				std::rand() % std::numeric_limits<uint8>::max());
		}
	};

	template <qualifier P>
	struct compute_rand<4, uint8, P>
	{
		GLM_FUNC_QUALIFIER static vec<4, uint8, P> call()
		{
			return vec<4, uint8, P>(
				std::rand() % std::numeric_limits<uint8>::max(),
				std::rand() % std::numeric_limits<uint8>::max(),
				std::rand() % std::numeric_limits<uint8>::max(),
				std::rand() % std::numeric_limits<uint8>::max());
		}
	};

	// Wider unsigned types are assembled by concatenating two half-width draws.
	template <length_t L, qualifier Q>
	struct compute_rand<L, uint16, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint16, Q> call()
		{
			return
				(vec<L, uint16, Q>(compute_rand<L, uint8, Q>::call()) << static_cast<uint16>(8)) |
				(vec<L, uint16, Q>(compute_rand<L, uint8, Q>::call()) << static_cast<uint16>(0));
		}
	};

	template <length_t L, qualifier Q>
	struct compute_rand<L, uint32, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint32, Q> call()
		{
			return
				(vec<L, uint32, Q>(compute_rand<L, uint16, Q>::call()) << static_cast<uint32>(16)) |
				(vec<L, uint32, Q>(compute_rand<L, uint16, Q>::call()) << static_cast<uint32>(0));
		}
	};

	template <length_t L, qualifier Q>
	struct compute_rand<L, uint64, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint64, Q> call()
		{
			return
				(vec<L, uint64, Q>(compute_rand<L, uint32, Q>::call()) << static_cast<uint64>(32)) |
				(vec<L, uint64, Q>(compute_rand<L, uint32, Q>::call()) << static_cast<uint64>(0));
		}
	};
+
	// Uniform draw in [Min, Max], dispatched on the element type. Integer
	// variants reduce a raw draw with modulo (Max is included; the modulo
	// introduces a slight bias). Floating-point variants scale a full-width
	// integer draw into the requested interval.
	template <length_t L, typename T, qualifier Q>
	struct compute_linearRand
	{
		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
	};

	// Signed integer variants draw from the unsigned type of equal width,
	// wrap with modulo over the range size (Max + 1 - Min), then offset by Min.
	template<length_t L, qualifier Q>
	struct compute_linearRand<L, int8, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, int8, Q> call(vec<L, int8, Q> const& Min, vec<L, int8, Q> const& Max)
		{
			return (vec<L, int8, Q>(compute_rand<L, uint8, Q>::call() % vec<L, uint8, Q>(Max + static_cast<int8>(1) - Min))) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, uint8, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint8, Q> call(vec<L, uint8, Q> const& Min, vec<L, uint8, Q> const& Max)
		{
			return (compute_rand<L, uint8, Q>::call() % (Max + static_cast<uint8>(1) - Min)) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, int16, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, int16, Q> call(vec<L, int16, Q> const& Min, vec<L, int16, Q> const& Max)
		{
			return (vec<L, int16, Q>(compute_rand<L, uint16, Q>::call() % vec<L, uint16, Q>(Max + static_cast<int16>(1) - Min))) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, uint16, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint16, Q> call(vec<L, uint16, Q> const& Min, vec<L, uint16, Q> const& Max)
		{
			return (compute_rand<L, uint16, Q>::call() % (Max + static_cast<uint16>(1) - Min)) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, int32, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, int32, Q> call(vec<L, int32, Q> const& Min, vec<L, int32, Q> const& Max)
		{
			return (vec<L, int32, Q>(compute_rand<L, uint32, Q>::call() % vec<L, uint32, Q>(Max + static_cast<int32>(1) - Min))) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, uint32, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint32, Q> call(vec<L, uint32, Q> const& Min, vec<L, uint32, Q> const& Max)
		{
			return (compute_rand<L, uint32, Q>::call() % (Max + static_cast<uint32>(1) - Min)) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, int64, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, int64, Q> call(vec<L, int64, Q> const& Min, vec<L, int64, Q> const& Max)
		{
			return (vec<L, int64, Q>(compute_rand<L, uint64, Q>::call() % vec<L, uint64, Q>(Max + static_cast<int64>(1) - Min))) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, uint64, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, uint64, Q> call(vec<L, uint64, Q> const& Min, vec<L, uint64, Q> const& Max)
		{
			return (compute_rand<L, uint64, Q>::call() % (Max + static_cast<uint64>(1) - Min)) + Min;
		}
	};

	// Floating-point variants: normalize an integer draw by its maximum value,
	// then scale and shift into [Min, Max].
	template<length_t L, qualifier Q>
	struct compute_linearRand<L, float, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, float, Q> call(vec<L, float, Q> const& Min, vec<L, float, Q> const& Max)
		{
			return vec<L, float, Q>(compute_rand<L, uint32, Q>::call()) / static_cast<float>(std::numeric_limits<uint32>::max()) * (Max - Min) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, double, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, double, Q> call(vec<L, double, Q> const& Min, vec<L, double, Q> const& Max)
		{
			return vec<L, double, Q>(compute_rand<L, uint64, Q>::call()) / static_cast<double>(std::numeric_limits<uint64>::max()) * (Max - Min) + Min;
		}
	};

	template<length_t L, qualifier Q>
	struct compute_linearRand<L, long double, Q>
	{
		GLM_FUNC_QUALIFIER static vec<L, long double, Q> call(vec<L, long double, Q> const& Min, vec<L, long double, Q> const& Max)
		{
			return vec<L, long double, Q>(compute_rand<L, uint64, Q>::call()) / static_cast<long double>(std::numeric_limits<uint64>::max()) * (Max - Min) + Min;
		}
	};
+}//namespace detail
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType linearRand(genType Min, genType Max)
+ {
+ return detail::compute_linearRand<1, genType, highp>::call(
+ vec<1, genType, highp>(Min),
+ vec<1, genType, highp>(Max)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> linearRand(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
+ {
+ return detail::compute_linearRand<L, T, Q>::call(Min, Max);
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType gaussRand(genType Mean, genType Deviation)
+ {
+ genType w, x1, x2;
+
+ do
+ {
+ x1 = linearRand(genType(-1), genType(1));
+ x2 = linearRand(genType(-1), genType(1));
+
+ w = x1 * x1 + x2 * x2;
+ } while(w > genType(1));
+
+ return static_cast<genType>(x2 * Deviation * Deviation * sqrt((genType(-2) * log(w)) / w) + Mean);
+ }
+
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> gaussRand(vec<L, T, Q> const& Mean, vec<L, T, Q> const& Deviation)
	{
		// Apply the scalar gaussRand component-wise via the functor2 helper.
		return detail::functor2<vec, L, T, Q>::call(gaussRand, Mean, Deviation);
	}
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<2, T, defaultp> diskRand(T Radius)
+ {
+ assert(Radius > static_cast<T>(0));
+
+ vec<2, T, defaultp> Result(T(0));
+ T LenRadius(T(0));
+
+ do
+ {
+ Result = linearRand(
+ vec<2, T, defaultp>(-Radius),
+ vec<2, T, defaultp>(Radius));
+ LenRadius = length(Result);
+ }
+ while(LenRadius > Radius);
+
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<3, T, defaultp> ballRand(T Radius)
+ {
+ assert(Radius > static_cast<T>(0));
+
+ vec<3, T, defaultp> Result(T(0));
+ T LenRadius(T(0));
+
+ do
+ {
+ Result = linearRand(
+ vec<3, T, defaultp>(-Radius),
+ vec<3, T, defaultp>(Radius));
+ LenRadius = length(Result);
+ }
+ while(LenRadius > Radius);
+
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<2, T, defaultp> circularRand(T Radius)
+ {
+ assert(Radius > static_cast<T>(0));
+
+ T a = linearRand(T(0), static_cast<T>(6.283185307179586476925286766559));
+ return vec<2, T, defaultp>(glm::cos(a), glm::sin(a)) * Radius;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<3, T, defaultp> sphericalRand(T Radius)
+ {
+ assert(Radius > static_cast<T>(0));
+
+ T theta = linearRand(T(0), T(6.283185307179586476925286766559f));
+ T phi = std::acos(linearRand(T(-1.0f), T(1.0f)));
+
+ T x = std::sin(phi) * std::cos(theta);
+ T y = std::sin(phi) * std::sin(theta);
+ T z = std::cos(phi);
+
+ return vec<3, T, defaultp>(x, y, z) * Radius;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/reciprocal.hpp b/3rdparty/glm/source/glm/gtc/reciprocal.hpp
new file mode 100644
index 0000000..4d0fc91
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/reciprocal.hpp
@@ -0,0 +1,24 @@
+/// @ref gtc_reciprocal
+/// @file glm/gtc/reciprocal.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_reciprocal GLM_GTC_reciprocal
+/// @ingroup gtc
+///
+/// Include <glm/gtc/reciprocal.hpp> to use the features of this extension.
+///
+/// Define secant, cosecant and cotangent functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_reciprocal extension included")
+#endif
+
+#include "../ext/scalar_reciprocal.hpp"
+#include "../ext/vector_reciprocal.hpp"
+
diff --git a/3rdparty/glm/source/glm/gtc/round.hpp b/3rdparty/glm/source/glm/gtc/round.hpp
new file mode 100644
index 0000000..56edbbc
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/round.hpp
@@ -0,0 +1,160 @@
+/// @ref gtc_round
+/// @file glm/gtc/round.hpp
+///
+/// @see core (dependence)
+/// @see gtc_round (dependence)
+///
+/// @defgroup gtc_round GLM_GTC_round
+/// @ingroup gtc
+///
+/// Include <glm/gtc/round.hpp> to use the features of this extension.
+///
+/// Round values to specific boundaries.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_round extension included")
+#endif
+
namespace glm
{
	/// @addtogroup gtc_round
	/// @{

	/// Return the power of two number which value is just higher than the input value,
	/// round up to a power of two.
	///
	/// @see gtc_round
	template<typename genIUType>
	GLM_FUNC_DECL genIUType ceilPowerOfTwo(genIUType v);

	/// Return the power of two number which value is just higher than the input value,
	/// round up to a power of two.
	///
	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
	/// @tparam T Floating-point or integer scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see gtc_round
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> ceilPowerOfTwo(vec<L, T, Q> const& v);

	/// Return the power of two number which value is just lower than the input value,
	/// round down to a power of two.
	///
	/// @see gtc_round
	template<typename genIUType>
	GLM_FUNC_DECL genIUType floorPowerOfTwo(genIUType v);

	/// Return the power of two number which value is just lower than the input value,
	/// round down to a power of two.
	///
	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
	/// @tparam T Floating-point or integer scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see gtc_round
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> floorPowerOfTwo(vec<L, T, Q> const& v);

	/// Return the power of two number which value is the closest to the input value.
	///
	/// @see gtc_round
	template<typename genIUType>
	GLM_FUNC_DECL genIUType roundPowerOfTwo(genIUType v);

	/// Return the power of two number which value is the closest to the input value.
	///
	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
	/// @tparam T Floating-point or integer scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @see gtc_round
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> roundPowerOfTwo(vec<L, T, Q> const& v);

	/// Higher multiple number of Source.
	///
	/// @tparam genType Floating-point or integer scalar or vector types.
	///
	/// @param v Source value to which is applied the function
	/// @param Multiple Must be a null or positive value
	///
	/// @see gtc_round
	template<typename genType>
	GLM_FUNC_DECL genType ceilMultiple(genType v, genType Multiple);

	/// Higher multiple number of Source.
	///
	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
	/// @tparam T Floating-point or integer scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @param v Source values to which is applied the function
	/// @param Multiple Must be a null or positive value
	///
	/// @see gtc_round
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> ceilMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);

	/// Lower multiple number of Source.
	///
	/// @tparam genType Floating-point or integer scalar or vector types.
	///
	/// @param v Source value to which is applied the function
	/// @param Multiple Must be a null or positive value
	///
	/// @see gtc_round
	template<typename genType>
	GLM_FUNC_DECL genType floorMultiple(genType v, genType Multiple);

	/// Lower multiple number of Source.
	///
	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
	/// @tparam T Floating-point or integer scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @param v Source values to which is applied the function
	/// @param Multiple Must be a null or positive value
	///
	/// @see gtc_round
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> floorMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);

	/// Lower multiple number of Source.
	///
	/// @tparam genType Floating-point or integer scalar or vector types.
	///
	/// @param v Source value to which is applied the function
	/// @param Multiple Must be a null or positive value
	///
	/// @see gtc_round
	template<typename genType>
	GLM_FUNC_DECL genType roundMultiple(genType v, genType Multiple);

	/// Lower multiple number of Source.
	///
	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
	/// @tparam T Floating-point or integer scalar types
	/// @tparam Q Value from qualifier enum
	///
	/// @param v Source values to which is applied the function
	/// @param Multiple Must be a null or positive value
	///
	/// @see gtc_round
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> roundMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);

	/// @}
} //namespace glm
+
+#include "round.inl"
diff --git a/3rdparty/glm/source/glm/gtc/round.inl b/3rdparty/glm/source/glm/gtc/round.inl
new file mode 100644
index 0000000..48411e4
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/round.inl
@@ -0,0 +1,155 @@
+/// @ref gtc_round
+
+#include "../integer.hpp"
+#include "../ext/vector_integer.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<bool is_float, bool is_signed>
+ struct compute_roundMultiple {}; // primary template, dispatched on (is_iec559, is_signed); the <true,false> combination is intentionally left undefined
+
+ template<>
+ struct compute_roundMultiple<true, true> // floating-point types
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if (Source >= genType(0))
+ return Source - std::fmod(Source, Multiple); // strips the remainder: yields the lower multiple, not the nearest -- NOTE(review): despite the name no round-to-nearest occurs; confirm against upstream intent
+ else
+ {
+ genType Tmp = Source + genType(1);
+ return Tmp - std::fmod(Tmp, Multiple) - Multiple; // negative branch: bias by +1 then step one multiple down -- presumably to land on the multiple at or below Source; verify for non-integral Multiple
+ }
+ }
+ };
+
+ template<>
+ struct compute_roundMultiple<false, false> // unsigned integer types
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if (Source >= genType(0))
+ return Source - Source % Multiple; // integral remainder removal; for unsigned types this branch is always taken
+ else
+ {
+ genType Tmp = Source + genType(1);
+ return Tmp - Tmp % Multiple - Multiple; // unreachable for unsigned genType (Source >= 0 always holds)
+ }
+ }
+ };
+
+ template<>
+ struct compute_roundMultiple<false, true> // signed integer types; body identical to the unsigned specialization
+ {
+ template<typename genType>
+ GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+ {
+ if (Source >= genType(0))
+ return Source - Source % Multiple; // lower multiple for non-negative input
+ else
+ {
+ genType Tmp = Source + genType(1);
+ return Tmp - Tmp % Multiple - Multiple; // C++ % truncates toward zero, so the +1 / -Multiple pair steps to the multiple below Source
+ }
+ }
+ };
+}//namespace detail
+
+ //////////////////
+ // ceilPowerOfTwo
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType ceilPowerOfTwo(genType value)
+ {
+ return detail::compute_ceilPowerOfTwo<1, genType, defaultp, std::numeric_limits<genType>::is_signed>::call(vec<1, genType, defaultp>(value)).x; // scalar path: wrap in a 1-component vector, delegate, then unwrap .x
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> ceilPowerOfTwo(vec<L, T, Q> const& v)
+ {
+ return detail::compute_ceilPowerOfTwo<L, T, Q, std::numeric_limits<T>::is_signed>::call(v); // compute_ceilPowerOfTwo is declared in an included header (not visible here); selects signed/unsigned handling at compile time
+ }
+
+ ///////////////////
+ // floorPowerOfTwo
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType floorPowerOfTwo(genType value)
+ {
+ return isPowerOfTwo(value) ? value : static_cast<genType>(1) << findMSB(value); // exact powers of two pass through; otherwise 1 << MSB gives the largest power of two below value
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> floorPowerOfTwo(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(floorPowerOfTwo, v); // apply the scalar overload component-wise
+ }
+
+ ///////////////////
+ // roundPowerOfTwo
+
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType roundPowerOfTwo(genIUType value)
+ {
+ if(isPowerOfTwo(value))
+ return value; // already a power of two: return unchanged
+
+ genIUType const prev = static_cast<genIUType>(1) << findMSB(value); // largest power of two below value
+ genIUType const next = prev << static_cast<genIUType>(1); // smallest power of two above value
+ return (next - value) < (value - prev) ? next : prev; // pick the closer one; on a tie the strict < selects prev (the lower power of two)
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> roundPowerOfTwo(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(roundPowerOfTwo, v); // apply the scalar overload component-wise
+ }
+
+ //////////////////////
+ // ceilMultiple
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType ceilMultiple(genType Source, genType Multiple)
+ {
+ return detail::compute_ceilMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple); // dispatch on (floating-point?, signed?); compute_ceilMultiple defined elsewhere
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> ceilMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+ {
+ return detail::functor2<vec, L, T, Q>::call(ceilMultiple, Source, Multiple); // apply the scalar overload component-wise over both vectors
+ }
+
+ //////////////////////
+ // floorMultiple
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType floorMultiple(genType Source, genType Multiple)
+ {
+ return detail::compute_floorMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple); // dispatch on (floating-point?, signed?); compute_floorMultiple defined elsewhere
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> floorMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+ {
+ return detail::functor2<vec, L, T, Q>::call(floorMultiple, Source, Multiple); // apply the scalar overload component-wise over both vectors
+ }
+
+ //////////////////////
+ // roundMultiple
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType roundMultiple(genType Source, genType Multiple)
+ {
+ return detail::compute_roundMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple); // NOTE(review): compute_roundMultiple above computes the lower multiple, not the nearest -- confirm intended semantics with upstream
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> roundMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+ {
+ return detail::functor2<vec, L, T, Q>::call(roundMultiple, Source, Multiple); // apply the scalar overload component-wise over both vectors
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/type_aligned.hpp b/3rdparty/glm/source/glm/gtc/type_aligned.hpp
new file mode 100644
index 0000000..5403abf
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/type_aligned.hpp
@@ -0,0 +1,1315 @@
+/// @ref gtc_type_aligned
+/// @file glm/gtc/type_aligned.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_type_aligned GLM_GTC_type_aligned
+/// @ingroup gtc
+///
+/// Include <glm/gtc/type_aligned.hpp> to use the features of this extension.
+///
+/// Aligned types allowing SIMD optimizations of vectors and matrices types
+
+#pragma once
+
+#if (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE)
+# error "GLM: Aligned gentypes require to enable C++ language extensions. Define GLM_FORCE_ALIGNED_GENTYPES before including GLM headers to use aligned types."
+#endif
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_type_aligned extension included")
+#endif
+
+#include "../mat4x4.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x2.hpp"
+#include "../mat3x4.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x2.hpp"
+#include "../mat2x4.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x2.hpp"
+#include "../gtc/vec1.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+
+namespace glm
+{
+ /// @addtogroup gtc_type_aligned
+ /// @{
+
+ // -- *vec1 --
+
+ /// 1 component vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, float, aligned_highp> aligned_highp_vec1;
+
+ /// 1 component vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<1, float, aligned_mediump> aligned_mediump_vec1;
+
+ /// 1 component vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<1, float, aligned_lowp> aligned_lowp_vec1;
+
+ /// 1 component vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, double, aligned_highp> aligned_highp_dvec1;
+
+ /// 1 component vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<1, double, aligned_mediump> aligned_mediump_dvec1;
+
+ /// 1 component vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<1, double, aligned_lowp> aligned_lowp_dvec1;
+
+ /// 1 component vector aligned in memory of signed integer numbers.
+ typedef vec<1, int, aligned_highp> aligned_highp_ivec1;
+
+ /// 1 component vector aligned in memory of signed integer numbers.
+ typedef vec<1, int, aligned_mediump> aligned_mediump_ivec1;
+
+ /// 1 component vector aligned in memory of signed integer numbers.
+ typedef vec<1, int, aligned_lowp> aligned_lowp_ivec1;
+
+ /// 1 component vector aligned in memory of unsigned integer numbers.
+ typedef vec<1, uint, aligned_highp> aligned_highp_uvec1;
+
+ /// 1 component vector aligned in memory of unsigned integer numbers.
+ typedef vec<1, uint, aligned_mediump> aligned_mediump_uvec1;
+
+ /// 1 component vector aligned in memory of unsigned integer numbers.
+ typedef vec<1, uint, aligned_lowp> aligned_lowp_uvec1;
+
+ /// 1 component vector aligned in memory of bool values.
+ typedef vec<1, bool, aligned_highp> aligned_highp_bvec1;
+
+ /// 1 component vector aligned in memory of bool values.
+ typedef vec<1, bool, aligned_mediump> aligned_mediump_bvec1;
+
+ /// 1 component vector aligned in memory of bool values.
+ typedef vec<1, bool, aligned_lowp> aligned_lowp_bvec1;
+
+ /// 1 component vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, float, packed_highp> packed_highp_vec1;
+
+ /// 1 component vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<1, float, packed_mediump> packed_mediump_vec1;
+
+ /// 1 component vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<1, float, packed_lowp> packed_lowp_vec1;
+
+ /// 1 component vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, double, packed_highp> packed_highp_dvec1;
+
+ /// 1 component vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<1, double, packed_mediump> packed_mediump_dvec1;
+
+ /// 1 component vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<1, double, packed_lowp> packed_lowp_dvec1;
+
+ /// 1 component vector tightly packed in memory of signed integer numbers.
+ typedef vec<1, int, packed_highp> packed_highp_ivec1;
+
+ /// 1 component vector tightly packed in memory of signed integer numbers.
+ typedef vec<1, int, packed_mediump> packed_mediump_ivec1;
+
+ /// 1 component vector tightly packed in memory of signed integer numbers.
+ typedef vec<1, int, packed_lowp> packed_lowp_ivec1;
+
+ /// 1 component vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<1, uint, packed_highp> packed_highp_uvec1;
+
+ /// 1 component vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<1, uint, packed_mediump> packed_mediump_uvec1;
+
+ /// 1 component vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<1, uint, packed_lowp> packed_lowp_uvec1;
+
+ /// 1 component vector tightly packed in memory of bool values.
+ typedef vec<1, bool, packed_highp> packed_highp_bvec1;
+
+ /// 1 component vector tightly packed in memory of bool values.
+ typedef vec<1, bool, packed_mediump> packed_mediump_bvec1;
+
+ /// 1 component vector tightly packed in memory of bool values.
+ typedef vec<1, bool, packed_lowp> packed_lowp_bvec1;
+
+ // -- *vec2 --
+
+ /// 2 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<2, float, aligned_highp> aligned_highp_vec2;
+
+ /// 2 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<2, float, aligned_mediump> aligned_mediump_vec2;
+
+ /// 2 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<2, float, aligned_lowp> aligned_lowp_vec2;
+
+ /// 2 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<2, double, aligned_highp> aligned_highp_dvec2;
+
+ /// 2 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<2, double, aligned_mediump> aligned_mediump_dvec2;
+
+ /// 2 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<2, double, aligned_lowp> aligned_lowp_dvec2;
+
+ /// 2 components vector aligned in memory of signed integer numbers.
+ typedef vec<2, int, aligned_highp> aligned_highp_ivec2;
+
+ /// 2 components vector aligned in memory of signed integer numbers.
+ typedef vec<2, int, aligned_mediump> aligned_mediump_ivec2;
+
+ /// 2 components vector aligned in memory of signed integer numbers.
+ typedef vec<2, int, aligned_lowp> aligned_lowp_ivec2;
+
+ /// 2 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<2, uint, aligned_highp> aligned_highp_uvec2;
+
+ /// 2 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<2, uint, aligned_mediump> aligned_mediump_uvec2;
+
+ /// 2 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<2, uint, aligned_lowp> aligned_lowp_uvec2;
+
+ /// 2 components vector aligned in memory of bool values.
+ typedef vec<2, bool, aligned_highp> aligned_highp_bvec2;
+
+ /// 2 components vector aligned in memory of bool values.
+ typedef vec<2, bool, aligned_mediump> aligned_mediump_bvec2;
+
+ /// 2 components vector aligned in memory of bool values.
+ typedef vec<2, bool, aligned_lowp> aligned_lowp_bvec2;
+
+ /// 2 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<2, float, packed_highp> packed_highp_vec2;
+
+ /// 2 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<2, float, packed_mediump> packed_mediump_vec2;
+
+ /// 2 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<2, float, packed_lowp> packed_lowp_vec2;
+
+ /// 2 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<2, double, packed_highp> packed_highp_dvec2;
+
+ /// 2 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<2, double, packed_mediump> packed_mediump_dvec2;
+
+ /// 2 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<2, double, packed_lowp> packed_lowp_dvec2;
+
+ /// 2 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<2, int, packed_highp> packed_highp_ivec2;
+
+ /// 2 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<2, int, packed_mediump> packed_mediump_ivec2;
+
+ /// 2 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<2, int, packed_lowp> packed_lowp_ivec2;
+
+ /// 2 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<2, uint, packed_highp> packed_highp_uvec2;
+
+ /// 2 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<2, uint, packed_mediump> packed_mediump_uvec2;
+
+ /// 2 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<2, uint, packed_lowp> packed_lowp_uvec2;
+
+ /// 2 components vector tightly packed in memory of bool values.
+ typedef vec<2, bool, packed_highp> packed_highp_bvec2;
+
+ /// 2 components vector tightly packed in memory of bool values.
+ typedef vec<2, bool, packed_mediump> packed_mediump_bvec2;
+
+ /// 2 components vector tightly packed in memory of bool values.
+ typedef vec<2, bool, packed_lowp> packed_lowp_bvec2;
+
+ // -- *vec3 --
+
+ /// 3 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<3, float, aligned_highp> aligned_highp_vec3;
+
+ /// 3 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<3, float, aligned_mediump> aligned_mediump_vec3;
+
+ /// 3 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<3, float, aligned_lowp> aligned_lowp_vec3;
+
+ /// 3 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<3, double, aligned_highp> aligned_highp_dvec3;
+
+ /// 3 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<3, double, aligned_mediump> aligned_mediump_dvec3;
+
+ /// 3 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<3, double, aligned_lowp> aligned_lowp_dvec3;
+
+ /// 3 components vector aligned in memory of signed integer numbers.
+ typedef vec<3, int, aligned_highp> aligned_highp_ivec3;
+
+ /// 3 components vector aligned in memory of signed integer numbers.
+ typedef vec<3, int, aligned_mediump> aligned_mediump_ivec3;
+
+ /// 3 components vector aligned in memory of signed integer numbers.
+ typedef vec<3, int, aligned_lowp> aligned_lowp_ivec3;
+
+ /// 3 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<3, uint, aligned_highp> aligned_highp_uvec3;
+
+ /// 3 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<3, uint, aligned_mediump> aligned_mediump_uvec3;
+
+ /// 3 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<3, uint, aligned_lowp> aligned_lowp_uvec3;
+
+ /// 3 components vector aligned in memory of bool values.
+ typedef vec<3, bool, aligned_highp> aligned_highp_bvec3;
+
+ /// 3 components vector aligned in memory of bool values.
+ typedef vec<3, bool, aligned_mediump> aligned_mediump_bvec3;
+
+ /// 3 components vector aligned in memory of bool values.
+ typedef vec<3, bool, aligned_lowp> aligned_lowp_bvec3;
+
+ /// 3 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<3, float, packed_highp> packed_highp_vec3;
+
+ /// 3 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<3, float, packed_mediump> packed_mediump_vec3;
+
+ /// 3 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<3, float, packed_lowp> packed_lowp_vec3;
+
+ /// 3 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<3, double, packed_highp> packed_highp_dvec3;
+
+ /// 3 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<3, double, packed_mediump> packed_mediump_dvec3;
+
+ /// 3 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<3, double, packed_lowp> packed_lowp_dvec3;
+
+ /// 3 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<3, int, packed_highp> packed_highp_ivec3;
+
+ /// 3 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<3, int, packed_mediump> packed_mediump_ivec3;
+
+ /// 3 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<3, int, packed_lowp> packed_lowp_ivec3;
+
+ /// 3 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<3, uint, packed_highp> packed_highp_uvec3;
+
+ /// 3 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<3, uint, packed_mediump> packed_mediump_uvec3;
+
+ /// 3 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<3, uint, packed_lowp> packed_lowp_uvec3;
+
+ /// 3 components vector tightly packed in memory of bool values.
+ typedef vec<3, bool, packed_highp> packed_highp_bvec3;
+
+ /// 3 components vector tightly packed in memory of bool values.
+ typedef vec<3, bool, packed_mediump> packed_mediump_bvec3;
+
+ /// 3 components vector tightly packed in memory of bool values.
+ typedef vec<3, bool, packed_lowp> packed_lowp_bvec3;
+
+ // -- *vec4 --
+
+ /// 4 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<4, float, aligned_highp> aligned_highp_vec4;
+
+ /// 4 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<4, float, aligned_mediump> aligned_mediump_vec4;
+
+ /// 4 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<4, float, aligned_lowp> aligned_lowp_vec4;
+
+ /// 4 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<4, double, aligned_highp> aligned_highp_dvec4;
+
+ /// 4 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<4, double, aligned_mediump> aligned_mediump_dvec4;
+
+ /// 4 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<4, double, aligned_lowp> aligned_lowp_dvec4;
+
+ /// 4 components vector aligned in memory of signed integer numbers.
+ typedef vec<4, int, aligned_highp> aligned_highp_ivec4;
+
+ /// 4 components vector aligned in memory of signed integer numbers.
+ typedef vec<4, int, aligned_mediump> aligned_mediump_ivec4;
+
+ /// 4 components vector aligned in memory of signed integer numbers.
+ typedef vec<4, int, aligned_lowp> aligned_lowp_ivec4;
+
+ /// 4 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<4, uint, aligned_highp> aligned_highp_uvec4;
+
+ /// 4 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<4, uint, aligned_mediump> aligned_mediump_uvec4;
+
+ /// 4 components vector aligned in memory of unsigned integer numbers.
+ typedef vec<4, uint, aligned_lowp> aligned_lowp_uvec4;
+
+ /// 4 components vector aligned in memory of bool values.
+ typedef vec<4, bool, aligned_highp> aligned_highp_bvec4;
+
+ /// 4 components vector aligned in memory of bool values.
+ typedef vec<4, bool, aligned_mediump> aligned_mediump_bvec4;
+
+ /// 4 components vector aligned in memory of bool values.
+ typedef vec<4, bool, aligned_lowp> aligned_lowp_bvec4;
+
+ /// 4 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<4, float, packed_highp> packed_highp_vec4;
+
+ /// 4 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<4, float, packed_mediump> packed_mediump_vec4;
+
+ /// 4 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<4, float, packed_lowp> packed_lowp_vec4;
+
+ /// 4 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<4, double, packed_highp> packed_highp_dvec4;
+
+ /// 4 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef vec<4, double, packed_mediump> packed_mediump_dvec4;
+
+ /// 4 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef vec<4, double, packed_lowp> packed_lowp_dvec4;
+
+ /// 4 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<4, int, packed_highp> packed_highp_ivec4;
+
+ /// 4 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<4, int, packed_mediump> packed_mediump_ivec4;
+
+ /// 4 components vector tightly packed in memory of signed integer numbers.
+ typedef vec<4, int, packed_lowp> packed_lowp_ivec4;
+
+ /// 4 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<4, uint, packed_highp> packed_highp_uvec4;
+
+ /// 4 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<4, uint, packed_mediump> packed_mediump_uvec4;
+
+ /// 4 components vector tightly packed in memory of unsigned integer numbers.
+ typedef vec<4, uint, packed_lowp> packed_lowp_uvec4;
+
+ /// 4 components vector tightly packed in memory of bool values.
+ typedef vec<4, bool, packed_highp> packed_highp_bvec4;
+
+ /// 4 components vector tightly packed in memory of bool values.
+ typedef vec<4, bool, packed_mediump> packed_mediump_bvec4;
+
+ /// 4 components vector tightly packed in memory of bool values.
+ typedef vec<4, bool, packed_lowp> packed_lowp_bvec4;
+
+ // -- *mat2 --
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<2, 2, float, packed_highp> packed_highp_mat2;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<2, 2, double, packed_highp> packed_highp_dmat2;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2;
+
+ // -- *mat3 --
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<3, 3, float, packed_highp> packed_highp_mat3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<3, 3, double, packed_highp> packed_highp_dmat3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs.
+ typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3;
+
+ // -- *mat4 --
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_highp> packed_highp_mat4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_highp> packed_highp_dmat4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4;
+
+ // -- *mat2x2 --
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2x2;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2x2;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2x2;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2x2;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2x2;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2x2;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, float, packed_highp> packed_highp_mat2x2;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2x2;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2x2;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, double, packed_highp> packed_highp_dmat2x2;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2x2;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2x2;
+
+ // -- *mat2x3 --
+
+ /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, float, aligned_highp> aligned_highp_mat2x3;
+
+ /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, float, aligned_mediump> aligned_mediump_mat2x3;
+
+ /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, float, aligned_lowp> aligned_lowp_mat2x3;
+
+ /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, double, aligned_highp> aligned_highp_dmat2x3;
+
+ /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, double, aligned_mediump> aligned_mediump_dmat2x3;
+
+ /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, double, aligned_lowp> aligned_lowp_dmat2x3;
+
+ /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, float, packed_highp> packed_highp_mat2x3;
+
+ /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, float, packed_mediump> packed_mediump_mat2x3;
+
+ /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, float, packed_lowp> packed_lowp_mat2x3;
+
+ /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, double, packed_highp> packed_highp_dmat2x3;
+
+ /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, double, packed_mediump> packed_mediump_dmat2x3;
+
+ /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 3, double, packed_lowp> packed_lowp_dmat2x3;
+
+ // -- *mat2x4 --
+
+ /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, float, aligned_highp> aligned_highp_mat2x4;
+
+ /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, float, aligned_mediump> aligned_mediump_mat2x4;
+
+ /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, float, aligned_lowp> aligned_lowp_mat2x4;
+
+ /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, double, aligned_highp> aligned_highp_dmat2x4;
+
+ /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, double, aligned_mediump> aligned_mediump_dmat2x4;
+
+ /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, double, aligned_lowp> aligned_lowp_dmat2x4;
+
+ /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, float, packed_highp> packed_highp_mat2x4;
+
+ /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, float, packed_mediump> packed_mediump_mat2x4;
+
+ /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, float, packed_lowp> packed_lowp_mat2x4;
+
+ /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, double, packed_highp> packed_highp_dmat2x4;
+
+ /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, double, packed_mediump> packed_mediump_dmat2x4;
+
+ /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<2, 4, double, packed_lowp> packed_lowp_dmat2x4;
+
+ // -- *mat3x2 --
+
+ /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, aligned_highp> aligned_highp_mat3x2;
+
+ /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, aligned_mediump> aligned_mediump_mat3x2;
+
+ /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, aligned_lowp> aligned_lowp_mat3x2;
+
+ /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, aligned_highp> aligned_highp_dmat3x2;
+
+ /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, aligned_mediump> aligned_mediump_dmat3x2;
+
+ /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, aligned_lowp> aligned_lowp_dmat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, packed_highp> packed_highp_mat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, packed_mediump> packed_mediump_mat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, packed_lowp> packed_lowp_mat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, packed_highp> packed_highp_dmat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, packed_mediump> packed_mediump_dmat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, packed_lowp> packed_lowp_dmat3x2;
+
+ // -- *mat3x3 --
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3x3;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3x3;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3x3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3x3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3x3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, packed_highp> packed_highp_mat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, packed_highp> packed_highp_dmat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3x3;
+
+ // -- *mat3x4 --
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, aligned_highp> aligned_highp_mat3x4;
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, aligned_mediump> aligned_mediump_mat3x4;
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, aligned_lowp> aligned_lowp_mat3x4;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, aligned_highp> aligned_highp_dmat3x4;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, aligned_mediump> aligned_mediump_dmat3x4;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, aligned_lowp> aligned_lowp_dmat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, packed_highp> packed_highp_mat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, packed_mediump> packed_mediump_mat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, packed_lowp> packed_lowp_mat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, packed_highp> packed_highp_dmat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, packed_mediump> packed_mediump_dmat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, packed_lowp> packed_lowp_dmat3x4;
+
+ // -- *mat4x2 --
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, aligned_highp> aligned_highp_mat4x2;
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, aligned_mediump> aligned_mediump_mat4x2;
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, aligned_lowp> aligned_lowp_mat4x2;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, aligned_highp> aligned_highp_dmat4x2;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, aligned_mediump> aligned_mediump_dmat4x2;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, aligned_lowp> aligned_lowp_dmat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, packed_highp> packed_highp_mat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, packed_mediump> packed_mediump_mat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, packed_lowp> packed_lowp_mat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, packed_highp> packed_highp_dmat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, packed_mediump> packed_mediump_dmat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, packed_lowp> packed_lowp_dmat4x2;
+
+ // -- *mat4x3 --
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, aligned_highp> aligned_highp_mat4x3;
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, aligned_mediump> aligned_mediump_mat4x3;
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, aligned_lowp> aligned_lowp_mat4x3;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, aligned_highp> aligned_highp_dmat4x3;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, aligned_mediump> aligned_mediump_dmat4x3;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, aligned_lowp> aligned_lowp_dmat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, packed_highp> packed_highp_mat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, packed_mediump> packed_mediump_mat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, packed_lowp> packed_lowp_mat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, packed_highp> packed_highp_dmat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, packed_mediump> packed_mediump_dmat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, packed_lowp> packed_lowp_dmat4x3;
+
+ // -- *mat4x4 --
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4x4;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4x4;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4x4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4x4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4x4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_highp> packed_highp_mat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_highp> packed_highp_dmat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4x4;
+
+ // -- default --
+
+ // Select the default (unqualified) aligned_*/packed_* float aliases from the
+ // precision requested at compile time via GLM_PRECISION_{LOWP,MEDIUMP,HIGHP}_FLOAT.
+#if(defined(GLM_PRECISION_LOWP_FLOAT))
+ // Default float vector/matrix aliases map to the low precision variants.
+ typedef aligned_lowp_vec1 aligned_vec1;
+ typedef aligned_lowp_vec2 aligned_vec2;
+ typedef aligned_lowp_vec3 aligned_vec3;
+ typedef aligned_lowp_vec4 aligned_vec4;
+ typedef packed_lowp_vec1 packed_vec1;
+ typedef packed_lowp_vec2 packed_vec2;
+ typedef packed_lowp_vec3 packed_vec3;
+ typedef packed_lowp_vec4 packed_vec4;
+
+ typedef aligned_lowp_mat2 aligned_mat2;
+ typedef aligned_lowp_mat3 aligned_mat3;
+ typedef aligned_lowp_mat4 aligned_mat4;
+ typedef packed_lowp_mat2 packed_mat2;
+ typedef packed_lowp_mat3 packed_mat3;
+ typedef packed_lowp_mat4 packed_mat4;
+
+ typedef aligned_lowp_mat2x2 aligned_mat2x2;
+ typedef aligned_lowp_mat2x3 aligned_mat2x3;
+ typedef aligned_lowp_mat2x4 aligned_mat2x4;
+ typedef aligned_lowp_mat3x2 aligned_mat3x2;
+ typedef aligned_lowp_mat3x3 aligned_mat3x3;
+ typedef aligned_lowp_mat3x4 aligned_mat3x4;
+ typedef aligned_lowp_mat4x2 aligned_mat4x2;
+ typedef aligned_lowp_mat4x3 aligned_mat4x3;
+ typedef aligned_lowp_mat4x4 aligned_mat4x4;
+ typedef packed_lowp_mat2x2 packed_mat2x2;
+ typedef packed_lowp_mat2x3 packed_mat2x3;
+ typedef packed_lowp_mat2x4 packed_mat2x4;
+ typedef packed_lowp_mat3x2 packed_mat3x2;
+ typedef packed_lowp_mat3x3 packed_mat3x3;
+ typedef packed_lowp_mat3x4 packed_mat3x4;
+ typedef packed_lowp_mat4x2 packed_mat4x2;
+ typedef packed_lowp_mat4x3 packed_mat4x3;
+ typedef packed_lowp_mat4x4 packed_mat4x4;
+#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT))
+ // Default float vector/matrix aliases map to the medium precision variants.
+ typedef aligned_mediump_vec1 aligned_vec1;
+ typedef aligned_mediump_vec2 aligned_vec2;
+ typedef aligned_mediump_vec3 aligned_vec3;
+ typedef aligned_mediump_vec4 aligned_vec4;
+ typedef packed_mediump_vec1 packed_vec1;
+ typedef packed_mediump_vec2 packed_vec2;
+ typedef packed_mediump_vec3 packed_vec3;
+ typedef packed_mediump_vec4 packed_vec4;
+
+ typedef aligned_mediump_mat2 aligned_mat2;
+ typedef aligned_mediump_mat3 aligned_mat3;
+ typedef aligned_mediump_mat4 aligned_mat4;
+ typedef packed_mediump_mat2 packed_mat2;
+ typedef packed_mediump_mat3 packed_mat3;
+ typedef packed_mediump_mat4 packed_mat4;
+
+ typedef aligned_mediump_mat2x2 aligned_mat2x2;
+ typedef aligned_mediump_mat2x3 aligned_mat2x3;
+ typedef aligned_mediump_mat2x4 aligned_mat2x4;
+ typedef aligned_mediump_mat3x2 aligned_mat3x2;
+ typedef aligned_mediump_mat3x3 aligned_mat3x3;
+ typedef aligned_mediump_mat3x4 aligned_mat3x4;
+ typedef aligned_mediump_mat4x2 aligned_mat4x2;
+ typedef aligned_mediump_mat4x3 aligned_mat4x3;
+ typedef aligned_mediump_mat4x4 aligned_mat4x4;
+ typedef packed_mediump_mat2x2 packed_mat2x2;
+ typedef packed_mediump_mat2x3 packed_mat2x3;
+ typedef packed_mediump_mat2x4 packed_mat2x4;
+ typedef packed_mediump_mat3x2 packed_mat3x2;
+ typedef packed_mediump_mat3x3 packed_mat3x3;
+ typedef packed_mediump_mat3x4 packed_mat3x4;
+ typedef packed_mediump_mat4x2 packed_mat4x2;
+ typedef packed_mediump_mat4x3 packed_mat4x3;
+ typedef packed_mediump_mat4x4 packed_mat4x4;
+#else //defined(GLM_PRECISION_HIGHP_FLOAT)
+ /// 1 component vector aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_vec1 aligned_vec1;
+
+ /// 2 components vector aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_vec2 aligned_vec2;
+
+ /// 3 components vector aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_vec3 aligned_vec3;
+
+ /// 4 components vector aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_vec4 aligned_vec4;
+
+ /// 1 component vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec1 packed_vec1;
+
+ /// 2 components vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec2 packed_vec2;
+
+ /// 3 components vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec3 packed_vec3;
+
+ /// 4 components vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec4 packed_vec4;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2 aligned_mat2;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3 aligned_mat3;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4 aligned_mat4;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2 packed_mat2;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3 packed_mat3;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4 packed_mat4;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2x2 aligned_mat2x2;
+
+ /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2x3 aligned_mat2x3;
+
+ /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2x4 aligned_mat2x4;
+
+ /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3x2 aligned_mat3x2;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3x3 aligned_mat3x3;
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3x4 aligned_mat3x4;
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4x2 aligned_mat4x2;
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4x3 aligned_mat4x3;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4x4 aligned_mat4x4;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2x2 packed_mat2x2;
+
+ /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2x3 packed_mat2x3;
+
+ /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2x4 packed_mat2x4;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3x2 packed_mat3x2;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3x3 packed_mat3x3;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3x4 packed_mat3x4;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4x2 packed_mat4x2;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4x3 packed_mat4x3;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4x4 packed_mat4x4;
+#endif//GLM_PRECISION
+
+#if(defined(GLM_PRECISION_LOWP_DOUBLE))
+ typedef aligned_lowp_dvec1 aligned_dvec1;
+ typedef aligned_lowp_dvec2 aligned_dvec2;
+ typedef aligned_lowp_dvec3 aligned_dvec3;
+ typedef aligned_lowp_dvec4 aligned_dvec4;
+ typedef packed_lowp_dvec1 packed_dvec1;
+ typedef packed_lowp_dvec2 packed_dvec2;
+ typedef packed_lowp_dvec3 packed_dvec3;
+ typedef packed_lowp_dvec4 packed_dvec4;
+
+ typedef aligned_lowp_dmat2 aligned_dmat2;
+ typedef aligned_lowp_dmat3 aligned_dmat3;
+ typedef aligned_lowp_dmat4 aligned_dmat4;
+ typedef packed_lowp_dmat2 packed_dmat2;
+ typedef packed_lowp_dmat3 packed_dmat3;
+ typedef packed_lowp_dmat4 packed_dmat4;
+
+ typedef aligned_lowp_dmat2x2 aligned_dmat2x2;
+ typedef aligned_lowp_dmat2x3 aligned_dmat2x3;
+ typedef aligned_lowp_dmat2x4 aligned_dmat2x4;
+ typedef aligned_lowp_dmat3x2 aligned_dmat3x2;
+ typedef aligned_lowp_dmat3x3 aligned_dmat3x3;
+ typedef aligned_lowp_dmat3x4 aligned_dmat3x4;
+ typedef aligned_lowp_dmat4x2 aligned_dmat4x2;
+ typedef aligned_lowp_dmat4x3 aligned_dmat4x3;
+ typedef aligned_lowp_dmat4x4 aligned_dmat4x4;
+ typedef packed_lowp_dmat2x2 packed_dmat2x2;
+ typedef packed_lowp_dmat2x3 packed_dmat2x3;
+ typedef packed_lowp_dmat2x4 packed_dmat2x4;
+ typedef packed_lowp_dmat3x2 packed_dmat3x2;
+ typedef packed_lowp_dmat3x3 packed_dmat3x3;
+ typedef packed_lowp_dmat3x4 packed_dmat3x4;
+ typedef packed_lowp_dmat4x2 packed_dmat4x2;
+ typedef packed_lowp_dmat4x3 packed_dmat4x3;
+ typedef packed_lowp_dmat4x4 packed_dmat4x4;
+#elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE))
+ typedef aligned_mediump_dvec1 aligned_dvec1;
+ typedef aligned_mediump_dvec2 aligned_dvec2;
+ typedef aligned_mediump_dvec3 aligned_dvec3;
+ typedef aligned_mediump_dvec4 aligned_dvec4;
+ typedef packed_mediump_dvec1 packed_dvec1;
+ typedef packed_mediump_dvec2 packed_dvec2;
+ typedef packed_mediump_dvec3 packed_dvec3;
+ typedef packed_mediump_dvec4 packed_dvec4;
+
+ typedef aligned_mediump_dmat2 aligned_dmat2;
+ typedef aligned_mediump_dmat3 aligned_dmat3;
+ typedef aligned_mediump_dmat4 aligned_dmat4;
+ typedef packed_mediump_dmat2 packed_dmat2;
+ typedef packed_mediump_dmat3 packed_dmat3;
+ typedef packed_mediump_dmat4 packed_dmat4;
+
+ typedef aligned_mediump_dmat2x2 aligned_dmat2x2;
+ typedef aligned_mediump_dmat2x3 aligned_dmat2x3;
+ typedef aligned_mediump_dmat2x4 aligned_dmat2x4;
+ typedef aligned_mediump_dmat3x2 aligned_dmat3x2;
+ typedef aligned_mediump_dmat3x3 aligned_dmat3x3;
+ typedef aligned_mediump_dmat3x4 aligned_dmat3x4;
+ typedef aligned_mediump_dmat4x2 aligned_dmat4x2;
+ typedef aligned_mediump_dmat4x3 aligned_dmat4x3;
+ typedef aligned_mediump_dmat4x4 aligned_dmat4x4;
+ typedef packed_mediump_dmat2x2 packed_dmat2x2;
+ typedef packed_mediump_dmat2x3 packed_dmat2x3;
+ typedef packed_mediump_dmat2x4 packed_dmat2x4;
+ typedef packed_mediump_dmat3x2 packed_dmat3x2;
+ typedef packed_mediump_dmat3x3 packed_dmat3x3;
+ typedef packed_mediump_dmat3x4 packed_dmat3x4;
+ typedef packed_mediump_dmat4x2 packed_dmat4x2;
+ typedef packed_mediump_dmat4x3 packed_dmat4x3;
+ typedef packed_mediump_dmat4x4 packed_dmat4x4;
+#else //defined(GLM_PRECISION_HIGHP_DOUBLE)
+ /// 1 component vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec1 aligned_dvec1;
+
+ /// 2 components vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec2 aligned_dvec2;
+
+ /// 3 components vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec3 aligned_dvec3;
+
+ /// 4 components vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec4 aligned_dvec4;
+
+ /// 1 component vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec1 packed_dvec1;
+
+ /// 2 components vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec2 packed_dvec2;
+
+ /// 3 components vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec3 packed_dvec3;
+
+ /// 4 components vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec4 packed_dvec4;
+
+	/// 2 by 2 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat2 aligned_dmat2;
+
+	/// 3 by 3 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat3 aligned_dmat3;
+
+	/// 4 by 4 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat4 aligned_dmat4;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2 packed_dmat2;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat3 packed_dmat3;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat4 packed_dmat4;
+
+	/// 2 by 2 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat2x2 aligned_dmat2x2;
+
+	/// 2 by 3 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat2x3 aligned_dmat2x3;
+
+	/// 2 by 4 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat2x4 aligned_dmat2x4;
+
+	/// 3 by 2 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat3x2 aligned_dmat3x2;
+
+	/// 3 by 3 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat3x3 aligned_dmat3x3;
+
+	/// 3 by 4 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat3x4 aligned_dmat3x4;
+
+	/// 4 by 2 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat4x2 aligned_dmat4x2;
+
+	/// 4 by 3 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat4x3 aligned_dmat4x3;
+
+	/// 4 by 4 matrix aligned in memory of double-precision floating-point numbers.
+	typedef aligned_highp_dmat4x4 aligned_dmat4x4;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2x2 packed_dmat2x2;
+
+ /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2x3 packed_dmat2x3;
+
+ /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2x4 packed_dmat2x4;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat3x2 packed_dmat3x2;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat3x3 packed_dmat3x3;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat3x4 packed_dmat3x4;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat4x2 packed_dmat4x2;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat4x3 packed_dmat4x3;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat4x4 packed_dmat4x4;
+#endif//GLM_PRECISION
+
+#if(defined(GLM_PRECISION_LOWP_INT))
+ typedef aligned_lowp_ivec1 aligned_ivec1;
+ typedef aligned_lowp_ivec2 aligned_ivec2;
+ typedef aligned_lowp_ivec3 aligned_ivec3;
+ typedef aligned_lowp_ivec4 aligned_ivec4;
+#elif(defined(GLM_PRECISION_MEDIUMP_INT))
+ typedef aligned_mediump_ivec1 aligned_ivec1;
+ typedef aligned_mediump_ivec2 aligned_ivec2;
+ typedef aligned_mediump_ivec3 aligned_ivec3;
+ typedef aligned_mediump_ivec4 aligned_ivec4;
+#else //defined(GLM_PRECISION_HIGHP_INT)
+ /// 1 component vector aligned in memory of signed integer numbers.
+ typedef aligned_highp_ivec1 aligned_ivec1;
+
+ /// 2 components vector aligned in memory of signed integer numbers.
+ typedef aligned_highp_ivec2 aligned_ivec2;
+
+ /// 3 components vector aligned in memory of signed integer numbers.
+ typedef aligned_highp_ivec3 aligned_ivec3;
+
+ /// 4 components vector aligned in memory of signed integer numbers.
+ typedef aligned_highp_ivec4 aligned_ivec4;
+
+ /// 1 component vector tightly packed in memory of signed integer numbers.
+ typedef packed_highp_ivec1 packed_ivec1;
+
+ /// 2 components vector tightly packed in memory of signed integer numbers.
+ typedef packed_highp_ivec2 packed_ivec2;
+
+ /// 3 components vector tightly packed in memory of signed integer numbers.
+ typedef packed_highp_ivec3 packed_ivec3;
+
+ /// 4 components vector tightly packed in memory of signed integer numbers.
+ typedef packed_highp_ivec4 packed_ivec4;
+#endif//GLM_PRECISION
+
+ // -- Unsigned integer definition --
+
+#if(defined(GLM_PRECISION_LOWP_UINT))
+ typedef aligned_lowp_uvec1 aligned_uvec1;
+ typedef aligned_lowp_uvec2 aligned_uvec2;
+ typedef aligned_lowp_uvec3 aligned_uvec3;
+ typedef aligned_lowp_uvec4 aligned_uvec4;
+#elif(defined(GLM_PRECISION_MEDIUMP_UINT))
+ typedef aligned_mediump_uvec1 aligned_uvec1;
+ typedef aligned_mediump_uvec2 aligned_uvec2;
+ typedef aligned_mediump_uvec3 aligned_uvec3;
+ typedef aligned_mediump_uvec4 aligned_uvec4;
+#else //defined(GLM_PRECISION_HIGHP_UINT)
+ /// 1 component vector aligned in memory of unsigned integer numbers.
+ typedef aligned_highp_uvec1 aligned_uvec1;
+
+ /// 2 components vector aligned in memory of unsigned integer numbers.
+ typedef aligned_highp_uvec2 aligned_uvec2;
+
+ /// 3 components vector aligned in memory of unsigned integer numbers.
+ typedef aligned_highp_uvec3 aligned_uvec3;
+
+ /// 4 components vector aligned in memory of unsigned integer numbers.
+ typedef aligned_highp_uvec4 aligned_uvec4;
+
+ /// 1 component vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec1 packed_uvec1;
+
+ /// 2 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec2 packed_uvec2;
+
+ /// 3 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec3 packed_uvec3;
+
+ /// 4 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec4 packed_uvec4;
+#endif//GLM_PRECISION
+
+#if(defined(GLM_PRECISION_LOWP_BOOL))
+ typedef aligned_lowp_bvec1 aligned_bvec1;
+ typedef aligned_lowp_bvec2 aligned_bvec2;
+ typedef aligned_lowp_bvec3 aligned_bvec3;
+ typedef aligned_lowp_bvec4 aligned_bvec4;
+#elif(defined(GLM_PRECISION_MEDIUMP_BOOL))
+ typedef aligned_mediump_bvec1 aligned_bvec1;
+ typedef aligned_mediump_bvec2 aligned_bvec2;
+ typedef aligned_mediump_bvec3 aligned_bvec3;
+ typedef aligned_mediump_bvec4 aligned_bvec4;
+#else //defined(GLM_PRECISION_HIGHP_BOOL)
+ /// 1 component vector aligned in memory of bool values.
+ typedef aligned_highp_bvec1 aligned_bvec1;
+
+ /// 2 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec2 aligned_bvec2;
+
+ /// 3 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec3 aligned_bvec3;
+
+ /// 4 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec4 aligned_bvec4;
+
+	/// 1 component vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec1 packed_bvec1;
+
+ /// 2 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec2 packed_bvec2;
+
+ /// 3 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec3 packed_bvec3;
+
+ /// 4 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec4 packed_bvec4;
+#endif//GLM_PRECISION
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtc/type_precision.hpp b/3rdparty/glm/source/glm/gtc/type_precision.hpp
new file mode 100644
index 0000000..775e2f4
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/type_precision.hpp
@@ -0,0 +1,2094 @@
+/// @ref gtc_type_precision
+/// @file glm/gtc/type_precision.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_type_precision GLM_GTC_type_precision
+/// @ingroup gtc
+///
+/// Include <glm/gtc/type_precision.hpp> to use the features of this extension.
+///
+/// Defines specific C++-based qualifier types.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/quaternion.hpp"
+#include "../gtc/vec1.hpp"
+#include "../ext/vector_int1_sized.hpp"
+#include "../ext/vector_int2_sized.hpp"
+#include "../ext/vector_int3_sized.hpp"
+#include "../ext/vector_int4_sized.hpp"
+#include "../ext/scalar_int_sized.hpp"
+#include "../ext/vector_uint1_sized.hpp"
+#include "../ext/vector_uint2_sized.hpp"
+#include "../ext/vector_uint3_sized.hpp"
+#include "../ext/vector_uint4_sized.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/type_vec2.hpp"
+#include "../detail/type_vec3.hpp"
+#include "../detail/type_vec4.hpp"
+#include "../detail/type_mat2x2.hpp"
+#include "../detail/type_mat2x3.hpp"
+#include "../detail/type_mat2x4.hpp"
+#include "../detail/type_mat3x2.hpp"
+#include "../detail/type_mat3x3.hpp"
+#include "../detail/type_mat3x4.hpp"
+#include "../detail/type_mat4x2.hpp"
+#include "../detail/type_mat4x3.hpp"
+#include "../detail/type_mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_type_precision extension included")
+#endif
+
+namespace glm
+{
+ ///////////////////////////
+ // Signed int vector types
+
+ /// @addtogroup gtc_type_precision
+ /// @{
+
+ /// Low qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 lowp_int8;
+
+ /// Low qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 lowp_int16;
+
+ /// Low qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 lowp_int32;
+
+ /// Low qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 lowp_int64;
+
+ /// Low qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 lowp_int8_t;
+
+ /// Low qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 lowp_int16_t;
+
+ /// Low qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 lowp_int32_t;
+
+ /// Low qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 lowp_int64_t;
+
+ /// Low qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 lowp_i8;
+
+ /// Low qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 lowp_i16;
+
+ /// Low qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 lowp_i32;
+
+ /// Low qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 lowp_i64;
+
+ /// Medium qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 mediump_int8;
+
+ /// Medium qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 mediump_int16;
+
+ /// Medium qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 mediump_int32;
+
+ /// Medium qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 mediump_int64;
+
+ /// Medium qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 mediump_int8_t;
+
+ /// Medium qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 mediump_int16_t;
+
+ /// Medium qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 mediump_int32_t;
+
+ /// Medium qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 mediump_int64_t;
+
+ /// Medium qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 mediump_i8;
+
+ /// Medium qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 mediump_i16;
+
+ /// Medium qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 mediump_i32;
+
+ /// Medium qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 mediump_i64;
+
+ /// High qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 highp_int8;
+
+ /// High qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 highp_int16;
+
+ /// High qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 highp_int32;
+
+ /// High qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 highp_int64;
+
+ /// High qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 highp_int8_t;
+
+ /// High qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 highp_int16_t;
+
+	/// High qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 highp_int32_t;
+
+ /// High qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 highp_int64_t;
+
+ /// High qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 highp_i8;
+
+ /// High qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 highp_i16;
+
+ /// High qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 highp_i32;
+
+ /// High qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 highp_i64;
+
+
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+ using std::int8_t;
+ using std::int16_t;
+ using std::int32_t;
+ using std::int64_t;
+#else
+ /// 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 int8_t;
+
+ /// 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 int16_t;
+
+ /// 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 int32_t;
+
+ /// 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 int64_t;
+#endif
+
+ /// 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 i8;
+
+ /// 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 i16;
+
+ /// 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 i32;
+
+ /// 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 i64;
+
+ /////////////////////////////
+ // Unsigned int vector types
+
+ /// Low qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 lowp_uint8;
+
+ /// Low qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 lowp_uint16;
+
+ /// Low qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 lowp_uint32;
+
+ /// Low qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 lowp_uint64;
+
+ /// Low qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 lowp_uint8_t;
+
+ /// Low qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 lowp_uint16_t;
+
+ /// Low qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 lowp_uint32_t;
+
+ /// Low qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 lowp_uint64_t;
+
+ /// Low qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 lowp_u8;
+
+ /// Low qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 lowp_u16;
+
+ /// Low qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 lowp_u32;
+
+ /// Low qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 lowp_u64;
+
+ /// Medium qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 mediump_uint8;
+
+ /// Medium qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 mediump_uint16;
+
+ /// Medium qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 mediump_uint32;
+
+ /// Medium qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 mediump_uint64;
+
+ /// Medium qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 mediump_uint8_t;
+
+ /// Medium qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 mediump_uint16_t;
+
+ /// Medium qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 mediump_uint32_t;
+
+ /// Medium qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 mediump_uint64_t;
+
+ /// Medium qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 mediump_u8;
+
+ /// Medium qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 mediump_u16;
+
+ /// Medium qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 mediump_u32;
+
+ /// Medium qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 mediump_u64;
+
+ /// High qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 highp_uint8;
+
+ /// High qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 highp_uint16;
+
+ /// High qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 highp_uint32;
+
+ /// High qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 highp_uint64;
+
+ /// High qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 highp_uint8_t;
+
+ /// High qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 highp_uint16_t;
+
+ /// High qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 highp_uint32_t;
+
+ /// High qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 highp_uint64_t;
+
+ /// High qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 highp_u8;
+
+ /// High qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 highp_u16;
+
+ /// High qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 highp_u32;
+
+ /// High qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 highp_u64;
+
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+ using std::uint8_t;
+ using std::uint16_t;
+ using std::uint32_t;
+ using std::uint64_t;
+#else
+ /// Default qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 uint8_t;
+
+ /// Default qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 uint16_t;
+
+ /// Default qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 uint32_t;
+
+ /// Default qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 uint64_t;
+#endif
+
+ /// Default qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 u8;
+
+ /// Default qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 u16;
+
+ /// Default qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 u32;
+
+ /// Default qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 u64;
+
+
+
+
+
+ //////////////////////
+ // Float vector types
+
+ /// Single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float float32;
+
+ /// Double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef double float64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32_t;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64_t;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_f32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_f64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32_t;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64_t;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_f32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_f64;
+
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32_t;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64_t;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_f32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_f64;
+
+
+ /// Medium 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 mediump_float32;
+
+ /// Medium 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 mediump_float64;
+
+ /// Medium 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 mediump_float32_t;
+
+ /// Medium 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 mediump_float64_t;
+
+ /// Medium 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 mediump_f32;
+
+ /// Medium 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 mediump_f64;
+
+
+ /// High 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 highp_float32;
+
+ /// High 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 highp_float64;
+
+ /// High 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 highp_float32_t;
+
+ /// High 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 highp_float64_t;
+
+ /// High 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 highp_f32;
+
+ /// High 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 highp_f64;
+
+
+#if(defined(GLM_PRECISION_LOWP_FLOAT))
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_float32_t float32_t;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_float64_t float64_t;
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_f32 f32;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_f64 f64;
+
+#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT))
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float32 float32_t;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float64 float64_t;
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float32 f32;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float64 f64;
+
+#else//(defined(GLM_PRECISION_HIGHP_FLOAT))
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float32_t float32_t;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float64_t float64_t;
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float32_t f32;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float64_t f64;
+#endif
+
+
+ /// Low single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, lowp> lowp_fvec1;
+
+ /// Low single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, float, lowp> lowp_fvec2;
+
+ /// Low single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, float, lowp> lowp_fvec3;
+
+ /// Low single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, float, lowp> lowp_fvec4;
+
+
+ /// Medium single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, mediump> mediump_fvec1;
+
+ /// Medium Single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, float, mediump> mediump_fvec2;
+
+ /// Medium Single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, float, mediump> mediump_fvec3;
+
+ /// Medium Single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, float, mediump> mediump_fvec4;
+
+
+ /// High single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, highp> highp_fvec1;
+
+ /// High Single-qualifier floating-point vector of 2 components.
+ /// @see core_precision
+ typedef vec<2, float, highp> highp_fvec2;
+
+ /// High Single-qualifier floating-point vector of 3 components.
+ /// @see core_precision
+ typedef vec<3, float, highp> highp_fvec3;
+
+ /// High Single-qualifier floating-point vector of 4 components.
+ /// @see core_precision
+ typedef vec<4, float, highp> highp_fvec4;
+
+
+ /// Low single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, lowp> lowp_f32vec1;
+
+ /// Low single-qualifier floating-point vector of 2 components.
+ /// @see core_precision
+ typedef vec<2, f32, lowp> lowp_f32vec2;
+
+ /// Low single-qualifier floating-point vector of 3 components.
+ /// @see core_precision
+ typedef vec<3, f32, lowp> lowp_f32vec3;
+
+ /// Low single-qualifier floating-point vector of 4 components.
+ /// @see core_precision
+ typedef vec<4, f32, lowp> lowp_f32vec4;
+
+ /// Medium single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, mediump> mediump_f32vec1;
+
+ /// Medium single-qualifier floating-point vector of 2 components.
+ /// @see core_precision
+ typedef vec<2, f32, mediump> mediump_f32vec2;
+
+ /// Medium single-qualifier floating-point vector of 3 components.
+ /// @see core_precision
+ typedef vec<3, f32, mediump> mediump_f32vec3;
+
+ /// Medium single-qualifier floating-point vector of 4 components.
+ /// @see core_precision
+ typedef vec<4, f32, mediump> mediump_f32vec4;
+
+ /// High single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, highp> highp_f32vec1;
+
+ /// High single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f32, highp> highp_f32vec2;
+
+ /// High single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f32, highp> highp_f32vec3;
+
+ /// High single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f32, highp> highp_f32vec4;
+
+
+ /// Low double-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f64, lowp> lowp_f64vec1;
+
+ /// Low double-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f64, lowp> lowp_f64vec2;
+
+ /// Low double-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f64, lowp> lowp_f64vec3;
+
+ /// Low double-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f64, lowp> lowp_f64vec4;
+
+ /// Medium double-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f64, mediump> mediump_f64vec1;
+
+ /// Medium double-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f64, mediump> mediump_f64vec2;
+
+ /// Medium double-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f64, mediump> mediump_f64vec3;
+
+ /// Medium double-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f64, mediump> mediump_f64vec4;
+
+ /// High double-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f64, highp> highp_f64vec1;
+
+ /// High double-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f64, highp> highp_f64vec2;
+
+ /// High double-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f64, highp> highp_f64vec3;
+
+ /// High double-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f64, highp> highp_f64vec4;
+
+
+
+ //////////////////////
+ // Float matrix types
+
+ /// Low single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef lowp_f32 lowp_fmat1x1;
+
+ /// Low single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, lowp> lowp_fmat2x2;
+
+ /// Low single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, lowp> lowp_fmat2x3;
+
+ /// Low single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, lowp> lowp_fmat2x4;
+
+ /// Low single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, lowp> lowp_fmat3x2;
+
+ /// Low single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, lowp> lowp_fmat3x3;
+
+ /// Low single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, lowp> lowp_fmat3x4;
+
+ /// Low single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, lowp> lowp_fmat4x2;
+
+ /// Low single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, lowp> lowp_fmat4x3;
+
+ /// Low single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, lowp> lowp_fmat4x4;
+
+ /// Low single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef lowp_fmat1x1 lowp_fmat1;
+
+ /// Low single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_fmat2x2 lowp_fmat2;
+
+ /// Low single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_fmat3x3 lowp_fmat3;
+
+ /// Low single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_fmat4x4 lowp_fmat4;
+
+
+ /// Medium single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef mediump_f32 mediump_fmat1x1;
+
+ /// Medium single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, mediump> mediump_fmat2x2;
+
+ /// Medium single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, mediump> mediump_fmat2x3;
+
+ /// Medium single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, mediump> mediump_fmat2x4;
+
+ /// Medium single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, mediump> mediump_fmat3x2;
+
+ /// Medium single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, mediump> mediump_fmat3x3;
+
+ /// Medium single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, mediump> mediump_fmat3x4;
+
+ /// Medium single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, mediump> mediump_fmat4x2;
+
+ /// Medium single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, mediump> mediump_fmat4x3;
+
+ /// Medium single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, mediump> mediump_fmat4x4;
+
+ /// Medium single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef mediump_fmat1x1 mediump_fmat1;
+
+ /// Medium single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_fmat2x2 mediump_fmat2;
+
+ /// Medium single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_fmat3x3 mediump_fmat3;
+
+ /// Medium single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_fmat4x4 mediump_fmat4;
+
+
+ /// High single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef highp_f32 highp_fmat1x1;
+
+ /// High single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, highp> highp_fmat2x2;
+
+ /// High single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, highp> highp_fmat2x3;
+
+ /// High single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, highp> highp_fmat2x4;
+
+ /// High single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, highp> highp_fmat3x2;
+
+ /// High single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, highp> highp_fmat3x3;
+
+ /// High single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, highp> highp_fmat3x4;
+
+ /// High single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, highp> highp_fmat4x2;
+
+ /// High single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, highp> highp_fmat4x3;
+
+ /// High single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, highp> highp_fmat4x4;
+
+ /// High single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef highp_fmat1x1 highp_fmat1;
+
+ /// High single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef highp_fmat2x2 highp_fmat2;
+
+ /// High single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef highp_fmat3x3 highp_fmat3;
+
+ /// High single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef highp_fmat4x4 highp_fmat4;
+
+
+ /// Low single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 lowp_f32mat1x1;
+
+ /// Low single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, lowp> lowp_f32mat2x2;
+
+ /// Low single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, lowp> lowp_f32mat2x3;
+
+ /// Low single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, lowp> lowp_f32mat2x4;
+
+ /// Low single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, lowp> lowp_f32mat3x2;
+
+ /// Low single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, lowp> lowp_f32mat3x3;
+
+ /// Low single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, lowp> lowp_f32mat3x4;
+
+ /// Low single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, lowp> lowp_f32mat4x2;
+
+ /// Low single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, lowp> lowp_f32mat4x3;
+
+ /// Low single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, lowp> lowp_f32mat4x4;
+
+ /// Low single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1<f32, lowp> lowp_f32mat1;
+
+ /// Low single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f32mat2x2 lowp_f32mat2;
+
+ /// Low single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f32mat3x3 lowp_f32mat3;
+
+ /// Low single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f32mat4x4 lowp_f32mat4;
+
+
+ /// Medium single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 mediump_f32mat1x1;
+
+ /// Medium single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, mediump> mediump_f32mat2x2;
+
+ /// Medium single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, mediump> mediump_f32mat2x3;
+
+ /// Medium single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, mediump> mediump_f32mat2x4;
+
+ /// Medium single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, mediump> mediump_f32mat3x2;
+
+ /// Medium single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, mediump> mediump_f32mat3x3;
+
+ /// Medium single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, mediump> mediump_f32mat3x4;
+
+ /// Medium single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, mediump> mediump_f32mat4x2;
+
+ /// Medium single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, mediump> mediump_f32mat4x3;
+
+ /// Medium single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, mediump> mediump_f32mat4x4;
+
+ /// Medium single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1<f32, mediump> mediump_f32mat1;
+
+ /// Medium single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_f32mat2x2 mediump_f32mat2;
+
+ /// Medium single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_f32mat3x3 mediump_f32mat3;
+
+ /// Medium single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_f32mat4x4 mediump_f32mat4;
+
+
+ /// High single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 highp_f32mat1x1;
+
+ /// High single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, highp> highp_f32mat2x2;
+
+ /// High single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, highp> highp_f32mat2x3;
+
+ /// High single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, highp> highp_f32mat2x4;
+
+ /// High single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, highp> highp_f32mat3x2;
+
+ /// High single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, highp> highp_f32mat3x3;
+
+ /// High single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, highp> highp_f32mat3x4;
+
+ /// High single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, highp> highp_f32mat4x2;
+
+ /// High single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, highp> highp_f32mat4x3;
+
+ /// High single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, highp> highp_f32mat4x4;
+
+ /// High single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1<f32, highp> highp_f32mat1;
+
+ /// High single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef highp_f32mat2x2 highp_f32mat2;
+
+ /// High single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef highp_f32mat3x3 highp_f32mat3;
+
+ /// High single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef highp_f32mat4x4 highp_f32mat4;
+
+
+ /// Low double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f64 lowp_f64mat1x1;
+
+ /// Low double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f64, lowp> lowp_f64mat2x2;
+
+ /// Low double-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f64, lowp> lowp_f64mat2x3;
+
+ /// Low double-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f64, lowp> lowp_f64mat2x4;
+
+ /// Low double-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f64, lowp> lowp_f64mat3x2;
+
+ /// Low double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f64, lowp> lowp_f64mat3x3;
+
+ /// Low double-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f64, lowp> lowp_f64mat3x4;
+
+ /// Low double-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f64, lowp> lowp_f64mat4x2;
+
+ /// Low double-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f64, lowp> lowp_f64mat4x3;
+
+ /// Low double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f64, lowp> lowp_f64mat4x4;
+
+ /// Low double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef lowp_f64mat1x1 lowp_f64mat1;
+
+ /// Low double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f64mat2x2 lowp_f64mat2;
+
+ /// Low double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f64mat3x3 lowp_f64mat3;
+
+ /// Low double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f64mat4x4 lowp_f64mat4;
+
+
+ /// Medium double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f64 mediump_f64mat1x1;
+
+ /// Medium double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f64, mediump> mediump_f64mat2x2;
+
+ /// Medium double-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f64, mediump> mediump_f64mat2x3;
+
+ /// Medium double-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f64, mediump> mediump_f64mat2x4;
+
+ /// Medium double-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f64, mediump> mediump_f64mat3x2;
+
+ /// Medium double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f64, mediump> mediump_f64mat3x3;
+
+ /// Medium double-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f64, mediump> mediump_f64mat3x4;
+
+ /// Medium double-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f64, mediump> mediump_f64mat4x2;
+
+ /// Medium double-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f64, mediump> mediump_f64mat4x3;
+
+ /// Medium double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f64, mediump> mediump_f64mat4x4;
+
+ /// Medium double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef mediump_f64mat1x1 mediump_f64mat1;
+
+ /// Medium double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_f64mat2x2 mediump_f64mat2;
+
+ /// Medium double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_f64mat3x3 mediump_f64mat3;
+
+ /// Medium double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mediump_f64mat4x4 mediump_f64mat4;
+
+ /// High double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f64 highp_f64mat1x1;
+
+ /// High double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f64, highp> highp_f64mat2x2;
+
+ /// High double-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f64, highp> highp_f64mat2x3;
+
+ /// High double-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f64, highp> highp_f64mat2x4;
+
+ /// High double-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f64, highp> highp_f64mat3x2;
+
+ /// High double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f64, highp> highp_f64mat3x3;
+
+ /// High double-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f64, highp> highp_f64mat3x4;
+
+ /// High double-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f64, highp> highp_f64mat4x2;
+
+ /// High double-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f64, highp> highp_f64mat4x3;
+
+ /// High double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f64, highp> highp_f64mat4x4;
+
+ /// High double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef highp_f64mat1x1 highp_f64mat1;
+
+ /// High double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef highp_f64mat2x2 highp_f64mat2;
+
+ /// High double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef highp_f64mat3x3 highp_f64mat3;
+
+ /// High double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef highp_f64mat4x4 highp_f64mat4;
+
+
+ /////////////////////////////
+ // Signed int vector types
+
+ /// Low qualifier signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, int, lowp> lowp_ivec1;
+
+ /// Low qualifier signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, int, lowp> lowp_ivec2;
+
+ /// Low qualifier signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, int, lowp> lowp_ivec3;
+
+ /// Low qualifier signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, int, lowp> lowp_ivec4;
+
+
+ /// Medium qualifier signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, int, mediump> mediump_ivec1;
+
+ /// Medium qualifier signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, int, mediump> mediump_ivec2;
+
+ /// Medium qualifier signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, int, mediump> mediump_ivec3;
+
+ /// Medium qualifier signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, int, mediump> mediump_ivec4;
+
+
+ /// High qualifier signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, int, highp> highp_ivec1;
+
+ /// High qualifier signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, int, highp> highp_ivec2;
+
+ /// High qualifier signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, int, highp> highp_ivec3;
+
+ /// High qualifier signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, int, highp> highp_ivec4;
+
+
+ /// Low qualifier 8 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i8, lowp> lowp_i8vec1;
+
+ /// Low qualifier 8 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i8, lowp> lowp_i8vec2;
+
+ /// Low qualifier 8 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i8, lowp> lowp_i8vec3;
+
+ /// Low qualifier 8 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i8, lowp> lowp_i8vec4;
+
+
+ /// Medium qualifier 8 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i8, mediump> mediump_i8vec1;
+
+ /// Medium qualifier 8 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i8, mediump> mediump_i8vec2;
+
+ /// Medium qualifier 8 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i8, mediump> mediump_i8vec3;
+
+ /// Medium qualifier 8 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i8, mediump> mediump_i8vec4;
+
+
+ /// High qualifier 8 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i8, highp> highp_i8vec1;
+
+ /// High qualifier 8 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i8, highp> highp_i8vec2;
+
+ /// High qualifier 8 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i8, highp> highp_i8vec3;
+
+ /// High qualifier 8 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i8, highp> highp_i8vec4;
+
+
+ /// Low qualifier 16 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i16, lowp> lowp_i16vec1;
+
+ /// Low qualifier 16 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i16, lowp> lowp_i16vec2;
+
+ /// Low qualifier 16 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i16, lowp> lowp_i16vec3;
+
+ /// Low qualifier 16 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i16, lowp> lowp_i16vec4;
+
+
+ /// Medium qualifier 16 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i16, mediump> mediump_i16vec1;
+
+ /// Medium qualifier 16 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i16, mediump> mediump_i16vec2;
+
+ /// Medium qualifier 16 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i16, mediump> mediump_i16vec3;
+
+ /// Medium qualifier 16 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i16, mediump> mediump_i16vec4;
+
+
+ /// High qualifier 16 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i16, highp> highp_i16vec1;
+
+ /// High qualifier 16 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i16, highp> highp_i16vec2;
+
+ /// High qualifier 16 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i16, highp> highp_i16vec3;
+
+ /// High qualifier 16 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i16, highp> highp_i16vec4;
+
+
+ /// Low qualifier 32 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i32, lowp> lowp_i32vec1;
+
+ /// Low qualifier 32 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i32, lowp> lowp_i32vec2;
+
+ /// Low qualifier 32 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i32, lowp> lowp_i32vec3;
+
+ /// Low qualifier 32 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i32, lowp> lowp_i32vec4;
+
+
+ /// Medium qualifier 32 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i32, mediump> mediump_i32vec1;
+
+ /// Medium qualifier 32 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i32, mediump> mediump_i32vec2;
+
+ /// Medium qualifier 32 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i32, mediump> mediump_i32vec3;
+
+ /// Medium qualifier 32 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i32, mediump> mediump_i32vec4;
+
+
+ /// High qualifier 32 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i32, highp> highp_i32vec1;
+
+ /// High qualifier 32 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i32, highp> highp_i32vec2;
+
+ /// High qualifier 32 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i32, highp> highp_i32vec3;
+
+ /// High qualifier 32 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i32, highp> highp_i32vec4;
+
+
+ /// Low qualifier 64 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i64, lowp> lowp_i64vec1;
+
+ /// Low qualifier 64 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i64, lowp> lowp_i64vec2;
+
+ /// Low qualifier 64 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i64, lowp> lowp_i64vec3;
+
+ /// Low qualifier 64 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i64, lowp> lowp_i64vec4;
+
+
+ /// Medium qualifier 64 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i64, mediump> mediump_i64vec1;
+
+ /// Medium qualifier 64 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i64, mediump> mediump_i64vec2;
+
+ /// Medium qualifier 64 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i64, mediump> mediump_i64vec3;
+
+ /// Medium qualifier 64 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i64, mediump> mediump_i64vec4;
+
+
+ /// High qualifier 64 bit signed integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, i64, highp> highp_i64vec1;
+
+ /// High qualifier 64 bit signed integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, i64, highp> highp_i64vec2;
+
+ /// High qualifier 64 bit signed integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, i64, highp> highp_i64vec3;
+
+ /// High qualifier 64 bit signed integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, i64, highp> highp_i64vec4;
+
+
+ /////////////////////////////
+ // Unsigned int vector types
+
+ /// Low qualifier unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, uint, lowp> lowp_uvec1;
+
+ /// Low qualifier unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, uint, lowp> lowp_uvec2;
+
+ /// Low qualifier unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, uint, lowp> lowp_uvec3;
+
+ /// Low qualifier unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, uint, lowp> lowp_uvec4;
+
+
+ /// Medium qualifier unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, uint, mediump> mediump_uvec1;
+
+ /// Medium qualifier unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, uint, mediump> mediump_uvec2;
+
+ /// Medium qualifier unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, uint, mediump> mediump_uvec3;
+
+ /// Medium qualifier unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, uint, mediump> mediump_uvec4;
+
+
+ /// High qualifier unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, uint, highp> highp_uvec1;
+
+ /// High qualifier unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, uint, highp> highp_uvec2;
+
+ /// High qualifier unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, uint, highp> highp_uvec3;
+
+ /// High qualifier unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, uint, highp> highp_uvec4;
+
+
+ /// Low qualifier 8 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u8, lowp> lowp_u8vec1;
+
+ /// Low qualifier 8 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u8, lowp> lowp_u8vec2;
+
+ /// Low qualifier 8 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u8, lowp> lowp_u8vec3;
+
+ /// Low qualifier 8 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u8, lowp> lowp_u8vec4;
+
+
+ /// Medium qualifier 8 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u8, mediump> mediump_u8vec1;
+
+ /// Medium qualifier 8 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u8, mediump> mediump_u8vec2;
+
+ /// Medium qualifier 8 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u8, mediump> mediump_u8vec3;
+
+ /// Medium qualifier 8 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u8, mediump> mediump_u8vec4;
+
+
+ /// High qualifier 8 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u8, highp> highp_u8vec1;
+
+ /// High qualifier 8 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u8, highp> highp_u8vec2;
+
+ /// High qualifier 8 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u8, highp> highp_u8vec3;
+
+ /// High qualifier 8 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u8, highp> highp_u8vec4;
+
+
+ /// Low qualifier 16 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u16, lowp> lowp_u16vec1;
+
+ /// Low qualifier 16 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u16, lowp> lowp_u16vec2;
+
+ /// Low qualifier 16 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u16, lowp> lowp_u16vec3;
+
+ /// Low qualifier 16 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u16, lowp> lowp_u16vec4;
+
+
+ /// Medium qualifier 16 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u16, mediump> mediump_u16vec1;
+
+ /// Medium qualifier 16 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u16, mediump> mediump_u16vec2;
+
+ /// Medium qualifier 16 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u16, mediump> mediump_u16vec3;
+
+ /// Medium qualifier 16 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u16, mediump> mediump_u16vec4;
+
+
+ /// High qualifier 16 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u16, highp> highp_u16vec1;
+
+ /// High qualifier 16 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u16, highp> highp_u16vec2;
+
+ /// High qualifier 16 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u16, highp> highp_u16vec3;
+
+ /// High qualifier 16 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u16, highp> highp_u16vec4;
+
+
+ /// Low qualifier 32 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u32, lowp> lowp_u32vec1;
+
+ /// Low qualifier 32 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u32, lowp> lowp_u32vec2;
+
+ /// Low qualifier 32 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u32, lowp> lowp_u32vec3;
+
+ /// Low qualifier 32 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u32, lowp> lowp_u32vec4;
+
+
+ /// Medium qualifier 32 bit unsigned integer vector of 1 component type.
+ /// @see gtc_type_precision
+ typedef vec<1, u32, mediump> mediump_u32vec1;
+
+ /// Medium qualifier 32 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u32, mediump> mediump_u32vec2;
+
+ /// Medium qualifier 32 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u32, mediump> mediump_u32vec3;
+
+ /// Medium qualifier 32 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u32, mediump> mediump_u32vec4;
+
+
+ /// High qualifier 32 bit unsigned integer scalar type.
+ /// @see gtc_type_precision
+ typedef vec<1, u32, highp> highp_u32vec1;
+
+ /// High qualifier 32 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u32, highp> highp_u32vec2;
+
+ /// High qualifier 32 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u32, highp> highp_u32vec3;
+
+ /// High qualifier 32 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u32, highp> highp_u32vec4;
+
+
+ /// Low qualifier 64 bit unsigned integer scalar type.
+ /// @see gtc_type_precision
+ typedef vec<1, u64, lowp> lowp_u64vec1;
+
+ /// Low qualifier 64 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u64, lowp> lowp_u64vec2;
+
+ /// Low qualifier 64 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u64, lowp> lowp_u64vec3;
+
+ /// Low qualifier 64 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u64, lowp> lowp_u64vec4;
+
+
+ /// Medium qualifier 64 bit unsigned integer scalar type.
+ /// @see gtc_type_precision
+ typedef vec<1, u64, mediump> mediump_u64vec1;
+
+ /// Medium qualifier 64 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u64, mediump> mediump_u64vec2;
+
+ /// Medium qualifier 64 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u64, mediump> mediump_u64vec3;
+
+ /// Medium qualifier 64 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u64, mediump> mediump_u64vec4;
+
+
+ /// High qualifier 64 bit unsigned integer scalar type.
+ /// @see gtc_type_precision
+ typedef vec<1, u64, highp> highp_u64vec1;
+
+ /// High qualifier 64 bit unsigned integer vector of 2 components type.
+ /// @see gtc_type_precision
+ typedef vec<2, u64, highp> highp_u64vec2;
+
+ /// High qualifier 64 bit unsigned integer vector of 3 components type.
+ /// @see gtc_type_precision
+ typedef vec<3, u64, highp> highp_u64vec3;
+
+ /// High qualifier 64 bit unsigned integer vector of 4 components type.
+ /// @see gtc_type_precision
+ typedef vec<4, u64, highp> highp_u64vec4;
+
+
+ //////////////////////
+ // Float vector types
+
+ /// 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 float32_t;
+
+ /// 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 f32;
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 float64_t;
+
+ /// 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 f64;
+# endif//GLM_FORCE_SINGLE_ONLY
+
+ /// Single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, defaultp> fvec1;
+
+ /// Single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, float, defaultp> fvec2;
+
+ /// Single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, float, defaultp> fvec3;
+
+ /// Single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, float, defaultp> fvec4;
+
+
+ /// Single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, defaultp> f32vec1;
+
+ /// Single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f32, defaultp> f32vec2;
+
+ /// Single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f32, defaultp> f32vec3;
+
+ /// Single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f32, defaultp> f32vec4;
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+ /// Double-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f64, defaultp> f64vec1;
+
+ /// Double-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f64, defaultp> f64vec2;
+
+ /// Double-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f64, defaultp> f64vec3;
+
+ /// Double-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f64, defaultp> f64vec4;
+# endif//GLM_FORCE_SINGLE_ONLY
+
+
+ //////////////////////
+ // Float matrix types
+
+ /// Single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1<f32> fmat1;
+
+ /// Single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, defaultp> fmat2;
+
+ /// Single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, defaultp> fmat3;
+
+ /// Single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, defaultp> fmat4;
+
+
+ /// Single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 fmat1x1;
+
+ /// Single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, defaultp> fmat2x2;
+
+ /// Single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, defaultp> fmat2x3;
+
+ /// Single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, defaultp> fmat2x4;
+
+ /// Single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, defaultp> fmat3x2;
+
+ /// Single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, defaultp> fmat3x3;
+
+ /// Single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, defaultp> fmat3x4;
+
+ /// Single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, defaultp> fmat4x2;
+
+ /// Single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, defaultp> fmat4x3;
+
+ /// Single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, defaultp> fmat4x4;
+
+
+ /// Single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1<f32, defaultp> f32mat1;
+
+ /// Single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, defaultp> f32mat2;
+
+ /// Single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, defaultp> f32mat3;
+
+ /// Single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, defaultp> f32mat4;
+
+
+ /// Single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 f32mat1x1;
+
+ /// Single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, defaultp> f32mat2x2;
+
+ /// Single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, defaultp> f32mat2x3;
+
+ /// Single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, defaultp> f32mat2x4;
+
+ /// Single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, defaultp> f32mat3x2;
+
+ /// Single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, defaultp> f32mat3x3;
+
+ /// Single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, defaultp> f32mat3x4;
+
+ /// Single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, defaultp> f32mat4x2;
+
+ /// Single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, defaultp> f32mat4x3;
+
+ /// Single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, defaultp> f32mat4x4;
+
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// Double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1<f64, defaultp> f64mat1;
+
+ /// Double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f64, defaultp> f64mat2;
+
+ /// Double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f64, defaultp> f64mat3;
+
+ /// Double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f64, defaultp> f64mat4;
+
+
+ /// Double-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f64 f64mat1x1;
+
+ /// Double-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f64, defaultp> f64mat2x2;
+
+ /// Double-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f64, defaultp> f64mat2x3;
+
+ /// Double-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f64, defaultp> f64mat2x4;
+
+ /// Double-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f64, defaultp> f64mat3x2;
+
+ /// Double-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f64, defaultp> f64mat3x3;
+
+ /// Double-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f64, defaultp> f64mat3x4;
+
+ /// Double-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f64, defaultp> f64mat4x2;
+
+ /// Double-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f64, defaultp> f64mat4x3;
+
+ /// Double-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f64, defaultp> f64mat4x4;
+
+# endif//GLM_FORCE_SINGLE_ONLY
+
+ //////////////////////////
+ // Quaternion types
+
+ /// Single-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f32, defaultp> f32quat;
+
+ /// Low single-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f32, lowp> lowp_f32quat;
+
+ /// Low double-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f64, lowp> lowp_f64quat;
+
+ /// Medium single-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f32, mediump> mediump_f32quat;
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// Medium double-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f64, mediump> mediump_f64quat;
+
+ /// High single-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f32, highp> highp_f32quat;
+
+ /// High double-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f64, highp> highp_f64quat;
+
+ /// Double-qualifier floating-point quaternion.
+ /// @see gtc_type_precision
+ typedef qua<f64, defaultp> f64quat;
+
+# endif//GLM_FORCE_SINGLE_ONLY
+
+ /// @}
+}//namespace glm
+
+#include "type_precision.inl"
diff --git a/3rdparty/glm/source/glm/gtc/type_precision.inl b/3rdparty/glm/source/glm/gtc/type_precision.inl
new file mode 100644
index 0000000..ae80912
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/type_precision.inl
@@ -0,0 +1,6 @@
+/// @ref gtc_type_precision
+
+namespace glm
+{
+	// Intentionally empty: GLM_GTC_type_precision only introduces typedefs in
+	// the header, so there are no out-of-line definitions to provide here.
+	// This file exists to keep the hpp/inl pairing convention used across GLM.
+}
diff --git a/3rdparty/glm/source/glm/gtc/type_ptr.hpp b/3rdparty/glm/source/glm/gtc/type_ptr.hpp
new file mode 100644
index 0000000..d7e625a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/type_ptr.hpp
@@ -0,0 +1,230 @@
+/// @ref gtc_type_ptr
+/// @file glm/gtc/type_ptr.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_type_ptr GLM_GTC_type_ptr
+/// @ingroup gtc
+///
+/// Include <glm/gtc/type_ptr.hpp> to use the features of this extension.
+///
+/// Handles the interaction between pointers and vector, matrix types.
+///
+/// This extension defines an overloaded function, glm::value_ptr. It returns
+/// a pointer to the memory layout of the object. Matrix types store their values
+/// in column-major order.
+///
+/// This is useful for uploading data to matrices or copying data to buffer objects.
+///
+/// Example:
+/// @code
+/// #include <glm/glm.hpp>
+/// #include <glm/gtc/type_ptr.hpp>
+///
+/// glm::vec3 aVector(3);
+/// glm::mat4 someMatrix(1.0);
+///
+/// glUniform3fv(uniformLoc, 1, glm::value_ptr(aVector));
+/// glUniformMatrix4fv(uniformMatrixLoc, 1, GL_FALSE, glm::value_ptr(someMatrix));
+/// @endcode
+///
+/// <glm/gtc/type_ptr.hpp> need to be included to use the features of this extension.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/quaternion.hpp"
+#include "../gtc/vec1.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../mat2x2.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x4.hpp"
+#include "../mat3x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x4.hpp"
+#include "../mat4x2.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x4.hpp"
+#include <cstring>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_type_ptr extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_type_ptr
+ /// @{
+
+ /// Return the constant address to the data of the input parameter.
+ /// @see gtc_type_ptr
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::value_type const * value_ptr(genType const& v);
+
+	/// Build a 1 component vector by copying another 1 component vector.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v);
+
+	/// Build a 1 component vector by truncating a 2 component vector (keeps x).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v);
+
+	/// Build a 1 component vector by truncating a 3 component vector (keeps x).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v);
+
+	/// Build a 1 component vector by truncating a 4 component vector (keeps x).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, Q> const& v);
+
+	/// Build a 2 component vector from a 1 component vector; y is zero-filled.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<1, T, Q> const& v);
+
+	/// Build a 2 component vector by copying another 2 component vector.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v);
+
+	/// Build a 2 component vector by truncating a 3 component vector (keeps x, y).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v);
+
+	/// Build a 2 component vector by truncating a 4 component vector (keeps x, y).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v);
+
+	/// Build a 3 component vector from a 1 component vector; y and z are zero-filled.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v);
+
+	/// Build a 3 component vector from a 2 component vector; z is zero-filled.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v);
+
+	/// Build a 3 component vector by copying another 3 component vector.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v);
+
+	/// Build a 3 component vector by truncating a 4 component vector (keeps x, y, z).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v);
+
+	/// Build a 4 component vector from a 1 component vector; y and z are zero-filled, w is set to 1.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v);
+
+	/// Build a 4 component vector from a 2 component vector; z is zero-filled, w is set to 1.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v);
+
+	/// Build a 4 component vector from a 3 component vector; w is set to 1 (homogeneous point convention).
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v);
+
+	/// Build a 4 component vector by copying another 4 component vector.
+	/// @see gtc_type_ptr
+	template <typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v);
+
+ /// Build a vector from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL vec<2, T, defaultp> make_vec2(T const * const ptr);
+
+ /// Build a vector from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL vec<3, T, defaultp> make_vec3(T const * const ptr);
+
+ /// Build a vector from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL vec<4, T, defaultp> make_vec4(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2x2(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<2, 3, T, defaultp> make_mat2x3(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<2, 4, T, defaultp> make_mat2x4(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<3, 2, T, defaultp> make_mat3x2(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3x3(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<3, 4, T, defaultp> make_mat3x4(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 2, T, defaultp> make_mat4x2(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 3, T, defaultp> make_mat4x3(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4x4(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3(T const * const ptr);
+
+ /// Build a matrix from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4(T const * const ptr);
+
+ /// Build a quaternion from a pointer.
+ /// @see gtc_type_ptr
+ template<typename T>
+ GLM_FUNC_DECL qua<T, defaultp> make_quat(T const * const ptr);
+
+ /// @}
+}//namespace glm
+
+#include "type_ptr.inl"
diff --git a/3rdparty/glm/source/glm/gtc/type_ptr.inl b/3rdparty/glm/source/glm/gtc/type_ptr.inl
new file mode 100644
index 0000000..26b20b5
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/type_ptr.inl
@@ -0,0 +1,386 @@
+/// @ref gtc_type_ptr
+
+#include <cstring>
+
+namespace glm
+{
+ /// @addtogroup gtc_type_ptr
+ /// @{
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(vec<2, T, Q> const& v)
+ {
+ return &(v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(vec<2, T, Q>& v)
+ {
+ return &(v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const * value_ptr(vec<3, T, Q> const& v)
+ {
+ return &(v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(vec<3, T, Q>& v)
+ {
+ return &(v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(vec<4, T, Q> const& v)
+ {
+ return &(v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(vec<4, T, Q>& v)
+ {
+ return &(v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 2, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 2, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 3, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 3, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 4, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 4, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 3, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 3, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 2, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 2, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 4, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 4, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 2, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 2, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 4, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 4, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 3, T, Q> const& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T * value_ptr(mat<4, 3, T, Q>& m)
+ {
+ return &(m[0].x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T const * value_ptr(qua<T, Q> const& q)
+ {
+ return &(q[0]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T* value_ptr(qua<T, Q>& q)
+ {
+ return &(q[0]);
+ }
+
+	// -- make_vecN conversions between vector sizes --
+	//
+	// NOTE: these are out-of-line definitions, so they use GLM_FUNC_QUALIFIER
+	// (which carries GLM's inline qualifier), consistent with every other
+	// definition in this file; the previous GLM_FUNC_DECL marker is intended
+	// for declarations only and bypassed GLM_FORCE_INLINE.
+
+	// vec1 -> vec1: identity copy.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<1, T, Q> make_vec1(vec<1, T, Q> const& v)
+	{
+		return v;
+	}
+
+	// vec2 -> vec1: truncate, keeping x.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<1, T, Q> make_vec1(vec<2, T, Q> const& v)
+	{
+		return vec<1, T, Q>(v);
+	}
+
+	// vec3 -> vec1: truncate, keeping x.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<1, T, Q> make_vec1(vec<3, T, Q> const& v)
+	{
+		return vec<1, T, Q>(v);
+	}
+
+	// vec4 -> vec1: truncate, keeping x.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<1, T, Q> make_vec1(vec<4, T, Q> const& v)
+	{
+		return vec<1, T, Q>(v);
+	}
+
+	// vec1 -> vec2: y is zero-filled.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> make_vec2(vec<1, T, Q> const& v)
+	{
+		return vec<2, T, Q>(v.x, static_cast<T>(0));
+	}
+
+	// vec2 -> vec2: identity copy.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> make_vec2(vec<2, T, Q> const& v)
+	{
+		return v;
+	}
+
+	// vec3 -> vec2: truncate, keeping x and y.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> make_vec2(vec<3, T, Q> const& v)
+	{
+		return vec<2, T, Q>(v);
+	}
+
+	// vec4 -> vec2: truncate, keeping x and y.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> make_vec2(vec<4, T, Q> const& v)
+	{
+		return vec<2, T, Q>(v);
+	}
+
+	// vec1 -> vec3: y and z are zero-filled.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> make_vec3(vec<1, T, Q> const& v)
+	{
+		return vec<3, T, Q>(v.x, static_cast<T>(0), static_cast<T>(0));
+	}
+
+	// vec2 -> vec3: z is zero-filled.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> make_vec3(vec<2, T, Q> const& v)
+	{
+		return vec<3, T, Q>(v.x, v.y, static_cast<T>(0));
+	}
+
+	// vec3 -> vec3: identity copy.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> make_vec3(vec<3, T, Q> const& v)
+	{
+		return v;
+	}
+
+	// vec4 -> vec3: truncate, keeping x, y and z.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> make_vec3(vec<4, T, Q> const& v)
+	{
+		return vec<3, T, Q>(v);
+	}
+
+	// vec1 -> vec4: y and z are zero-filled; w defaults to 1
+	// (homogeneous point convention).
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> make_vec4(vec<1, T, Q> const& v)
+	{
+		return vec<4, T, Q>(v.x, static_cast<T>(0), static_cast<T>(0), static_cast<T>(1));
+	}
+
+	// vec2 -> vec4: z is zero-filled; w defaults to 1.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> make_vec4(vec<2, T, Q> const& v)
+	{
+		return vec<4, T, Q>(v.x, v.y, static_cast<T>(0), static_cast<T>(1));
+	}
+
+	// vec3 -> vec4: w defaults to 1.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> make_vec4(vec<3, T, Q> const& v)
+	{
+		return vec<4, T, Q>(v.x, v.y, v.z, static_cast<T>(1));
+	}
+
+	// vec4 -> vec4: identity copy.
+	template <typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> make_vec4(vec<4, T, Q> const& v)
+	{
+		return v;
+	}
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<2, T, defaultp> make_vec2(T const *const ptr)
+ {
+ vec<2, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(vec<2, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<3, T, defaultp> make_vec3(T const *const ptr)
+ {
+ vec<3, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(vec<3, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER vec<4, T, defaultp> make_vec4(T const *const ptr)
+ {
+ vec<4, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(vec<4, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2x2(T const *const ptr)
+ {
+ mat<2, 2, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<2, 2, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, defaultp> make_mat2x3(T const *const ptr)
+ {
+ mat<2, 3, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<2, 3, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, defaultp> make_mat2x4(T const *const ptr)
+ {
+ mat<2, 4, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<2, 4, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, defaultp> make_mat3x2(T const *const ptr)
+ {
+ mat<3, 2, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<3, 2, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3x3(T const *const ptr)
+ {
+ mat<3, 3, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<3, 3, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, defaultp> make_mat3x4(T const *const ptr)
+ {
+ mat<3, 4, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<3, 4, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, defaultp> make_mat4x2(T const *const ptr)
+ {
+ mat<4, 2, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<4, 2, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, defaultp> make_mat4x3(T const *const ptr)
+ {
+ mat<4, 3, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<4, 3, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4x4(T const *const ptr)
+ {
+ mat<4, 4, T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(mat<4, 4, T, defaultp>));
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2(T const *const ptr)
+ {
+ return make_mat2x2(ptr);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3(T const *const ptr)
+ {
+ return make_mat3x3(ptr);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4(T const *const ptr)
+ {
+ return make_mat4x4(ptr);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER qua<T, defaultp> make_quat(T const *const ptr)
+ {
+ qua<T, defaultp> Result;
+ memcpy(value_ptr(Result), ptr, sizeof(qua<T, defaultp>));
+ return Result;
+ }
+
+ /// @}
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtc/ulp.hpp b/3rdparty/glm/source/glm/gtc/ulp.hpp
new file mode 100644
index 0000000..0d80a75
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/ulp.hpp
@@ -0,0 +1,152 @@
+/// @ref gtc_ulp
+/// @file glm/gtc/ulp.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_ulp GLM_GTC_ulp
+/// @ingroup gtc
+///
+/// Include <glm/gtc/ulp.hpp> to use the features of this extension.
+///
+/// Allow the measurement of the accuracy of a function against a reference
+/// implementation. This extension works on floating-point data and provide results
+/// in ULP.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "../ext/scalar_int_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_ulp extension included")
+#endif
+
+namespace glm
+{
+ /// Return the next ULP value(s) after the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see gtc_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType next_float(genType x);
+
+ /// Return the previous ULP value(s) before the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see gtc_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType prev_float(genType x);
+
+ /// Return the value(s) ULP distance after the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see gtc_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType next_float(genType x, int ULPs);
+
+ /// Return the value(s) ULP distance before the input value(s).
+ ///
+ /// @tparam genType A floating-point scalar type.
+ ///
+ /// @see gtc_ulp
+ template<typename genType>
+ GLM_FUNC_DECL genType prev_float(genType x, int ULPs);
+
+ /// Return the distance in the number of ULP between 2 single-precision floating-point scalars.
+ ///
+ /// @see gtc_ulp
+ GLM_FUNC_DECL int float_distance(float x, float y);
+
+ /// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
+ ///
+ /// @see gtc_ulp
+ GLM_FUNC_DECL int64 float_distance(double x, double y);
+
+ /// Return the next ULP value(s) after the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> next_float(vec<L, T, Q> const& x);
+
+ /// Return the value(s) ULP distance after the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> next_float(vec<L, T, Q> const& x, int ULPs);
+
+ /// Return the value(s) ULP distance after the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> next_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs);
+
+ /// Return the previous ULP value(s) before the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prev_float(vec<L, T, Q> const& x);
+
+ /// Return the value(s) ULP distance before the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prev_float(vec<L, T, Q> const& x, int ULPs);
+
+ /// Return the value(s) ULP distance before the input value(s).
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> prev_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs);
+
+ /// Return the distance in the number of ULP between 2 single-precision floating-point scalars.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> float_distance(vec<L, float, Q> const& x, vec<L, float, Q> const& y);
+
+ /// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see gtc_ulp
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int64, Q> float_distance(vec<L, double, Q> const& x, vec<L, double, Q> const& y);
+
+ /// @}
+}//namespace glm
+
+#include "ulp.inl"
diff --git a/3rdparty/glm/source/glm/gtc/ulp.inl b/3rdparty/glm/source/glm/gtc/ulp.inl
new file mode 100644
index 0000000..4ecbd3f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/ulp.inl
@@ -0,0 +1,173 @@
+/// @ref gtc_ulp
+
+#include "../ext/scalar_ulp.hpp"
+
+namespace glm
+{
	// Specialization: returns the next representable float strictly greater than x
	// (one ULP toward +FLT_MAX). Which backend is used depends on compiler/platform.
	template<>
	GLM_FUNC_QUALIFIER float next_float(float x)
	{
#		if GLM_HAS_CXX11_STL
			// C++11 standard library: handles infinities, NaN and denormals.
			return std::nextafter(x, std::numeric_limits<float>::max());
#		elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
			// MSVC / Intel-on-Windows: GLM's internal wrapper around the CRT function.
			return detail::nextafterf(x, FLT_MAX);
#		elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
			// Android NDK: builtin avoids libm linkage quirks.
			return __builtin_nextafterf(x, FLT_MAX);
#		else
			// POSIX fallback.
			return nextafterf(x, FLT_MAX);
#		endif
	}
+
	// Specialization: returns the next representable double strictly greater than x
	// (one ULP toward +DBL_MAX).
	template<>
	GLM_FUNC_QUALIFIER double next_float(double x)
	{
#		if GLM_HAS_CXX11_STL
			return std::nextafter(x, std::numeric_limits<double>::max());
#		elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
			// GLM's internal wrapper for MSVC / Intel-on-Windows.
			return detail::nextafter(x, std::numeric_limits<double>::max());
#		elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
			return __builtin_nextafter(x, DBL_MAX);
#		else
			return nextafter(x, DBL_MAX);
#		endif
	}
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T next_float(T x, int ULPs)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'next_float' only accept floating-point input");
+ assert(ULPs >= 0);
+
+ T temp = x;
+ for (int i = 0; i < ULPs; ++i)
+ temp = next_float(temp);
+ return temp;
+ }
+
+ GLM_FUNC_QUALIFIER float prev_float(float x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::nextafter(x, std::numeric_limits<float>::min());
+# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+ return detail::nextafterf(x, FLT_MIN);
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+ return __builtin_nextafterf(x, FLT_MIN);
+# else
+ return nextafterf(x, FLT_MIN);
+# endif
+ }
+
+ GLM_FUNC_QUALIFIER double prev_float(double x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::nextafter(x, std::numeric_limits<double>::min());
+# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+ return _nextafter(x, DBL_MIN);
+# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+ return __builtin_nextafter(x, DBL_MIN);
+# else
+ return nextafter(x, DBL_MIN);
+# endif
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T prev_float(T x, int ULPs)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'prev_float' only accept floating-point input");
+ assert(ULPs >= 0);
+
+ T temp = x;
+ for (int i = 0; i < ULPs; ++i)
+ temp = prev_float(temp);
+ return temp;
+ }
+
	// ULP distance between two floats, computed on the integer bit patterns
	// exposed by detail::float_t (.i).
	// NOTE(review): the raw integer difference is only a valid ULP count when x and
	// y have the same sign, and abs() of a large difference can overflow int —
	// presumably callers pass nearby values; confirm before wider use.
	GLM_FUNC_QUALIFIER int float_distance(float x, float y)
	{
		detail::float_t<float> const a(x);
		detail::float_t<float> const b(y);

		return abs(a.i - b.i);
	}
+
	// ULP distance between two doubles via their 64-bit integer bit patterns.
	// NOTE(review): same caveat as the float overload — only meaningful for
	// same-signed inputs; confirm against callers.
	GLM_FUNC_QUALIFIER int64 float_distance(double x, double y)
	{
		detail::float_t<double> const a(x);
		detail::float_t<double> const b(y);

		return abs(a.i - b.i);
	}
+
	// ----- Vector overloads: apply the scalar next_float / prev_float /
	// float_distance component-wise. -----

	// Next representable value, per component.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x)
	{
		vec<L, T, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = next_float(x[i]);
		return Result;
	}

	// Advance every component by the same ULP count.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x, int ULPs)
	{
		vec<L, T, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = next_float(x[i], ULPs);
		return Result;
	}

	// Advance each component by its own ULP count.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
	{
		vec<L, T, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = next_float(x[i], ULPs[i]);
		return Result;
	}

	// Previous representable value, per component.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x)
	{
		vec<L, T, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = prev_float(x[i]);
		return Result;
	}

	// Step every component down by the same ULP count.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x, int ULPs)
	{
		vec<L, T, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = prev_float(x[i], ULPs);
		return Result;
	}

	// Step each component down by its own ULP count.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
	{
		vec<L, T, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = prev_float(x[i], ULPs[i]);
		return Result;
	}

	// Component-wise ULP distance for float vectors.
	template<length_t L, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, int, Q> float_distance(vec<L, float, Q> const& x, vec<L, float, Q> const& y)
	{
		vec<L, int, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = float_distance(x[i], y[i]);
		return Result;
	}

	// Component-wise ULP distance for double vectors (64-bit counts).
	template<length_t L, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, int64, Q> float_distance(vec<L, double, Q> const& x, vec<L, double, Q> const& y)
	{
		vec<L, int64, Q> Result;
		for (length_t i = 0, n = Result.length(); i < n; ++i)
			Result[i] = float_distance(x[i], y[i]);
		return Result;
	}
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtc/vec1.hpp b/3rdparty/glm/source/glm/gtc/vec1.hpp
new file mode 100644
index 0000000..63697a2
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtc/vec1.hpp
@@ -0,0 +1,30 @@
+/// @ref gtc_vec1
+/// @file glm/gtc/vec1.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_vec1 GLM_GTC_vec1
+/// @ingroup gtc
+///
+/// Include <glm/gtc/vec1.hpp> to use the features of this extension.
+///
+/// Add vec1, ivec1, uvec1 and bvec1 types.
+
+#pragma once
+
+// Dependency:
+#include "../ext/vector_bool1.hpp"
+#include "../ext/vector_bool1_precision.hpp"
+#include "../ext/vector_float1.hpp"
+#include "../ext/vector_float1_precision.hpp"
+#include "../ext/vector_double1.hpp"
+#include "../ext/vector_double1_precision.hpp"
+#include "../ext/vector_int1.hpp"
+#include "../ext/vector_int1_sized.hpp"
+#include "../ext/vector_uint1.hpp"
+#include "../ext/vector_uint1_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_vec1 extension included")
+#endif
+
diff --git a/3rdparty/glm/source/glm/gtx/associated_min_max.hpp b/3rdparty/glm/source/glm/gtx/associated_min_max.hpp
new file mode 100644
index 0000000..d1a41c0
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/associated_min_max.hpp
@@ -0,0 +1,207 @@
+/// @ref gtx_associated_min_max
+/// @file glm/gtx/associated_min_max.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_associated_min_max GLM_GTX_associated_min_max
+/// @ingroup gtx
+///
+/// Include <glm/gtx/associated_min_max.hpp> to use the features of this extension.
+///
/// @brief Min and max functions that return the associated values, not the compared ones.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_associated_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_associated_min_max extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_associated_min_max
+ /// @{
+
	/// Minimum comparison between 2 variables and returns 2 associated variable values
	/// @see gtx_associated_min_max
	/// NOTE(review): the qualifier Q template parameter is never used in this scalar
	/// overload and cannot be deduced from the arguments — presumably a copy-paste
	/// remnant; confirm against the definition in associated_min_max.inl.
	template<typename T, typename U, qualifier Q>
	GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b);
+
+ /// Minimum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<2, U, Q> associatedMin(
+ vec<L, T, Q> const& x, vec<L, U, Q> const& a,
+ vec<L, T, Q> const& y, vec<L, U, Q> const& b);
+
+ /// Minimum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMin(
+ T x, const vec<L, U, Q>& a,
+ T y, const vec<L, U, Q>& b);
+
+ /// Minimum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMin(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b);
+
+ /// Minimum comparison between 3 variables and returns 3 associated variable values
+ /// @see gtx_associated_min_max
+ template<typename T, typename U>
+ GLM_FUNC_DECL U associatedMin(
+ T x, U a,
+ T y, U b,
+ T z, U c);
+
+ /// Minimum comparison between 3 variables and returns 3 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMin(
+ vec<L, T, Q> const& x, vec<L, U, Q> const& a,
+ vec<L, T, Q> const& y, vec<L, U, Q> const& b,
+ vec<L, T, Q> const& z, vec<L, U, Q> const& c);
+
+ /// Minimum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<typename T, typename U>
+ GLM_FUNC_DECL U associatedMin(
+ T x, U a,
+ T y, U b,
+ T z, U c,
+ T w, U d);
+
+ /// Minimum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMin(
+ vec<L, T, Q> const& x, vec<L, U, Q> const& a,
+ vec<L, T, Q> const& y, vec<L, U, Q> const& b,
+ vec<L, T, Q> const& z, vec<L, U, Q> const& c,
+ vec<L, T, Q> const& w, vec<L, U, Q> const& d);
+
+ /// Minimum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMin(
+ T x, vec<L, U, Q> const& a,
+ T y, vec<L, U, Q> const& b,
+ T z, vec<L, U, Q> const& c,
+ T w, vec<L, U, Q> const& d);
+
+ /// Minimum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMin(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b,
+ vec<L, T, Q> const& z, U c,
+ vec<L, T, Q> const& w, U d);
+
+ /// Maximum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<typename T, typename U>
+ GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b);
+
+ /// Maximum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<2, U, Q> associatedMax(
+ vec<L, T, Q> const& x, vec<L, U, Q> const& a,
+ vec<L, T, Q> const& y, vec<L, U, Q> const& b);
+
+ /// Maximum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> associatedMax(
+ T x, vec<L, U, Q> const& a,
+ T y, vec<L, U, Q> const& b);
+
+ /// Maximum comparison between 2 variables and returns 2 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMax(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b);
+
+ /// Maximum comparison between 3 variables and returns 3 associated variable values
+ /// @see gtx_associated_min_max
+ template<typename T, typename U>
+ GLM_FUNC_DECL U associatedMax(
+ T x, U a,
+ T y, U b,
+ T z, U c);
+
+ /// Maximum comparison between 3 variables and returns 3 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMax(
+ vec<L, T, Q> const& x, vec<L, U, Q> const& a,
+ vec<L, T, Q> const& y, vec<L, U, Q> const& b,
+ vec<L, T, Q> const& z, vec<L, U, Q> const& c);
+
+ /// Maximum comparison between 3 variables and returns 3 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> associatedMax(
+ T x, vec<L, U, Q> const& a,
+ T y, vec<L, U, Q> const& b,
+ T z, vec<L, U, Q> const& c);
+
+ /// Maximum comparison between 3 variables and returns 3 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMax(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b,
+ vec<L, T, Q> const& z, U c);
+
+ /// Maximum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<typename T, typename U>
+ GLM_FUNC_DECL U associatedMax(
+ T x, U a,
+ T y, U b,
+ T z, U c,
+ T w, U d);
+
+ /// Maximum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMax(
+ vec<L, T, Q> const& x, vec<L, U, Q> const& a,
+ vec<L, T, Q> const& y, vec<L, U, Q> const& b,
+ vec<L, T, Q> const& z, vec<L, U, Q> const& c,
+ vec<L, T, Q> const& w, vec<L, U, Q> const& d);
+
+ /// Maximum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMax(
+ T x, vec<L, U, Q> const& a,
+ T y, vec<L, U, Q> const& b,
+ T z, vec<L, U, Q> const& c,
+ T w, vec<L, U, Q> const& d);
+
+ /// Maximum comparison between 4 variables and returns 4 associated variable values
+ /// @see gtx_associated_min_max
+ template<length_t L, typename T, typename U, qualifier Q>
+ GLM_FUNC_DECL vec<L, U, Q> associatedMax(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b,
+ vec<L, T, Q> const& z, U c,
+ vec<L, T, Q> const& w, U d);
+
+ /// @}
+} //namespace glm
+
+#include "associated_min_max.inl"
diff --git a/3rdparty/glm/source/glm/gtx/associated_min_max.inl b/3rdparty/glm/source/glm/gtx/associated_min_max.inl
new file mode 100644
index 0000000..5186c47
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/associated_min_max.inl
@@ -0,0 +1,354 @@
+/// @ref gtx_associated_min_max
+
+namespace glm{
+
// Min comparison between 2 variables: returns a if x < y, otherwise b.
// NOTE(review): the qualifier Q template parameter is unused here and is not
// deducible from the arguments; it mirrors the (likely accidental) declaration
// in the header.
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER U associatedMin(T x, U a, T y, U b)
{
	return x < y ? a : b;
}
+
// Min comparison between 2 variables: component-wise key compare, associated pick.
// NOTE(review): declared to return vec<2, U, Q> while Result is vec<L, U, Q>;
// this only converts when L == 2 — presumably the return type should be
// vec<L, U, Q>. Confirm before instantiating with L != 2.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<2, U, Q> associatedMin
(
	vec<L, T, Q> const& x, vec<L, U, Q> const& a,
	vec<L, T, Q> const& y, vec<L, U, Q> const& b
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x[i] < y[i] ? a[i] : b[i];
	return Result;
}

// Scalar keys, vector values: one comparison picks a or b per component.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMin
(
	T x, const vec<L, U, Q>& a,
	T y, const vec<L, U, Q>& b
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x < y ? a[i] : b[i];
	return Result;
}

// Vector keys, scalar values: each component picks scalar a or b.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMin
(
	vec<L, T, Q> const& x, U a,
	vec<L, T, Q> const& y, U b
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x[i] < y[i] ? a : b;
	return Result;
}
+
+// Min comparison between 3 variables
+template<typename T, typename U>
+GLM_FUNC_QUALIFIER U associatedMin
+(
+ T x, U a,
+ T y, U b,
+ T z, U c
+)
+{
+ U Result = x < y ? (x < z ? a : c) : (y < z ? b : c);
+ return Result;
+}
+
// Min comparison between 3 variables, component-wise; ties between the pairwise
// winner and z resolve to c (same nested-ternary structure as the scalar form).
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMin
(
	vec<L, T, Q> const& x, vec<L, U, Q> const& a,
	vec<L, T, Q> const& y, vec<L, U, Q> const& b,
	vec<L, T, Q> const& z, vec<L, U, Q> const& c
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x[i] < y[i] ? (x[i] < z[i] ? a[i] : c[i]) : (y[i] < z[i] ? b[i] : c[i]);
	return Result;
}
+
+// Min comparison between 4 variables
+template<typename T, typename U>
+GLM_FUNC_QUALIFIER U associatedMin
+(
+ T x, U a,
+ T y, U b,
+ T z, U c,
+ T w, U d
+)
+{
+ T Test1 = min(x, y);
+ T Test2 = min(z, w);
+ U Result1 = x < y ? a : b;
+ U Result2 = z < w ? c : d;
+ U Result = Test1 < Test2 ? Result1 : Result2;
+ return Result;
+}
+
// Min comparison between 4 variables, component-wise: pairwise tournament
// (x,y) vs (z,w); the winning pair's associated value is selected.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMin
(
	vec<L, T, Q> const& x, vec<L, U, Q> const& a,
	vec<L, T, Q> const& y, vec<L, U, Q> const& b,
	vec<L, T, Q> const& z, vec<L, U, Q> const& c,
	vec<L, T, Q> const& w, vec<L, U, Q> const& d
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
	{
		T Test1 = min(x[i], y[i]);
		T Test2 = min(z[i], w[i]);
		U Result1 = x[i] < y[i] ? a[i] : b[i];
		U Result2 = z[i] < w[i] ? c[i] : d[i];
		Result[i] = Test1 < Test2 ? Result1 : Result2;
	}
	return Result;
}

// Min comparison between 4 variables: scalar keys, vector values. The pairwise
// key winners are hoisted out of the loop since they do not vary per component.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMin
(
	T x, vec<L, U, Q> const& a,
	T y, vec<L, U, Q> const& b,
	T z, vec<L, U, Q> const& c,
	T w, vec<L, U, Q> const& d
)
{
	T Test1 = min(x, y);
	T Test2 = min(z, w);

	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
	{
		U Result1 = x < y ? a[i] : b[i];
		U Result2 = z < w ? c[i] : d[i];
		Result[i] = Test1 < Test2 ? Result1 : Result2;
	}
	return Result;
}

// Min comparison between 4 variables: vector keys, scalar associated values.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMin
(
	vec<L, T, Q> const& x, U a,
	vec<L, T, Q> const& y, U b,
	vec<L, T, Q> const& z, U c,
	vec<L, T, Q> const& w, U d
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
	{
		T Test1 = min(x[i], y[i]);
		T Test2 = min(z[i], w[i]);
		U Result1 = x[i] < y[i] ? a : b;
		U Result2 = z[i] < w[i] ? c : d;
		Result[i] = Test1 < Test2 ? Result1 : Result2;
	}
	return Result;
}
+
+// Max comparison between 2 variables
+template<typename T, typename U>
+GLM_FUNC_QUALIFIER U associatedMax(T x, U a, T y, U b)
+{
+ return x > y ? a : b;
+}
+
// Max comparison between 2 variables, component-wise.
// NOTE(review): declared to return vec<2, U, Q> while Result is vec<L, U, Q>;
// only converts when L == 2 — presumably the return type should be vec<L, U, Q>.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<2, U, Q> associatedMax
(
	vec<L, T, Q> const& x, vec<L, U, Q> const& a,
	vec<L, T, Q> const& y, vec<L, U, Q> const& b
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x[i] > y[i] ? a[i] : b[i];
	return Result;
}

// Max comparison between 2 variables: scalar keys, vector values.
// NOTE(review): declared return type is vec<L, T, Q> but Result is vec<L, U, Q>,
// forcing a U -> T conversion on return; the header declares the same T return,
// so it cannot be corrected in this file alone — confirm upstream intent.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, T, Q> associatedMax
(
	T x, vec<L, U, Q> const& a,
	T y, vec<L, U, Q> const& b
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x > y ? a[i] : b[i];
	return Result;
}
+
+// Max comparison between 2 variables
+template<length_t L, typename T, typename U, qualifier Q>
+GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMax
+(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b
+)
+{
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = x[i] > y[i] ? a : b;
+ return Result;
+}
+
+// Max comparison between 3 variables
+template<typename T, typename U>
+GLM_FUNC_QUALIFIER U associatedMax
+(
+ T x, U a,
+ T y, U b,
+ T z, U c
+)
+{
+ U Result = x > y ? (x > z ? a : c) : (y > z ? b : c);
+ return Result;
+}
+
// Max comparison between 3 variables, component-wise; ties with z resolve to c.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMax
(
	vec<L, T, Q> const& x, vec<L, U, Q> const& a,
	vec<L, T, Q> const& y, vec<L, U, Q> const& b,
	vec<L, T, Q> const& z, vec<L, U, Q> const& c
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a[i] : c[i]) : (y[i] > z[i] ? b[i] : c[i]);
	return Result;
}

// Max comparison between 3 variables: scalar keys, vector values.
// NOTE(review): declared return type is vec<L, T, Q> but Result is vec<L, U, Q>,
// so a U -> T conversion happens on return; the header declares the same T
// return, so it cannot be corrected here alone — confirm upstream intent.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, T, Q> associatedMax
(
	T x, vec<L, U, Q> const& a,
	T y, vec<L, U, Q> const& b,
	T z, vec<L, U, Q> const& c
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
		Result[i] = x > y ? (x > z ? a[i] : c[i]) : (y > z ? b[i] : c[i]);
	return Result;
}
+
+// Max comparison between 3 variables
+template<length_t L, typename T, typename U, qualifier Q>
+GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMax
+(
+ vec<L, T, Q> const& x, U a,
+ vec<L, T, Q> const& y, U b,
+ vec<L, T, Q> const& z, U c
+)
+{
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = Result.length(); i < n; ++i)
+ Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a : c) : (y[i] > z[i] ? b : c);
+ return Result;
+}
+
+// Max comparison between 4 variables
+template<typename T, typename U>
+GLM_FUNC_QUALIFIER U associatedMax
+(
+ T x, U a,
+ T y, U b,
+ T z, U c,
+ T w, U d
+)
+{
+ T Test1 = max(x, y);
+ T Test2 = max(z, w);
+ U Result1 = x > y ? a : b;
+ U Result2 = z > w ? c : d;
+ U Result = Test1 > Test2 ? Result1 : Result2;
+ return Result;
+}
+
// Max comparison between 4 variables, component-wise: pairwise tournament
// (x,y) vs (z,w); the winning pair's associated value is selected.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMax
(
	vec<L, T, Q> const& x, vec<L, U, Q> const& a,
	vec<L, T, Q> const& y, vec<L, U, Q> const& b,
	vec<L, T, Q> const& z, vec<L, U, Q> const& c,
	vec<L, T, Q> const& w, vec<L, U, Q> const& d
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
	{
		T Test1 = max(x[i], y[i]);
		T Test2 = max(z[i], w[i]);
		U Result1 = x[i] > y[i] ? a[i] : b[i];
		U Result2 = z[i] > w[i] ? c[i] : d[i];
		Result[i] = Test1 > Test2 ? Result1 : Result2;
	}
	return Result;
}

// Max comparison between 4 variables: scalar keys, vector values. The pairwise
// key winners are hoisted out of the loop since they do not vary per component.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMax
(
	T x, vec<L, U, Q> const& a,
	T y, vec<L, U, Q> const& b,
	T z, vec<L, U, Q> const& c,
	T w, vec<L, U, Q> const& d
)
{
	T Test1 = max(x, y);
	T Test2 = max(z, w);

	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
	{
		U Result1 = x > y ? a[i] : b[i];
		U Result2 = z > w ? c[i] : d[i];
		Result[i] = Test1 > Test2 ? Result1 : Result2;
	}
	return Result;
}

// Max comparison between 4 variables: vector keys, scalar associated values.
template<length_t L, typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<L, U, Q> associatedMax
(
	vec<L, T, Q> const& x, U a,
	vec<L, T, Q> const& y, U b,
	vec<L, T, Q> const& z, U c,
	vec<L, T, Q> const& w, U d
)
{
	vec<L, U, Q> Result;
	for(length_t i = 0, n = Result.length(); i < n; ++i)
	{
		T Test1 = max(x[i], y[i]);
		T Test2 = max(z[i], w[i]);
		U Result1 = x[i] > y[i] ? a : b;
		U Result2 = z[i] > w[i] ? c : d;
		Result[i] = Test1 > Test2 ? Result1 : Result2;
	}
	return Result;
}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/bit.hpp b/3rdparty/glm/source/glm/gtx/bit.hpp
new file mode 100644
index 0000000..60a7aef
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/bit.hpp
@@ -0,0 +1,98 @@
+/// @ref gtx_bit
+/// @file glm/gtx/bit.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_bit GLM_GTX_bit
+/// @ingroup gtx
+///
+/// Include <glm/gtx/bit.hpp> to use the features of this extension.
+///
+/// Allow to perform bit operations on integer values
+
+#pragma once
+
+// Dependencies
+#include "../gtc/bitfield.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_bit is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_bit extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_bit
+ /// @{
+
+ /// @see gtx_bit
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType highestBitValue(genIUType Value);
+
+ /// @see gtx_bit
+ template<typename genIUType>
+ GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value);
+
+ /// Find the highest bit set to 1 in a integer variable and return its value.
+ ///
+ /// @see gtx_bit
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> highestBitValue(vec<L, T, Q> const& value);
+
+ /// Return the power of two number which value is just higher the input value.
+ /// Deprecated, use ceilPowerOfTwo from GTC_round instead
+ ///
+ /// @see gtc_round
+ /// @see gtx_bit
+ template<typename genIUType>
+ GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value);
+
+ /// Return the power of two number which value is just higher the input value.
+ /// Deprecated, use ceilPowerOfTwo from GTC_round instead
+ ///
+ /// @see gtc_round
+ /// @see gtx_bit
+ template<length_t L, typename T, qualifier Q>
+ GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoAbove(vec<L, T, Q> const& value);
+
+ /// Return the power of two number which value is just lower the input value.
+ /// Deprecated, use floorPowerOfTwo from GTC_round instead
+ ///
+ /// @see gtc_round
+ /// @see gtx_bit
+ template<typename genIUType>
+ GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value);
+
+ /// Return the power of two number which value is just lower the input value.
+ /// Deprecated, use floorPowerOfTwo from GTC_round instead
+ ///
+ /// @see gtc_round
+ /// @see gtx_bit
+ template<length_t L, typename T, qualifier Q>
+ GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoBelow(vec<L, T, Q> const& value);
+
+ /// Return the power of two number which value is the closet to the input value.
+ /// Deprecated, use roundPowerOfTwo from GTC_round instead
+ ///
+ /// @see gtc_round
+ /// @see gtx_bit
+ template<typename genIUType>
+ GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value);
+
+ /// Return the power of two number which value is the closet to the input value.
+ /// Deprecated, use roundPowerOfTwo from GTC_round instead
+ ///
+ /// @see gtc_round
+ /// @see gtx_bit
+ template<length_t L, typename T, qualifier Q>
+ GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& value);
+
+ /// @}
+} //namespace glm
+
+
+#include "bit.inl"
+
diff --git a/3rdparty/glm/source/glm/gtx/bit.inl b/3rdparty/glm/source/glm/gtx/bit.inl
new file mode 100644
index 0000000..621b626
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/bit.inl
@@ -0,0 +1,92 @@
+/// @ref gtx_bit
+
+namespace glm
+{
	///////////////////
	// highestBitValue

	// Returns a value with only the highest set bit of Value kept (0 for 0).
	// Repeatedly isolates and clears the lowest set bit; the last bit isolated
	// before tmp reaches zero is the highest one. Loop order matters here.
	template<typename genIUType>
	GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value)
	{
		genIUType tmp = Value;
		genIUType result = genIUType(0);
		while(tmp)
		{
			result = (tmp & (~tmp + 1)); // grab lowest bit
			tmp &= ~result; // clear lowest bit
		}
		return result;
	}

	// Component-wise version.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> highestBitValue(vec<L, T, Q> const& v)
	{
		return detail::functor1<vec, L, T, T, Q>::call(highestBitValue, v);
	}
+
	///////////////////
	// lowestBitValue

	// Returns a value with only the lowest set bit of Value kept (two's
	// complement trick: v & (~v + 1) isolates the least significant set bit).
	template<typename genIUType>
	GLM_FUNC_QUALIFIER genIUType lowestBitValue(genIUType Value)
	{
		return (Value & (~Value + 1));
	}

	// Component-wise version.
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_QUALIFIER vec<L, T, Q> lowestBitValue(vec<L, T, Q> const& v)
	{
		return detail::functor1<vec, L, T, T, Q>::call(lowestBitValue, v);
	}
+
+ ///////////////////
+ // powerOfTwoAbove
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType powerOfTwoAbove(genType value)
+ {
+ return isPowerOfTwo(value) ? value : highestBitValue(value) << 1;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoAbove(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoAbove, v);
+ }
+
+ ///////////////////
+ // powerOfTwoBelow
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType powerOfTwoBelow(genType value)
+ {
+ return isPowerOfTwo(value) ? value : highestBitValue(value);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoBelow(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoBelow, v);
+ }
+
+ /////////////////////
+ // powerOfTwoNearest
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType powerOfTwoNearest(genType value)
+ {
+ if(isPowerOfTwo(value))
+ return value;
+
+ genType const prev = highestBitValue(value);
+ genType const next = prev << 1;
+ return (next - value) < (value - prev) ? next : prev;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& v)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoNearest, v);
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/closest_point.hpp b/3rdparty/glm/source/glm/gtx/closest_point.hpp
new file mode 100644
index 0000000..de6dbbf
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/closest_point.hpp
@@ -0,0 +1,49 @@
+/// @ref gtx_closest_point
+/// @file glm/gtx/closest_point.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_closest_point GLM_GTX_closest_point
+/// @ingroup gtx
+///
+/// Include <glm/gtx/closest_point.hpp> to use the features of this extension.
+///
/// Find the point on a straight line which is the closest to a given point.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_closest_point extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_closest_point
+ /// @{
+
	/// Find the point on a straight line which is the closest to a given point.
+ /// @see gtx_closest_point
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> closestPointOnLine(
+ vec<3, T, Q> const& point,
+ vec<3, T, Q> const& a,
+ vec<3, T, Q> const& b);
+
+ /// 2d lines work as well
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<2, T, Q> closestPointOnLine(
+ vec<2, T, Q> const& point,
+ vec<2, T, Q> const& a,
+ vec<2, T, Q> const& b);
+
+ /// @}
+}// namespace glm
+
+#include "closest_point.inl"
diff --git a/3rdparty/glm/source/glm/gtx/closest_point.inl b/3rdparty/glm/source/glm/gtx/closest_point.inl
new file mode 100644
index 0000000..0a39b04
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/closest_point.inl
@@ -0,0 +1,45 @@
+/// @ref gtx_closest_point
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> closestPointOnLine
+ (
+ vec<3, T, Q> const& point,
+ vec<3, T, Q> const& a,
+ vec<3, T, Q> const& b
+ )
+ {
+ T LineLength = distance(a, b);
+ vec<3, T, Q> Vector = point - a;
+ vec<3, T, Q> LineDirection = (b - a) / LineLength;
+
+ // Project Vector to LineDirection to get the distance of point from a
+ T Distance = dot(Vector, LineDirection);
+
+ if(Distance <= T(0)) return a;
+ if(Distance >= LineLength) return b;
+ return a + LineDirection * Distance;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> closestPointOnLine
+ (
+ vec<2, T, Q> const& point,
+ vec<2, T, Q> const& a,
+ vec<2, T, Q> const& b
+ )
+ {
+ T LineLength = distance(a, b);
+ vec<2, T, Q> Vector = point - a;
+ vec<2, T, Q> LineDirection = (b - a) / LineLength;
+
+ // Project Vector to LineDirection to get the distance of point from a
+ T Distance = dot(Vector, LineDirection);
+
+ if(Distance <= T(0)) return a;
+ if(Distance >= LineLength) return b;
+ return a + LineDirection * Distance;
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/color_encoding.hpp b/3rdparty/glm/source/glm/gtx/color_encoding.hpp
new file mode 100644
index 0000000..96ded2a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/color_encoding.hpp
@@ -0,0 +1,54 @@
+/// @ref gtx_color_encoding
+/// @file glm/gtx/color_encoding.hpp
+///
+/// @see core (dependence)
+/// @see gtx_color_encoding (dependence)
+///
+/// @defgroup gtx_color_encoding GLM_GTX_color_encoding
+/// @ingroup gtx
+///
+/// Include <glm/gtx/color_encoding.hpp> to use the features of this extension.
+///
+/// @brief Allow to perform color encoding conversions between sRGB and XYZ color spaces
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../vec3.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTC_color_encoding is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTC_color_encoding extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_color_encoding
+ /// @{
+
+	/// Convert a linear sRGB color to D65 XYZ.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB);
+
+	/// Convert a linear sRGB color to D50 XYZ.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB);
+
+	/// Convert a D65 XYZ color to linear sRGB.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ);
+
+	/// Convert a D65 XYZ color to D50 XYZ.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ);
+
+ /// @}
+} //namespace glm
+
+#include "color_encoding.inl"
diff --git a/3rdparty/glm/source/glm/gtx/color_encoding.inl b/3rdparty/glm/source/glm/gtx/color_encoding.inl
new file mode 100644
index 0000000..e50fa3e
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/color_encoding.inl
@@ -0,0 +1,45 @@
+/// @ref gtx_color_encoding
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB)
+ {
+ vec<3, T, Q> const M(0.490f, 0.17697f, 0.2f);
+ vec<3, T, Q> const N(0.31f, 0.8124f, 0.01063f);
+ vec<3, T, Q> const O(0.490f, 0.01f, 0.99f);
+
+ return (M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB) * static_cast<T>(5.650675255693055f);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB)
+ {
+ vec<3, T, Q> const M(0.436030342570117f, 0.222438466210245f, 0.013897440074263f);
+ vec<3, T, Q> const N(0.385101860087134f, 0.716942745571917f, 0.097076381494207f);
+ vec<3, T, Q> const O(0.143067806654203f, 0.060618777416563f, 0.713926257896652f);
+
+ return M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ)
+ {
+ vec<3, T, Q> const M(0.41847f, -0.091169f, 0.0009209f);
+ vec<3, T, Q> const N(-0.15866f, 0.25243f, 0.015708f);
+ vec<3, T, Q> const O(0.0009209f, -0.0025498f, 0.1786f);
+
+ return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ)
+ {
+ vec<3, T, Q> const M(+1.047844353856414f, +0.029549007606644f, -0.009250984365223f);
+ vec<3, T, Q> const N(+0.022898981050086f, +0.990508028941971f, +0.015072338237051f);
+ vec<3, T, Q> const O(-0.050206647741605f, -0.017074711360960f, +0.751717835079977f);
+
+ return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ;
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/color_space.hpp b/3rdparty/glm/source/glm/gtx/color_space.hpp
new file mode 100644
index 0000000..a634392
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/color_space.hpp
@@ -0,0 +1,72 @@
+/// @ref gtx_color_space
+/// @file glm/gtx/color_space.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_color_space GLM_GTX_color_space
+/// @ingroup gtx
+///
+/// Include <glm/gtx/color_space.hpp> to use the features of this extension.
+///
+/// Related to RGB to HSV conversions and operations.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_color_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_color_space extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_color_space
+ /// @{
+
+ /// Converts a color from HSV color space to its color in RGB color space.
+ /// @see gtx_color_space
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rgbColor(
+ vec<3, T, Q> const& hsvValue);
+
+ /// Converts a color from RGB color space to its color in HSV color space.
+ /// @see gtx_color_space
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> hsvColor(
+ vec<3, T, Q> const& rgbValue);
+
+ /// Build a saturation matrix.
+ /// @see gtx_color_space
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> saturation(
+ T const s);
+
+ /// Modify the saturation of a color.
+ /// @see gtx_color_space
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> saturation(
+ T const s,
+ vec<3, T, Q> const& color);
+
+ /// Modify the saturation of a color.
+ /// @see gtx_color_space
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> saturation(
+ T const s,
+ vec<4, T, Q> const& color);
+
+	/// Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB channels.
+ /// @see gtx_color_space
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T luminosity(
+ vec<3, T, Q> const& color);
+
+ /// @}
+}//namespace glm
+
+#include "color_space.inl"
diff --git a/3rdparty/glm/source/glm/gtx/color_space.inl b/3rdparty/glm/source/glm/gtx/color_space.inl
new file mode 100644
index 0000000..f698afe
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/color_space.inl
@@ -0,0 +1,141 @@
+/// @ref gtx_color_space
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rgbColor(const vec<3, T, Q>& hsvColor)
+ {
+ vec<3, T, Q> hsv = hsvColor;
+ vec<3, T, Q> rgbColor;
+
+ if(hsv.y == static_cast<T>(0))
+ // achromatic (grey)
+ rgbColor = vec<3, T, Q>(hsv.z);
+ else
+ {
+ T sector = floor(hsv.x * (T(1) / T(60)));
+ T frac = (hsv.x * (T(1) / T(60))) - sector;
+			// fractional part of h
+ T o = hsv.z * (T(1) - hsv.y);
+ T p = hsv.z * (T(1) - hsv.y * frac);
+ T q = hsv.z * (T(1) - hsv.y * (T(1) - frac));
+
+ switch(int(sector))
+ {
+ default:
+ case 0:
+ rgbColor.r = hsv.z;
+ rgbColor.g = q;
+ rgbColor.b = o;
+ break;
+ case 1:
+ rgbColor.r = p;
+ rgbColor.g = hsv.z;
+ rgbColor.b = o;
+ break;
+ case 2:
+ rgbColor.r = o;
+ rgbColor.g = hsv.z;
+ rgbColor.b = q;
+ break;
+ case 3:
+ rgbColor.r = o;
+ rgbColor.g = p;
+ rgbColor.b = hsv.z;
+ break;
+ case 4:
+ rgbColor.r = q;
+ rgbColor.g = o;
+ rgbColor.b = hsv.z;
+ break;
+ case 5:
+ rgbColor.r = hsv.z;
+ rgbColor.g = o;
+ rgbColor.b = p;
+ break;
+ }
+ }
+
+ return rgbColor;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> hsvColor(const vec<3, T, Q>& rgbColor)
+ {
+ vec<3, T, Q> hsv = rgbColor;
+ float Min = min(min(rgbColor.r, rgbColor.g), rgbColor.b);
+ float Max = max(max(rgbColor.r, rgbColor.g), rgbColor.b);
+ float Delta = Max - Min;
+
+ hsv.z = Max;
+
+ if(Max != static_cast<T>(0))
+ {
+ hsv.y = Delta / hsv.z;
+ T h = static_cast<T>(0);
+
+ if(rgbColor.r == Max)
+ // between yellow & magenta
+ h = static_cast<T>(0) + T(60) * (rgbColor.g - rgbColor.b) / Delta;
+ else if(rgbColor.g == Max)
+ // between cyan & yellow
+ h = static_cast<T>(120) + T(60) * (rgbColor.b - rgbColor.r) / Delta;
+ else
+ // between magenta & cyan
+ h = static_cast<T>(240) + T(60) * (rgbColor.r - rgbColor.g) / Delta;
+
+ if(h < T(0))
+ hsv.x = h + T(360);
+ else
+ hsv.x = h;
+ }
+ else
+ {
+ // If r = g = b = 0 then s = 0, h is undefined
+ hsv.y = static_cast<T>(0);
+ hsv.x = static_cast<T>(0);
+ }
+
+ return hsv;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> saturation(T const s)
+ {
+ vec<3, T, defaultp> rgbw = vec<3, T, defaultp>(T(0.2126), T(0.7152), T(0.0722));
+
+ vec<3, T, defaultp> const col((T(1) - s) * rgbw);
+
+ mat<4, 4, T, defaultp> result(T(1));
+ result[0][0] = col.x + s;
+ result[0][1] = col.x;
+ result[0][2] = col.x;
+ result[1][0] = col.y;
+ result[1][1] = col.y + s;
+ result[1][2] = col.y;
+ result[2][0] = col.z;
+ result[2][1] = col.z;
+ result[2][2] = col.z + s;
+
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> saturation(const T s, const vec<3, T, Q>& color)
+ {
+ return vec<3, T, Q>(saturation(s) * vec<4, T, Q>(color, T(0)));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> saturation(const T s, const vec<4, T, Q>& color)
+ {
+ return saturation(s) * color;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T luminosity(const vec<3, T, Q>& color)
+ {
+ const vec<3, T, Q> tmp = vec<3, T, Q>(0.33, 0.59, 0.11);
+ return dot(color, tmp);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/color_space_YCoCg.hpp b/3rdparty/glm/source/glm/gtx/color_space_YCoCg.hpp
new file mode 100644
index 0000000..dd2b771
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/color_space_YCoCg.hpp
@@ -0,0 +1,60 @@
+/// @ref gtx_color_space_YCoCg
+/// @file glm/gtx/color_space_YCoCg.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_color_space_YCoCg GLM_GTX_color_space_YCoCg
+/// @ingroup gtx
+///
+/// Include <glm/gtx/color_space_YCoCg.hpp> to use the features of this extension.
+///
+/// RGB to YCoCg conversions and operations
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_color_space_YCoCg is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_color_space_YCoCg extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_color_space_YCoCg
+ /// @{
+
+ /// Convert a color from RGB color space to YCoCg color space.
+ /// @see gtx_color_space_YCoCg
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCg(
+ vec<3, T, Q> const& rgbColor);
+
+ /// Convert a color from YCoCg color space to RGB color space.
+ /// @see gtx_color_space_YCoCg
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> YCoCg2rgb(
+ vec<3, T, Q> const& YCoCgColor);
+
+ /// Convert a color from RGB color space to YCoCgR color space.
+ /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range"
+ /// @see gtx_color_space_YCoCg
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCgR(
+ vec<3, T, Q> const& rgbColor);
+
+ /// Convert a color from YCoCgR color space to RGB color space.
+ /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range"
+ /// @see gtx_color_space_YCoCg
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> YCoCgR2rgb(
+ vec<3, T, Q> const& YCoCgColor);
+
+ /// @}
+}//namespace glm
+
+#include "color_space_YCoCg.inl"
diff --git a/3rdparty/glm/source/glm/gtx/color_space_YCoCg.inl b/3rdparty/glm/source/glm/gtx/color_space_YCoCg.inl
new file mode 100644
index 0000000..83ba857
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/color_space_YCoCg.inl
@@ -0,0 +1,107 @@
+/// @ref gtx_color_space_YCoCg
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCg
+ (
+ vec<3, T, Q> const& rgbColor
+ )
+ {
+ vec<3, T, Q> result;
+ result.x/*Y */ = rgbColor.r / T(4) + rgbColor.g / T(2) + rgbColor.b / T(4);
+ result.y/*Co*/ = rgbColor.r / T(2) + rgbColor.g * T(0) - rgbColor.b / T(2);
+ result.z/*Cg*/ = - rgbColor.r / T(4) + rgbColor.g / T(2) - rgbColor.b / T(4);
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCg2rgb
+ (
+ vec<3, T, Q> const& YCoCgColor
+ )
+ {
+ vec<3, T, Q> result;
+ result.r = YCoCgColor.x + YCoCgColor.y - YCoCgColor.z;
+ result.g = YCoCgColor.x + YCoCgColor.z;
+ result.b = YCoCgColor.x - YCoCgColor.y - YCoCgColor.z;
+ return result;
+ }
+
+ template<typename T, qualifier Q, bool isInteger>
+ class compute_YCoCgR {
+ public:
+ static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR
+ (
+ vec<3, T, Q> const& rgbColor
+ )
+ {
+ vec<3, T, Q> result;
+ result.x/*Y */ = rgbColor.g * static_cast<T>(0.5) + (rgbColor.r + rgbColor.b) * static_cast<T>(0.25);
+ result.y/*Co*/ = rgbColor.r - rgbColor.b;
+ result.z/*Cg*/ = rgbColor.g - (rgbColor.r + rgbColor.b) * static_cast<T>(0.5);
+ return result;
+ }
+
+ static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb
+ (
+ vec<3, T, Q> const& YCoCgRColor
+ )
+ {
+ vec<3, T, Q> result;
+ T tmp = YCoCgRColor.x - (YCoCgRColor.z * static_cast<T>(0.5));
+ result.g = YCoCgRColor.z + tmp;
+ result.b = tmp - (YCoCgRColor.y * static_cast<T>(0.5));
+ result.r = result.b + YCoCgRColor.y;
+ return result;
+ }
+ };
+
+ template<typename T, qualifier Q>
+ class compute_YCoCgR<T, Q, true> {
+ public:
+ static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR
+ (
+ vec<3, T, Q> const& rgbColor
+ )
+ {
+ vec<3, T, Q> result;
+ result.y/*Co*/ = rgbColor.r - rgbColor.b;
+ T tmp = rgbColor.b + (result.y >> 1);
+ result.z/*Cg*/ = rgbColor.g - tmp;
+ result.x/*Y */ = tmp + (result.z >> 1);
+ return result;
+ }
+
+ static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb
+ (
+ vec<3, T, Q> const& YCoCgRColor
+ )
+ {
+ vec<3, T, Q> result;
+ T tmp = YCoCgRColor.x - (YCoCgRColor.z >> 1);
+ result.g = YCoCgRColor.z + tmp;
+ result.b = tmp - (YCoCgRColor.y >> 1);
+ result.r = result.b + YCoCgRColor.y;
+ return result;
+ }
+ };
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR
+ (
+ vec<3, T, Q> const& rgbColor
+ )
+ {
+ return compute_YCoCgR<T, Q, std::numeric_limits<T>::is_integer>::rgb2YCoCgR(rgbColor);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb
+ (
+ vec<3, T, Q> const& YCoCgRColor
+ )
+ {
+ return compute_YCoCgR<T, Q, std::numeric_limits<T>::is_integer>::YCoCgR2rgb(YCoCgRColor);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/common.hpp b/3rdparty/glm/source/glm/gtx/common.hpp
new file mode 100644
index 0000000..254ada2
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/common.hpp
@@ -0,0 +1,76 @@
+/// @ref gtx_common
+/// @file glm/gtx/common.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_common GLM_GTX_common
+/// @ingroup gtx
+///
+/// Include <glm/gtx/common.hpp> to use the features of this extension.
+///
+/// @brief Provide functions to increase the compatibility with Cg and HLSL languages
+
+#pragma once
+
+// Dependencies:
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../gtc/vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_common extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_common
+ /// @{
+
+ /// Returns true if x is a denormalized number
+ /// Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format.
+ /// This format is less precise but can represent values closer to zero.
+ ///
+ /// @tparam genType Floating-point scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/isnan.xml">GLSL isnan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x);
+
+ /// Similar to 'mod' but with a different rounding and integer support.
+ /// Returns 'x - y * trunc(x/y)' instead of 'x - y * floor(x/y)'
+ ///
+ /// @see <a href="http://stackoverflow.com/questions/7610631/glsl-mod-vs-hlsl-fmod">GLSL mod vs HLSL fmod</a>
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/mod.xml">GLSL mod man page</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fmod(vec<L, T, Q> const& v);
+
+	/// Returns whether vector components values are within an interval. An open interval excludes its endpoints, and is denoted with parentheses.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_relational
+ template <length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
+
+ /// Returns whether vector components values are within an interval. A closed interval includes its endpoints, and is denoted with square brackets.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or integer scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see ext_vector_relational
+ template <length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> closeBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
+
+ /// @}
+}//namespace glm
+
+#include "common.inl"
diff --git a/3rdparty/glm/source/glm/gtx/common.inl b/3rdparty/glm/source/glm/gtx/common.inl
new file mode 100644
index 0000000..4ad2126
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/common.inl
@@ -0,0 +1,125 @@
+/// @ref gtx_common
+
+#include <cmath>
+#include "../gtc/epsilon.hpp"
+#include "../gtc/constants.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q, bool isFloat = true>
+ struct compute_fmod
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ return detail::functor2<vec, L, T, Q>::call(std::fmod, a, b);
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_fmod<L, T, Q, false>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+ {
+ return a % b;
+ }
+ };
+}//namespace detail
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool isdenormal(T const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
+
+# if GLM_HAS_CXX11_STL
+ return std::fpclassify(x) == FP_SUBNORMAL;
+# else
+ return epsilonNotEqual(x, static_cast<T>(0), epsilon<T>()) && std::fabs(x) < std::numeric_limits<T>::min();
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename vec<1, T, Q>::bool_type isdenormal
+ (
+ vec<1, T, Q> const& x
+ )
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
+
+ return typename vec<1, T, Q>::bool_type(
+ isdenormal(x.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename vec<2, T, Q>::bool_type isdenormal
+ (
+ vec<2, T, Q> const& x
+ )
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
+
+ return typename vec<2, T, Q>::bool_type(
+ isdenormal(x.x),
+ isdenormal(x.y));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename vec<3, T, Q>::bool_type isdenormal
+ (
+ vec<3, T, Q> const& x
+ )
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
+
+ return typename vec<3, T, Q>::bool_type(
+ isdenormal(x.x),
+ isdenormal(x.y),
+ isdenormal(x.z));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename vec<4, T, Q>::bool_type isdenormal
+ (
+ vec<4, T, Q> const& x
+ )
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
+
+ return typename vec<4, T, Q>::bool_type(
+ isdenormal(x.x),
+ isdenormal(x.y),
+ isdenormal(x.z),
+ isdenormal(x.w));
+ }
+
+ // fmod
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fmod(genType x, genType y)
+ {
+ return fmod(vec<1, genType>(x), y).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmod(vec<L, T, Q> const& x, T y)
+ {
+ return detail::compute_fmod<L, T, Q, std::numeric_limits<T>::is_iec559>::call(x, vec<L, T, Q>(y));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fmod(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return detail::compute_fmod<L, T, Q, std::numeric_limits<T>::is_iec559>::call(x, y);
+ }
+
+ template <length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
+ {
+ return greaterThan(Value, Min) && lessThan(Value, Max);
+ }
+
+ template <length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> closeBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
+ {
+ return greaterThanEqual(Value, Min) && lessThanEqual(Value, Max);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/compatibility.hpp b/3rdparty/glm/source/glm/gtx/compatibility.hpp
new file mode 100644
index 0000000..f1b00a6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/compatibility.hpp
@@ -0,0 +1,133 @@
+/// @ref gtx_compatibility
+/// @file glm/gtx/compatibility.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_compatibility GLM_GTX_compatibility
+/// @ingroup gtx
+///
+/// Include <glm/gtx/compatibility.hpp> to use the features of this extension.
+///
+/// Provide functions to increase the compatibility with Cg and HLSL languages
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_compatibility is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_compatibility extension included")
+# endif
+#endif
+
+#if GLM_COMPILER & GLM_COMPILER_VC
+# include <cfloat>
+#elif GLM_COMPILER & GLM_COMPILER_GCC
+# include <cmath>
+# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+# undef isfinite
+# endif
+#endif//GLM_COMPILER
+
+namespace glm
+{
+ /// @addtogroup gtx_compatibility
+ /// @{
+
+ template<typename T> GLM_FUNC_QUALIFIER T lerp(T x, T y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, const vec<2, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, const vec<3, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, const vec<4, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
+
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER T saturate(T x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> saturate(const vec<2, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> saturate(const vec<3, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> saturate(const vec<4, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
+
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER T atan2(T x, T y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> atan2(const vec<2, T, Q>& x, const vec<2, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> atan2(const vec<3, T, Q>& x, const vec<3, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> atan2(const vec<4, T, Q>& x, const vec<4, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
+
+ template<typename genType> GLM_FUNC_DECL bool isfinite(genType const& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_DECL vec<1, bool, Q> isfinite(const vec<1, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_DECL vec<2, bool, Q> isfinite(const vec<2, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_DECL vec<3, bool, Q> isfinite(const vec<3, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
+ template<typename T, qualifier Q> GLM_FUNC_DECL vec<4, bool, Q> isfinite(const vec<4, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
+
+ typedef bool bool1; //!< \brief boolean type with 1 component. (From GLM_GTX_compatibility extension)
+ typedef vec<2, bool, highp> bool2; //!< \brief boolean type with 2 components. (From GLM_GTX_compatibility extension)
+ typedef vec<3, bool, highp> bool3; //!< \brief boolean type with 3 components. (From GLM_GTX_compatibility extension)
+ typedef vec<4, bool, highp> bool4; //!< \brief boolean type with 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef bool bool1x1; //!< \brief boolean matrix with 1 x 1 component. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 2, bool, highp> bool2x2; //!< \brief boolean matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 3, bool, highp> bool2x3; //!< \brief boolean matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 4, bool, highp> bool2x4; //!< \brief boolean matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 2, bool, highp> bool3x2; //!< \brief boolean matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 3, bool, highp> bool3x3; //!< \brief boolean matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 4, bool, highp> bool3x4; //!< \brief boolean matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 2, bool, highp> bool4x2; //!< \brief boolean matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 3, bool, highp> bool4x3; //!< \brief boolean matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 4, bool, highp> bool4x4; //!< \brief boolean matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef int int1; //!< \brief integer vector with 1 component. (From GLM_GTX_compatibility extension)
+ typedef vec<2, int, highp> int2; //!< \brief integer vector with 2 components. (From GLM_GTX_compatibility extension)
+ typedef vec<3, int, highp> int3; //!< \brief integer vector with 3 components. (From GLM_GTX_compatibility extension)
+ typedef vec<4, int, highp> int4; //!< \brief integer vector with 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef int int1x1; //!< \brief integer matrix with 1 component. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 2, int, highp> int2x2; //!< \brief integer matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 3, int, highp> int2x3; //!< \brief integer matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 4, int, highp> int2x4; //!< \brief integer matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 2, int, highp> int3x2; //!< \brief integer matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 3, int, highp> int3x3; //!< \brief integer matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 4, int, highp> int3x4; //!< \brief integer matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 2, int, highp> int4x2; //!< \brief integer matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 3, int, highp> int4x3; //!< \brief integer matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 4, int, highp> int4x4; //!< \brief integer matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef float float1; //!< \brief single-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension)
+ typedef vec<2, float, highp> float2; //!< \brief single-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension)
+ typedef vec<3, float, highp> float3; //!< \brief single-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension)
+ typedef vec<4, float, highp> float4; //!< \brief single-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef float float1x1; //!< \brief single-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 2, float, highp> float2x2; //!< \brief single-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 3, float, highp> float2x3; //!< \brief single-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 4, float, highp> float2x4; //!< \brief single-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 2, float, highp> float3x2; //!< \brief single-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 3, float, highp> float3x3; //!< \brief single-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 4, float, highp> float3x4; //!< \brief single-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 2, float, highp> float4x2; //!< \brief single-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 3, float, highp> float4x3; //!< \brief single-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 4, float, highp> float4x4; //!< \brief single-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef double double1; //!< \brief double-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension)
+ typedef vec<2, double, highp> double2; //!< \brief double-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension)
+ typedef vec<3, double, highp> double3; //!< \brief double-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension)
+ typedef vec<4, double, highp> double4; //!< \brief double-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension)
+
+ typedef double double1x1; //!< \brief double-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 2, double, highp> double2x2; //!< \brief double-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 3, double, highp> double2x3; //!< \brief double-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<2, 4, double, highp> double2x4; //!< \brief double-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 2, double, highp> double3x2; //!< \brief double-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 3, double, highp> double3x3; //!< \brief double-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<3, 4, double, highp> double3x4; //!< \brief double-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 2, double, highp> double4x2; //!< \brief double-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 3, double, highp> double4x3; //!< \brief double-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
+ typedef mat<4, 4, double, highp> double4x4; //!< \brief double-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
+
+ /// @}
+}//namespace glm
+
+#include "compatibility.inl"
diff --git a/3rdparty/glm/source/glm/gtx/compatibility.inl b/3rdparty/glm/source/glm/gtx/compatibility.inl
new file mode 100644
index 0000000..1d49496
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/compatibility.inl
@@ -0,0 +1,62 @@
+#include <limits>
+
+namespace glm
+{
+ // isfinite
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool isfinite(
+ genType const& x)
+ {
+# if GLM_HAS_CXX11_STL
+ return std::isfinite(x) != 0;
+# elif GLM_COMPILER & GLM_COMPILER_VC
+ return _finite(x) != 0;
+# elif GLM_COMPILER & GLM_COMPILER_GCC && GLM_PLATFORM & GLM_PLATFORM_ANDROID
+ return _isfinite(x) != 0;
+# else
+ if (std::numeric_limits<genType>::is_integer || std::denorm_absent == std::numeric_limits<genType>::has_denorm)
+ return std::numeric_limits<genType>::min() <= x && std::numeric_limits<genType>::max() >= x;
+ else
+ return -std::numeric_limits<genType>::max() <= x && std::numeric_limits<genType>::max() >= x;
+# endif
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<1, bool, Q> isfinite(
+ vec<1, T, Q> const& x)
+ {
+ return vec<1, bool, Q>(
+ isfinite(x.x));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, bool, Q> isfinite(
+ vec<2, T, Q> const& x)
+ {
+ return vec<2, bool, Q>(
+ isfinite(x.x),
+ isfinite(x.y));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, bool, Q> isfinite(
+ vec<3, T, Q> const& x)
+ {
+ return vec<3, bool, Q>(
+ isfinite(x.x),
+ isfinite(x.y),
+ isfinite(x.z));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> isfinite(
+ vec<4, T, Q> const& x)
+ {
+ return vec<4, bool, Q>(
+ isfinite(x.x),
+ isfinite(x.y),
+ isfinite(x.z),
+ isfinite(x.w));
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/component_wise.hpp b/3rdparty/glm/source/glm/gtx/component_wise.hpp
new file mode 100644
index 0000000..34a2b0a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/component_wise.hpp
@@ -0,0 +1,69 @@
+/// @ref gtx_component_wise
+/// @file glm/gtx/component_wise.hpp
+/// @date 2007-05-21 / 2011-06-07
+/// @author Christophe Riccio
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_component_wise GLM_GTX_component_wise
+/// @ingroup gtx
+///
+/// Include <glm/gtx/component_wise.hpp> to use the features of this extension.
+///
+/// Operations between components of a type
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_component_wise is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_component_wise extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_component_wise
+ /// @{
+
+ /// Convert an integer vector to a normalized float vector.
+ /// If the parameter value type is already a floating qualifier type, the value is passed through.
+ /// @see gtx_component_wise
+ template<typename floatType, length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, floatType, Q> compNormalize(vec<L, T, Q> const& v);
+
+ /// Convert a normalized float vector to an integer vector.
+ /// If the parameter value type is already a floating qualifier type, the value is passed through.
+ /// @see gtx_component_wise
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> compScale(vec<L, floatType, Q> const& v);
+
+ /// Add all vector components together.
+ /// @see gtx_component_wise
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::value_type compAdd(genType const& v);
+
+ /// Multiply all vector components together.
+ /// @see gtx_component_wise
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::value_type compMul(genType const& v);
+
+ /// Find the minimum value between single vector components.
+ /// @see gtx_component_wise
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::value_type compMin(genType const& v);
+
+ /// Find the maximum value between single vector components.
+ /// @see gtx_component_wise
+ template<typename genType>
+ GLM_FUNC_DECL typename genType::value_type compMax(genType const& v);
+
+ /// @}
+}//namespace glm
+
+#include "component_wise.inl"
diff --git a/3rdparty/glm/source/glm/gtx/component_wise.inl b/3rdparty/glm/source/glm/gtx/component_wise.inl
new file mode 100644
index 0000000..cbbc7d4
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/component_wise.inl
@@ -0,0 +1,127 @@
+/// @ref gtx_component_wise
+
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, typename floatType, qualifier Q, bool isInteger, bool signedType>
+ struct compute_compNormalize
+ {};
+
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ struct compute_compNormalize<L, T, floatType, Q, true, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, floatType, Q> call(vec<L, T, Q> const& v)
+ {
+ floatType const Min = static_cast<floatType>(std::numeric_limits<T>::min());
+ floatType const Max = static_cast<floatType>(std::numeric_limits<T>::max());
+ return (vec<L, floatType, Q>(v) - Min) / (Max - Min) * static_cast<floatType>(2) - static_cast<floatType>(1);
+ }
+ };
+
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ struct compute_compNormalize<L, T, floatType, Q, true, false>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, floatType, Q> call(vec<L, T, Q> const& v)
+ {
+ return vec<L, floatType, Q>(v) / static_cast<floatType>(std::numeric_limits<T>::max());
+ }
+ };
+
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ struct compute_compNormalize<L, T, floatType, Q, false, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, floatType, Q> call(vec<L, T, Q> const& v)
+ {
+ return v;
+ }
+ };
+
+ template<length_t L, typename T, typename floatType, qualifier Q, bool isInteger, bool signedType>
+ struct compute_compScale
+ {};
+
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ struct compute_compScale<L, T, floatType, Q, true, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, floatType, Q> const& v)
+ {
+ floatType const Max = static_cast<floatType>(std::numeric_limits<T>::max()) + static_cast<floatType>(0.5);
+ vec<L, floatType, Q> const Scaled(v * Max);
+ vec<L, T, Q> const Result(Scaled - static_cast<floatType>(0.5));
+ return Result;
+ }
+ };
+
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ struct compute_compScale<L, T, floatType, Q, true, false>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, floatType, Q> const& v)
+ {
+ return vec<L, T, Q>(vec<L, floatType, Q>(v) * static_cast<floatType>(std::numeric_limits<T>::max()));
+ }
+ };
+
+ template<length_t L, typename T, typename floatType, qualifier Q>
+ struct compute_compScale<L, T, floatType, Q, false, true>
+ {
+ GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, floatType, Q> const& v)
+ {
+ return v;
+ }
+ };
+}//namespace detail
+
+ template<typename floatType, length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, floatType, Q> compNormalize(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "'compNormalize' accepts only floating-point types for 'floatType' template parameter");
+
+ return detail::compute_compNormalize<L, T, floatType, Q, std::numeric_limits<T>::is_integer, std::numeric_limits<T>::is_signed>::call(v);
+ }
+
+ template<typename T, length_t L, typename floatType, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> compScale(vec<L, floatType, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "'compScale' accepts only floating-point types for 'floatType' template parameter");
+
+ return detail::compute_compScale<L, T, floatType, Q, std::numeric_limits<T>::is_integer, std::numeric_limits<T>::is_signed>::call(v);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T compAdd(vec<L, T, Q> const& v)
+ {
+ T Result(0);
+ for(length_t i = 0, n = v.length(); i < n; ++i)
+ Result += v[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T compMul(vec<L, T, Q> const& v)
+ {
+ T Result(1);
+ for(length_t i = 0, n = v.length(); i < n; ++i)
+ Result *= v[i];
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T compMin(vec<L, T, Q> const& v)
+ {
+ T Result(v[0]);
+ for(length_t i = 1, n = v.length(); i < n; ++i)
+ Result = min(Result, v[i]);
+ return Result;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T compMax(vec<L, T, Q> const& v)
+ {
+ T Result(v[0]);
+ for(length_t i = 1, n = v.length(); i < n; ++i)
+ Result = max(Result, v[i]);
+ return Result;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/dual_quaternion.hpp b/3rdparty/glm/source/glm/gtx/dual_quaternion.hpp
new file mode 100644
index 0000000..6a51ab7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/dual_quaternion.hpp
@@ -0,0 +1,274 @@
+/// @ref gtx_dual_quaternion
+/// @file glm/gtx/dual_quaternion.hpp
+/// @author Maksim Vorobiev ([email protected])
+///
+/// @see core (dependence)
+/// @see gtc_constants (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_dual_quaternion GLM_GTX_dual_quaternion
+/// @ingroup gtx
+///
+/// Include <glm/gtx/dual_quaternion.hpp> to use the features of this extension.
+///
+/// Defines a templated dual-quaternion type and several dual-quaternion operations.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/constants.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_dual_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_dual_quaternion extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_dual_quaternion
+ /// @{
+
+ template<typename T, qualifier Q = defaultp>
+ struct tdualquat
+ {
+ // -- Implementation detail --
+
+ typedef T value_type;
+ typedef qua<T, Q> part_type;
+
+ // -- Data --
+
+ qua<T, Q> real, dual;
+
+ // -- Component accesses --
+
+ typedef length_t length_type;
+ /// Return the count of components of a dual quaternion
+ GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;}
+
+ GLM_FUNC_DECL part_type & operator[](length_type i);
+ GLM_FUNC_DECL part_type const& operator[](length_type i) const;
+
+ // -- Implicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR tdualquat() GLM_DEFAULT;
+ GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat<T, Q> const& d) GLM_DEFAULT;
+ template<qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat<T, P> const& d);
+
+ // -- Explicit basic constructors --
+
+ GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& real);
+ GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& orientation, vec<3, T, Q> const& translation);
+ GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& real, qua<T, Q> const& dual);
+
+ // -- Conversion constructors --
+
+ template<typename U, qualifier P>
+ GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT tdualquat(tdualquat<U, P> const& q);
+
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<2, 4, T, Q> const& holder_mat);
+ GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<3, 4, T, Q> const& aug_mat);
+
+ // -- Unary arithmetic operators --
+
+ GLM_FUNC_DECL tdualquat<T, Q> & operator=(tdualquat<T, Q> const& m) GLM_DEFAULT;
+
+ template<typename U>
+ GLM_FUNC_DECL tdualquat<T, Q> & operator=(tdualquat<U, Q> const& m);
+ template<typename U>
+ GLM_FUNC_DECL tdualquat<T, Q> & operator*=(U s);
+ template<typename U>
+ GLM_FUNC_DECL tdualquat<T, Q> & operator/=(U s);
+ };
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator+(tdualquat<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator-(tdualquat<T, Q> const& q);
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator+(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> operator*(tdualquat<T, Q> const& q, vec<3, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> operator*(tdualquat<T, Q> const& q, vec<4, T, Q> const& v);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, T const& s);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator*(T const& s, tdualquat<T, Q> const& q);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> operator/(tdualquat<T, Q> const& q, T const& s);
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator==(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2);
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool operator!=(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2);
+
+ /// Creates an identity dual quaternion.
+ ///
+ /// @see gtx_dual_quaternion
+ template <typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> dual_quat_identity();
+
+ /// Returns the normalized quaternion.
+ ///
+ /// @see gtx_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> normalize(tdualquat<T, Q> const& q);
+
+ /// Returns the linear interpolation of two dual quaternion.
+ ///
+ /// @see gtc_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> lerp(tdualquat<T, Q> const& x, tdualquat<T, Q> const& y, T const& a);
+
+ /// Returns the q inverse.
+ ///
+ /// @see gtx_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> inverse(tdualquat<T, Q> const& q);
+
+ /// Converts a quaternion to a 2 * 4 matrix.
+ ///
+ /// @see gtx_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> mat2x4_cast(tdualquat<T, Q> const& x);
+
+ /// Converts a quaternion to a 3 * 4 matrix.
+ ///
+ /// @see gtx_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> mat3x4_cast(tdualquat<T, Q> const& x);
+
+ /// Converts a 2 * 4 matrix (matrix which holds real and dual parts) to a quaternion.
+ ///
+ /// @see gtx_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> dualquat_cast(mat<2, 4, T, Q> const& x);
+
+ /// Converts a 3 * 4 matrix (augmented matrix rotation + translation) to a quaternion.
+ ///
+ /// @see gtx_dual_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL tdualquat<T, Q> dualquat_cast(mat<3, 4, T, Q> const& x);
+
+
+ /// Dual-quaternion of low single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<float, lowp> lowp_dualquat;
+
+ /// Dual-quaternion of medium single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<float, mediump> mediump_dualquat;
+
+ /// Dual-quaternion of high single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<float, highp> highp_dualquat;
+
+
+ /// Dual-quaternion of low single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<float, lowp> lowp_fdualquat;
+
+ /// Dual-quaternion of medium single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<float, mediump> mediump_fdualquat;
+
+ /// Dual-quaternion of high single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<float, highp> highp_fdualquat;
+
+
+ /// Dual-quaternion of low double-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<double, lowp> lowp_ddualquat;
+
+ /// Dual-quaternion of medium double-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<double, mediump> mediump_ddualquat;
+
+ /// Dual-quaternion of high double-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef tdualquat<double, highp> highp_ddualquat;
+
+
+#if(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+ /// Dual-quaternion of floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef highp_fdualquat dualquat;
+
+ /// Dual-quaternion of single-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef highp_fdualquat fdualquat;
+#elif(defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+ typedef highp_fdualquat dualquat;
+ typedef highp_fdualquat fdualquat;
+#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+ typedef mediump_fdualquat dualquat;
+ typedef mediump_fdualquat fdualquat;
+#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && defined(GLM_PRECISION_LOWP_FLOAT))
+ typedef lowp_fdualquat dualquat;
+ typedef lowp_fdualquat fdualquat;
+#else
+# error "GLM error: multiple default precision requested for single-precision floating-point types"
+#endif
+
+
+#if(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE))
+ /// Dual-quaternion of default double-qualifier floating-point numbers.
+ ///
+ /// @see gtx_dual_quaternion
+ typedef highp_ddualquat ddualquat;
+#elif(defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE))
+ typedef highp_ddualquat ddualquat;
+#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE))
+ typedef mediump_ddualquat ddualquat;
+#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && defined(GLM_PRECISION_LOWP_DOUBLE))
+ typedef lowp_ddualquat ddualquat;
+#else
+# error "GLM error: Multiple default precision requested for double-precision floating-point types"
+#endif
+
+ /// @}
+} //namespace glm
+
+#include "dual_quaternion.inl"
diff --git a/3rdparty/glm/source/glm/gtx/dual_quaternion.inl b/3rdparty/glm/source/glm/gtx/dual_quaternion.inl
new file mode 100644
index 0000000..fad07ea
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/dual_quaternion.inl
@@ -0,0 +1,352 @@
+/// @ref gtx_dual_quaternion
+
+#include "../geometric.hpp"
+#include <limits>
+
+namespace glm
+{
+ // -- Component accesses --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename tdualquat<T, Q>::part_type & tdualquat<T, Q>::operator[](typename tdualquat<T, Q>::length_type i)
+ {
+ assert(i >= 0 && i < this->length());
+ return (&real)[i];
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER typename tdualquat<T, Q>::part_type const& tdualquat<T, Q>::operator[](typename tdualquat<T, Q>::length_type i) const
+ {
+ assert(i >= 0 && i < this->length());
+ return (&real)[i];
+ }
+
+ // -- Implicit basic constructors --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat()
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS != GLM_DISABLE
+ : real(qua<T, Q>())
+ , dual(qua<T, Q>(0, 0, 0, 0))
+# endif
+ {}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(tdualquat<T, Q> const& d)
+ : real(d.real)
+ , dual(d.dual)
+ {}
+# endif
+
+ template<typename T, qualifier Q>
+ template<qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(tdualquat<T, P> const& d)
+ : real(d.real)
+ , dual(d.dual)
+ {}
+
+ // -- Explicit basic constructors --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(qua<T, Q> const& r)
+ : real(r), dual(qua<T, Q>(0, 0, 0, 0))
+ {}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(qua<T, Q> const& q, vec<3, T, Q> const& p)
+ : real(q), dual(
+ T(-0.5) * ( p.x*q.x + p.y*q.y + p.z*q.z),
+ T(+0.5) * ( p.x*q.w + p.y*q.z - p.z*q.y),
+ T(+0.5) * (-p.x*q.z + p.y*q.w + p.z*q.x),
+ T(+0.5) * ( p.x*q.y - p.y*q.x + p.z*q.w))
+ {}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(qua<T, Q> const& r, qua<T, Q> const& d)
+ : real(r), dual(d)
+ {}
+
+ // -- Conversion constructors --
+
+ template<typename T, qualifier Q>
+ template<typename U, qualifier P>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(tdualquat<U, P> const& q)
+ : real(q.real)
+ , dual(q.dual)
+ {}
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(mat<2, 4, T, Q> const& m)
+ {
+ *this = dualquat_cast(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat<T, Q>::tdualquat(mat<3, 4, T, Q> const& m)
+ {
+ *this = dualquat_cast(m);
+ }
+
+ // -- Unary arithmetic operators --
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> & tdualquat<T, Q>::operator=(tdualquat<T, Q> const& q)
+ {
+ this->real = q.real;
+ this->dual = q.dual;
+ return *this;
+ }
+# endif
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> & tdualquat<T, Q>::operator=(tdualquat<U, Q> const& q)
+ {
+ this->real = q.real;
+ this->dual = q.dual;
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> & tdualquat<T, Q>::operator*=(U s)
+ {
+ this->real *= static_cast<T>(s);
+ this->dual *= static_cast<T>(s);
+ return *this;
+ }
+
+ template<typename T, qualifier Q>
+ template<typename U>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> & tdualquat<T, Q>::operator/=(U s)
+ {
+ this->real /= static_cast<T>(s);
+ this->dual /= static_cast<T>(s);
+ return *this;
+ }
+
+ // -- Unary bit operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator+(tdualquat<T, Q> const& q)
+ {
+ return q;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator-(tdualquat<T, Q> const& q)
+ {
+ return tdualquat<T, Q>(-q.real, -q.dual);
+ }
+
+ // -- Binary operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator+(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p)
+ {
+ return tdualquat<T, Q>(q.real + p.real,q.dual + p.dual);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator*(tdualquat<T, Q> const& p, tdualquat<T, Q> const& o)
+ {
+ return tdualquat<T, Q>(p.real * o.real,p.real * o.dual + p.dual * o.real);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(tdualquat<T, Q> const& q, vec<3, T, Q> const& v)
+ {
+ vec<3, T, Q> const real_v3(q.real.x,q.real.y,q.real.z);
+ vec<3, T, Q> const dual_v3(q.dual.x,q.dual.y,q.dual.z);
+ return (cross(real_v3, cross(real_v3,v) + v * q.real.w + dual_v3) + dual_v3 * q.real.w - real_v3 * q.dual.w) * T(2) + v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat<T, Q> const& q)
+ {
+ return glm::inverse(q) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(tdualquat<T, Q> const& q, vec<4, T, Q> const& v)
+ {
+ return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat<T, Q> const& q)
+ {
+ return glm::inverse(q) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, T const& s)
+ {
+ return tdualquat<T, Q>(q.real * s, q.dual * s);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator*(T const& s, tdualquat<T, Q> const& q)
+ {
+ return q * s;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> operator/(tdualquat<T, Q> const& q, T const& s)
+ {
+ return tdualquat<T, Q>(q.real / s, q.dual / s);
+ }
+
+ // -- Boolean operators --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator==(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2)
+ {
+ return (q1.real == q2.real) && (q1.dual == q2.dual);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool operator!=(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2)
+ {
+ return (q1.real != q2.real) || (q1.dual != q2.dual);
+ }
+
+ // -- Operations --
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> dual_quat_identity()
+ {
+ return tdualquat<T, Q>(
+ qua<T, Q>(static_cast<T>(1), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0)),
+ qua<T, Q>(static_cast<T>(0), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0)));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> normalize(tdualquat<T, Q> const& q)
+ {
+ return q / length(q.real);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> lerp(tdualquat<T, Q> const& x, tdualquat<T, Q> const& y, T const& a)
+ {
+ // Dual Quaternion Linear blend aka DLB:
+ // Lerp is only defined in [0, 1]
+ assert(a >= static_cast<T>(0));
+ assert(a <= static_cast<T>(1));
+ T const k = dot(x.real,y.real) < static_cast<T>(0) ? -a : a;
+ T const one(1);
+ return tdualquat<T, Q>(x * (one - a) + y * k);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> inverse(tdualquat<T, Q> const& q)
+ {
+ const glm::qua<T, Q> real = conjugate(q.real);
+ const glm::qua<T, Q> dual = conjugate(q.dual);
+ return tdualquat<T, Q>(real, dual + (real * (-2.0f * dot(real,dual))));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat2x4_cast(tdualquat<T, Q> const& x)
+ {
+ return mat<2, 4, T, Q>( x[0].x, x[0].y, x[0].z, x[0].w, x[1].x, x[1].y, x[1].z, x[1].w );
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat3x4_cast(tdualquat<T, Q> const& x)
+ {
+ qua<T, Q> r = x.real / length2(x.real);
+
+ qua<T, Q> const rr(r.w * x.real.w, r.x * x.real.x, r.y * x.real.y, r.z * x.real.z);
+ r *= static_cast<T>(2);
+
+ T const xy = r.x * x.real.y;
+ T const xz = r.x * x.real.z;
+ T const yz = r.y * x.real.z;
+ T const wx = r.w * x.real.x;
+ T const wy = r.w * x.real.y;
+ T const wz = r.w * x.real.z;
+
+ vec<4, T, Q> const a(
+ rr.w + rr.x - rr.y - rr.z,
+ xy - wz,
+ xz + wy,
+ -(x.dual.w * r.x - x.dual.x * r.w + x.dual.y * r.z - x.dual.z * r.y));
+
+ vec<4, T, Q> const b(
+ xy + wz,
+ rr.w + rr.y - rr.x - rr.z,
+ yz - wx,
+ -(x.dual.w * r.y - x.dual.x * r.z - x.dual.y * r.w + x.dual.z * r.x));
+
+ vec<4, T, Q> const c(
+ xz - wy,
+ yz + wx,
+ rr.w + rr.z - rr.x - rr.y,
+ -(x.dual.w * r.z + x.dual.x * r.y - x.dual.y * r.x - x.dual.z * r.w));
+
+ return mat<3, 4, T, Q>(a, b, c);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> dualquat_cast(mat<2, 4, T, Q> const& x)
+ {
+ return tdualquat<T, Q>(
+ qua<T, Q>( x[0].w, x[0].x, x[0].y, x[0].z ),
+ qua<T, Q>( x[1].w, x[1].x, x[1].y, x[1].z ));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER tdualquat<T, Q> dualquat_cast(mat<3, 4, T, Q> const& x)
+ {
+ qua<T, Q> real;
+
+ T const trace = x[0].x + x[1].y + x[2].z;
+ if(trace > static_cast<T>(0))
+ {
+ T const r = sqrt(T(1) + trace);
+ T const invr = static_cast<T>(0.5) / r;
+ real.w = static_cast<T>(0.5) * r;
+ real.x = (x[2].y - x[1].z) * invr;
+ real.y = (x[0].z - x[2].x) * invr;
+ real.z = (x[1].x - x[0].y) * invr;
+ }
+ else if(x[0].x > x[1].y && x[0].x > x[2].z)
+ {
+ T const r = sqrt(T(1) + x[0].x - x[1].y - x[2].z);
+ T const invr = static_cast<T>(0.5) / r;
+ real.x = static_cast<T>(0.5)*r;
+ real.y = (x[1].x + x[0].y) * invr;
+ real.z = (x[0].z + x[2].x) * invr;
+ real.w = (x[2].y - x[1].z) * invr;
+ }
+ else if(x[1].y > x[2].z)
+ {
+ T const r = sqrt(T(1) + x[1].y - x[0].x - x[2].z);
+ T const invr = static_cast<T>(0.5) / r;
+ real.x = (x[1].x + x[0].y) * invr;
+ real.y = static_cast<T>(0.5) * r;
+ real.z = (x[2].y + x[1].z) * invr;
+ real.w = (x[0].z - x[2].x) * invr;
+ }
+ else
+ {
+ T const r = sqrt(T(1) + x[2].z - x[0].x - x[1].y);
+ T const invr = static_cast<T>(0.5) / r;
+ real.x = (x[0].z + x[2].x) * invr;
+ real.y = (x[2].y + x[1].z) * invr;
+ real.z = static_cast<T>(0.5) * r;
+ real.w = (x[1].x - x[0].y) * invr;
+ }
+
+ qua<T, Q> dual;
+ dual.x = static_cast<T>(0.5) * ( x[0].w * real.w + x[1].w * real.z - x[2].w * real.y);
+ dual.y = static_cast<T>(0.5) * (-x[0].w * real.z + x[1].w * real.w + x[2].w * real.x);
+ dual.z = static_cast<T>(0.5) * ( x[0].w * real.y - x[1].w * real.x + x[2].w * real.w);
+ dual.w = -static_cast<T>(0.5) * ( x[0].w * real.x + x[1].w * real.y + x[2].w * real.z);
+ return tdualquat<T, Q>(real, dual);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/easing.hpp b/3rdparty/glm/source/glm/gtx/easing.hpp
new file mode 100644
index 0000000..57f3d61
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/easing.hpp
@@ -0,0 +1,219 @@
+/// @ref gtx_easing
+/// @file glm/gtx/easing.hpp
+/// @author Robert Chisholm
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_easing GLM_GTX_easing
+/// @ingroup gtx
+///
+/// Include <glm/gtx/easing.hpp> to use the features of this extension.
+///
+/// Easing functions for animations and transitions
+/// All functions take a parameter x in the range [0.0,1.0]
+///
+/// Based on the AHEasing project of Warren Moore (https://github.com/warrenm/AHEasing)
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/constants.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_easing is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_easing extension included")
+# endif
+#endif
+
+namespace glm{
+ /// @addtogroup gtx_easing
+ /// @{
+
+ /// Modelled after the line y = x
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType linearInterpolation(genType const & a);
+
+ /// Modelled after the parabola y = x^2
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quadraticEaseIn(genType const & a);
+
+ /// Modelled after the parabola y = -x^2 + 2x
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quadraticEaseOut(genType const & a);
+
+ /// Modelled after the piecewise quadratic
+ /// y = (1/2)((2x)^2) ; [0, 0.5)
+ /// y = -(1/2)((2x-1)*(2x-3) - 1) ; [0.5, 1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quadraticEaseInOut(genType const & a);
+
+ /// Modelled after the cubic y = x^3
+ template <typename genType>
+ GLM_FUNC_DECL genType cubicEaseIn(genType const & a);
+
+ /// Modelled after the cubic y = (x - 1)^3 + 1
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType cubicEaseOut(genType const & a);
+
+ /// Modelled after the piecewise cubic
+ /// y = (1/2)((2x)^3) ; [0, 0.5)
+ /// y = (1/2)((2x-2)^3 + 2) ; [0.5, 1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType cubicEaseInOut(genType const & a);
+
+ /// Modelled after the quartic x^4
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quarticEaseIn(genType const & a);
+
+ /// Modelled after the quartic y = 1 - (x - 1)^4
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quarticEaseOut(genType const & a);
+
+ /// Modelled after the piecewise quartic
+ /// y = (1/2)((2x)^4) ; [0, 0.5)
+ /// y = -(1/2)((2x-2)^4 - 2) ; [0.5, 1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quarticEaseInOut(genType const & a);
+
+ /// Modelled after the quintic y = x^5
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quinticEaseIn(genType const & a);
+
+ /// Modelled after the quintic y = (x - 1)^5 + 1
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quinticEaseOut(genType const & a);
+
+ /// Modelled after the piecewise quintic
+ /// y = (1/2)((2x)^5) ; [0, 0.5)
+ /// y = (1/2)((2x-2)^5 + 2) ; [0.5, 1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType quinticEaseInOut(genType const & a);
+
+ /// Modelled after quarter-cycle of sine wave
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType sineEaseIn(genType const & a);
+
+ /// Modelled after quarter-cycle of sine wave (different phase)
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType sineEaseOut(genType const & a);
+
+ /// Modelled after half sine wave
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType sineEaseInOut(genType const & a);
+
+ /// Modelled after shifted quadrant IV of unit circle
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType circularEaseIn(genType const & a);
+
+ /// Modelled after shifted quadrant II of unit circle
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType circularEaseOut(genType const & a);
+
+ /// Modelled after the piecewise circular function
+ /// y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5)
+ /// y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType circularEaseInOut(genType const & a);
+
+ /// Modelled after the exponential function y = 2^(10(x - 1))
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType exponentialEaseIn(genType const & a);
+
+ /// Modelled after the exponential function y = -2^(-10x) + 1
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType exponentialEaseOut(genType const & a);
+
+ /// Modelled after the piecewise exponential
+ /// y = (1/2)2^(10(2x - 1)) ; [0,0.5)
+ /// y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType exponentialEaseInOut(genType const & a);
+
+ /// Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1))
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType elasticEaseIn(genType const & a);
+
+ /// Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType elasticEaseOut(genType const & a);
+
+ /// Modelled after the piecewise exponentially-damped sine wave:
+ /// y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5)
+ /// y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1]
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType elasticEaseInOut(genType const & a);
+
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType backEaseIn(genType const& a);
+
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType backEaseOut(genType const& a);
+
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType backEaseInOut(genType const& a);
+
+ /// @param a parameter
+ /// @param o Optional overshoot modifier
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType backEaseIn(genType const& a, genType const& o);
+
+ /// @param a parameter
+ /// @param o Optional overshoot modifier
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType backEaseOut(genType const& a, genType const& o);
+
+ /// @param a parameter
+ /// @param o Optional overshoot modifier
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType backEaseInOut(genType const& a, genType const& o);
+
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType bounceEaseIn(genType const& a);
+
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType bounceEaseOut(genType const& a);
+
+ /// @see gtx_easing
+ template <typename genType>
+ GLM_FUNC_DECL genType bounceEaseInOut(genType const& a);
+
+ /// @}
+}//namespace glm
+
+#include "easing.inl"
diff --git a/3rdparty/glm/source/glm/gtx/easing.inl b/3rdparty/glm/source/glm/gtx/easing.inl
new file mode 100644
index 0000000..4b7d05b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/easing.inl
@@ -0,0 +1,436 @@
+/// @ref gtx_easing
+
+#include <cmath>
+
+namespace glm{
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType linearInterpolation(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return a;
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quadraticEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return a * a;
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quadraticEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return -(a * (a - static_cast<genType>(2)));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quadraticEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ {
+ return static_cast<genType>(2) * a * a;
+ }
+ else
+ {
+ return (-static_cast<genType>(2) * a * a) + (4 * a) - one<genType>();
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType cubicEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return a * a * a;
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType cubicEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ genType const f = a - one<genType>();
+ return f * f * f + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType cubicEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if (a < static_cast<genType>(0.5))
+ {
+ return static_cast<genType>(4) * a * a * a;
+ }
+ else
+ {
+ genType const f = ((static_cast<genType>(2) * a) - static_cast<genType>(2));
+ return static_cast<genType>(0.5) * f * f * f + one<genType>();
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quarticEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return a * a * a * a;
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quarticEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ genType const f = (a - one<genType>());
+ return f * f * f * (one<genType>() - a) + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quarticEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ {
+ return static_cast<genType>(8) * a * a * a * a;
+ }
+ else
+ {
+ genType const f = (a - one<genType>());
+ return -static_cast<genType>(8) * f * f * f * f + one<genType>();
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quinticEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return a * a * a * a * a;
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quinticEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ genType const f = (a - one<genType>());
+ return f * f * f * f * f + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType quinticEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ {
+ return static_cast<genType>(16) * a * a * a * a * a;
+ }
+ else
+ {
+ genType const f = ((static_cast<genType>(2) * a) - static_cast<genType>(2));
+ return static_cast<genType>(0.5) * f * f * f * f * f + one<genType>();
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType sineEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return sin((a - one<genType>()) * half_pi<genType>()) + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType sineEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return sin(a * half_pi<genType>());
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType sineEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return static_cast<genType>(0.5) * (one<genType>() - cos(a * pi<genType>()));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType circularEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return one<genType>() - sqrt(one<genType>() - (a * a));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType circularEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return sqrt((static_cast<genType>(2) - a) * a);
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType circularEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ {
+ return static_cast<genType>(0.5) * (one<genType>() - std::sqrt(one<genType>() - static_cast<genType>(4) * (a * a)));
+ }
+ else
+ {
+ return static_cast<genType>(0.5) * (std::sqrt(-((static_cast<genType>(2) * a) - static_cast<genType>(3)) * ((static_cast<genType>(2) * a) - one<genType>())) + one<genType>());
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType exponentialEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a <= zero<genType>())
+ return a;
+ else
+ {
+ genType const Complementary = a - one<genType>();
+ genType const Two = static_cast<genType>(2);
+
+ return glm::pow(Two, Complementary * static_cast<genType>(10));
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType exponentialEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a >= one<genType>())
+ return a;
+ else
+ {
+ return one<genType>() - glm::pow(static_cast<genType>(2), -static_cast<genType>(10) * a);
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType exponentialEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ return static_cast<genType>(0.5) * glm::pow(static_cast<genType>(2), (static_cast<genType>(20) * a) - static_cast<genType>(10));
+ else
+ return -static_cast<genType>(0.5) * glm::pow(static_cast<genType>(2), (-static_cast<genType>(20) * a) + static_cast<genType>(10)) + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType elasticEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return std::sin(static_cast<genType>(13) * half_pi<genType>() * a) * glm::pow(static_cast<genType>(2), static_cast<genType>(10) * (a - one<genType>()));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType elasticEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return std::sin(-static_cast<genType>(13) * half_pi<genType>() * (a + one<genType>())) * glm::pow(static_cast<genType>(2), -static_cast<genType>(10) * a) + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType elasticEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ return static_cast<genType>(0.5) * std::sin(static_cast<genType>(13) * half_pi<genType>() * (static_cast<genType>(2) * a)) * glm::pow(static_cast<genType>(2), static_cast<genType>(10) * ((static_cast<genType>(2) * a) - one<genType>()));
+ else
+ return static_cast<genType>(0.5) * (std::sin(-static_cast<genType>(13) * half_pi<genType>() * ((static_cast<genType>(2) * a - one<genType>()) + one<genType>())) * glm::pow(static_cast<genType>(2), -static_cast<genType>(10) * (static_cast<genType>(2) * a - one<genType>())) + static_cast<genType>(2));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a, genType const& o)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ genType z = ((o + one<genType>()) * a) - o;
+ return (a * a * z);
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a, genType const& o)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ genType n = a - one<genType>();
+ genType z = ((o + one<genType>()) * n) + o;
+ return (n * n * z) + one<genType>();
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a, genType const& o)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ genType s = o * static_cast<genType>(1.525);
+ genType x = static_cast<genType>(0.5);
+ genType n = a / static_cast<genType>(0.5);
+
+ if (n < static_cast<genType>(1))
+ {
+ genType z = ((s + static_cast<genType>(1)) * n) - s;
+ genType m = n * n * z;
+ return x * m;
+ }
+ else
+ {
+ n -= static_cast<genType>(2);
+ genType z = ((s + static_cast<genType>(1)) * n) + s;
+ genType m = (n*n*z) + static_cast<genType>(2);
+ return x * m;
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a)
+ {
+ return backEaseIn(a, static_cast<genType>(1.70158));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a)
+ {
+ return backEaseOut(a, static_cast<genType>(1.70158));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a)
+ {
+ return backEaseInOut(a, static_cast<genType>(1.70158));
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType bounceEaseOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(4.0 / 11.0))
+ {
+ return (static_cast<genType>(121) * a * a) / static_cast<genType>(16);
+ }
+ else if(a < static_cast<genType>(8.0 / 11.0))
+ {
+ return (static_cast<genType>(363.0 / 40.0) * a * a) - (static_cast<genType>(99.0 / 10.0) * a) + static_cast<genType>(17.0 / 5.0);
+ }
+ else if(a < static_cast<genType>(9.0 / 10.0))
+ {
+ return (static_cast<genType>(4356.0 / 361.0) * a * a) - (static_cast<genType>(35442.0 / 1805.0) * a) + static_cast<genType>(16061.0 / 1805.0);
+ }
+ else
+ {
+ return (static_cast<genType>(54.0 / 5.0) * a * a) - (static_cast<genType>(513.0 / 25.0) * a) + static_cast<genType>(268.0 / 25.0);
+ }
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType bounceEaseIn(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ return one<genType>() - bounceEaseOut(one<genType>() - a);
+ }
+
+ template <typename genType>
+ GLM_FUNC_QUALIFIER genType bounceEaseInOut(genType const& a)
+ {
+ // Only defined in [0, 1]
+ assert(a >= zero<genType>());
+ assert(a <= one<genType>());
+
+ if(a < static_cast<genType>(0.5))
+ {
+ return static_cast<genType>(0.5) * (one<genType>() - bounceEaseOut(a * static_cast<genType>(2)));
+ }
+ else
+ {
+ return static_cast<genType>(0.5) * bounceEaseOut(a * static_cast<genType>(2) - one<genType>()) + static_cast<genType>(0.5);
+ }
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/euler_angles.hpp b/3rdparty/glm/source/glm/gtx/euler_angles.hpp
new file mode 100644
index 0000000..2723697
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/euler_angles.hpp
@@ -0,0 +1,335 @@
+/// @ref gtx_euler_angles
+/// @file glm/gtx/euler_angles.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_euler_angles GLM_GTX_euler_angles
+/// @ingroup gtx
+///
+/// Include <glm/gtx/euler_angles.hpp> to use the features of this extension.
+///
+/// Build matrices from Euler angles.
+///
+/// Extraction of Euler angles from rotation matrix.
+/// Based on the original paper 2014 Mike Day - Extracting Euler Angles from a Rotation Matrix.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_euler_angles is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_euler_angles extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_euler_angles
+ /// @{
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X.
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleX(
+ T const& angleX);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y.
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleY(
+ T const& angleY);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z.
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZ(
+ T const& angleZ);
+
+ /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis.
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleX(
+ T const & angleX, T const & angularVelocityX);
+
+ /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis.
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleY(
+ T const & angleY, T const & angularVelocityY);
+
+ /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis.
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleZ(
+ T const & angleZ, T const & angularVelocityZ);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXY(
+ T const& angleX,
+ T const& angleY);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYX(
+ T const& angleY,
+ T const& angleX);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZ(
+ T const& angleX,
+ T const& angleZ);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZX(
+ T const& angle,
+ T const& angleX);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZ(
+ T const& angleY,
+ T const& angleZ);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZY(
+ T const& angleZ,
+ T const& angleY);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYZ(
+ T const& t1,
+ T const& t2,
+ T const& t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXZ(
+ T const& yaw,
+ T const& pitch,
+ T const& roll);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZX(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYX(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXY(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZY(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYZ(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXZ(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZY(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZX(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYX(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y).
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXY(
+ T const & t1,
+ T const & t2,
+ T const & t3);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<4, 4, T, defaultp> yawPitchRoll(
+ T const& yaw,
+ T const& pitch,
+ T const& roll);
+
+ /// Creates a 2D 2 * 2 rotation matrix from an euler angle.
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<2, 2, T, defaultp> orientate2(T const& angle);
+
+	/// Creates a 2D 3 * 3 homogeneous rotation matrix from an euler angle.
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL mat<3, 3, T, defaultp> orientate3(T const& angle);
+
+ /// Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z).
+ /// @see gtx_euler_angles
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> orientate3(vec<3, T, Q> const& angles);
+
+ /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
+ /// @see gtx_euler_angles
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> orientate4(vec<3, T, Q> const& angles);
+
+ /// Extracts the (X * Y * Z) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template<typename T>
+ GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Y * X * Z) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (X * Z * X) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (X * Y * X) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Y * X * Y) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Y * Z * Y) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Z * Y * Z) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Z * X * Z) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (X * Z * Y) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Y * Z * X) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Z * Y * X) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// Extracts the (Z * X * Y) Euler angles from the rotation matrix M
+ /// @see gtx_euler_angles
+ template <typename T>
+ GLM_FUNC_DECL void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3);
+
+ /// @}
+}//namespace glm
+
+#include "euler_angles.inl"
diff --git a/3rdparty/glm/source/glm/gtx/euler_angles.inl b/3rdparty/glm/source/glm/gtx/euler_angles.inl
new file mode 100644
index 0000000..3f13df6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/euler_angles.inl
@@ -0,0 +1,899 @@
+/// @ref gtx_euler_angles
+
+#include "compatibility.hpp" // glm::atan2
+
+namespace glm
+{
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleX
+	(
+		T const& angleX // rotation angle in radians
+	)
+	{
+		T cosX = glm::cos(angleX);
+		T sinX = glm::sin(angleX);
+
+		return mat<4, 4, T, defaultp>( // 4x4 rotation matrix about the X axis
+			T(1), T(0), T(0), T(0),
+			T(0), cosX, sinX, T(0),
+			T(0),-sinX, cosX, T(0),
+			T(0), T(0), T(0), T(1));
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleY
+	(
+		T const& angleY // rotation angle in radians
+	)
+	{
+		T cosY = glm::cos(angleY);
+		T sinY = glm::sin(angleY);
+
+		return mat<4, 4, T, defaultp>( // 4x4 rotation matrix about the Y axis
+			cosY,	T(0),	-sinY,	T(0),
+			T(0),	T(1),	T(0),	T(0),
+			sinY,	T(0),	cosY,	T(0),
+			T(0),	T(0),	T(0),	T(1));
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZ
+	(
+		T const& angleZ // rotation angle in radians
+	)
+	{
+		T cosZ = glm::cos(angleZ);
+		T sinZ = glm::sin(angleZ);
+
+		return mat<4, 4, T, defaultp>( // 4x4 rotation matrix about the Z axis
+			cosZ,	sinZ,	T(0), T(0),
+			-sinZ,	cosZ,	T(0), T(0),
+			T(0),	T(0),	T(1), T(0),
+			T(0),	T(0),	T(0), T(1));
+	}
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleX
+ (
+ T const & angleX,
+ T const & angularVelocityX
+ )
+ {
+ T cosX = glm::cos(angleX) * angularVelocityX;
+ T sinX = glm::sin(angleX) * angularVelocityX;
+
+ return mat<4, 4, T, defaultp>(
+ T(0), T(0), T(0), T(0),
+ T(0),-sinX, cosX, T(0),
+ T(0),-cosX,-sinX, T(0),
+ T(0), T(0), T(0), T(0));
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleY
+ (
+ T const & angleY,
+ T const & angularVelocityY
+ )
+ {
+ T cosY = glm::cos(angleY) * angularVelocityY;
+ T sinY = glm::sin(angleY) * angularVelocityY;
+
+ return mat<4, 4, T, defaultp>(
+ -sinY, T(0), -cosY, T(0),
+ T(0), T(0), T(0), T(0),
+ cosY, T(0), -sinY, T(0),
+ T(0), T(0), T(0), T(0));
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleZ
+ (
+ T const & angleZ,
+ T const & angularVelocityZ
+ )
+ {
+ T cosZ = glm::cos(angleZ) * angularVelocityZ;
+ T sinZ = glm::sin(angleZ) * angularVelocityZ;
+
+ return mat<4, 4, T, defaultp>(
+ -sinZ, cosZ, T(0), T(0),
+ -cosZ, -sinZ, T(0), T(0),
+ T(0), T(0), T(0), T(0),
+ T(0), T(0), T(0), T(0));
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXY
+ (
+ T const& angleX,
+ T const& angleY
+ )
+ {
+ T cosX = glm::cos(angleX);
+ T sinX = glm::sin(angleX);
+ T cosY = glm::cos(angleY);
+ T sinY = glm::sin(angleY);
+
+ return mat<4, 4, T, defaultp>(
+ cosY, -sinX * -sinY, cosX * -sinY, T(0),
+ T(0), cosX, sinX, T(0),
+ sinY, -sinX * cosY, cosX * cosY, T(0),
+ T(0), T(0), T(0), T(1));
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYX
+ (
+ T const& angleY,
+ T const& angleX
+ )
+ {
+ T cosX = glm::cos(angleX);
+ T sinX = glm::sin(angleX);
+ T cosY = glm::cos(angleY);
+ T sinY = glm::sin(angleY);
+
+ return mat<4, 4, T, defaultp>(
+ cosY, 0, -sinY, T(0),
+ sinY * sinX, cosX, cosY * sinX, T(0),
+ sinY * cosX, -sinX, cosY * cosX, T(0),
+ T(0), T(0), T(0), T(1));
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZ
+ (
+ T const& angleX,
+ T const& angleZ
+ )
+ {
+ return eulerAngleX(angleX) * eulerAngleZ(angleZ);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZX
+ (
+ T const& angleZ,
+ T const& angleX
+ )
+ {
+ return eulerAngleZ(angleZ) * eulerAngleX(angleX);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZ
+ (
+ T const& angleY,
+ T const& angleZ
+ )
+ {
+ return eulerAngleY(angleY) * eulerAngleZ(angleZ);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZY
+ (
+ T const& angleZ,
+ T const& angleY
+ )
+ {
+ return eulerAngleZ(angleZ) * eulerAngleY(angleY);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ
+ (
+ T const& t1,
+ T const& t2,
+ T const& t3
+ )
+ {
+ T c1 = glm::cos(-t1);
+ T c2 = glm::cos(-t2);
+ T c3 = glm::cos(-t3);
+ T s1 = glm::sin(-t1);
+ T s2 = glm::sin(-t2);
+ T s3 = glm::sin(-t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c2 * c3;
+ Result[0][1] =-c1 * s3 + s1 * s2 * c3;
+ Result[0][2] = s1 * s3 + c1 * s2 * c3;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = c2 * s3;
+ Result[1][1] = c1 * c3 + s1 * s2 * s3;
+ Result[1][2] =-s1 * c3 + c1 * s2 * s3;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] =-s2;
+ Result[2][1] = s1 * c2;
+ Result[2][2] = c1 * c2;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXZ
+ (
+ T const& yaw,
+ T const& pitch,
+ T const& roll
+ )
+ {
+ T tmp_ch = glm::cos(yaw);
+ T tmp_sh = glm::sin(yaw);
+ T tmp_cp = glm::cos(pitch);
+ T tmp_sp = glm::sin(pitch);
+ T tmp_cb = glm::cos(roll);
+ T tmp_sb = glm::sin(roll);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb;
+ Result[0][1] = tmp_sb * tmp_cp;
+ Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb;
+ Result[1][1] = tmp_cb * tmp_cp;
+ Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = tmp_sh * tmp_cp;
+ Result[2][1] = -tmp_sp;
+ Result[2][2] = tmp_ch * tmp_cp;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZX
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c2;
+ Result[0][1] = c1 * s2;
+ Result[0][2] = s1 * s2;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] =-c3 * s2;
+ Result[1][1] = c1 * c2 * c3 - s1 * s3;
+ Result[1][2] = c1 * s3 + c2 * c3 * s1;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = s2 * s3;
+ Result[2][1] =-c3 * s1 - c1 * c2 * s3;
+ Result[2][2] = c1 * c3 - c2 * s1 * s3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYX
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c2;
+ Result[0][1] = s1 * s2;
+ Result[0][2] =-c1 * s2;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = s2 * s3;
+ Result[1][1] = c1 * c3 - c2 * s1 * s3;
+ Result[1][2] = c3 * s1 + c1 * c2 * s3;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c3 * s2;
+ Result[2][1] =-c1 * s3 - c2 * c3 * s1;
+ Result[2][2] = c1 * c2 * c3 - s1 * s3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXY
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c3 - c2 * s1 * s3;
+ Result[0][1] = s2* s3;
+ Result[0][2] =-c3 * s1 - c1 * c2 * s3;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = s1 * s2;
+ Result[1][1] = c2;
+ Result[1][2] = c1 * s2;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c1 * s3 + c2 * c3 * s1;
+ Result[2][1] =-c3 * s2;
+ Result[2][2] = c1 * c2 * c3 - s1 * s3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZY
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c2 * c3 - s1 * s3;
+ Result[0][1] = c3 * s2;
+ Result[0][2] =-c1 * s3 - c2 * c3 * s1;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] =-c1 * s2;
+ Result[1][1] = c2;
+ Result[1][2] = s1 * s2;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c3 * s1 + c1 * c2 * s3;
+ Result[2][1] = s2 * s3;
+ Result[2][2] = c1 * c3 - c2 * s1 * s3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYZ
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c2 * c3 - s1 * s3;
+ Result[0][1] = c1 * s3 + c2 * c3 * s1;
+ Result[0][2] =-c3 * s2;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] =-c3 * s1 - c1 * c2 * s3;
+ Result[1][1] = c1 * c3 - c2 * s1 * s3;
+ Result[1][2] = s2 * s3;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c1 * s2;
+ Result[2][1] = s1 * s2;
+ Result[2][2] = c2;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXZ
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c3 - c2 * s1 * s3;
+ Result[0][1] = c3 * s1 + c1 * c2 * s3;
+ Result[0][2] = s2 *s3;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] =-c1 * s3 - c2 * c3 * s1;
+ Result[1][1] = c1 * c2 * c3 - s1 * s3;
+ Result[1][2] = c3 * s2;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = s1 * s2;
+ Result[2][1] =-c1 * s2;
+ Result[2][2] = c2;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZY
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c2 * c3;
+ Result[0][1] = s1 * s3 + c1 * c3 * s2;
+ Result[0][2] = c3 * s1 * s2 - c1 * s3;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] =-s2;
+ Result[1][1] = c1 * c2;
+ Result[1][2] = c2 * s1;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c2 * s3;
+ Result[2][1] = c1 * s2 * s3 - c3 * s1;
+ Result[2][2] = c1 * c3 + s1 * s2 *s3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZX
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c2;
+ Result[0][1] = s2;
+ Result[0][2] =-c2 * s1;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = s1 * s3 - c1 * c3 * s2;
+ Result[1][1] = c2 * c3;
+ Result[1][2] = c1 * s3 + c3 * s1 * s2;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c3 * s1 + c1 * s2 * s3;
+ Result[2][1] =-c2 * s3;
+ Result[2][2] = c1 * c3 - s1 * s2 * s3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYX
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c2;
+ Result[0][1] = c2 * s1;
+ Result[0][2] =-s2;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = c1 * s2 * s3 - c3 * s1;
+ Result[1][1] = c1 * c3 + s1 * s2 * s3;
+ Result[1][2] = c2 * s3;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = s1 * s3 + c1 * c3 * s2;
+ Result[2][1] = c3 * s1 * s2 - c1 * s3;
+ Result[2][2] = c2 * c3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXY
+ (
+ T const & t1,
+ T const & t2,
+ T const & t3
+ )
+ {
+ T c1 = glm::cos(t1);
+ T s1 = glm::sin(t1);
+ T c2 = glm::cos(t2);
+ T s2 = glm::sin(t2);
+ T c3 = glm::cos(t3);
+ T s3 = glm::sin(t3);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = c1 * c3 - s1 * s2 * s3;
+ Result[0][1] = c3 * s1 + c1 * s2 * s3;
+ Result[0][2] =-c2 * s3;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] =-c2 * s1;
+ Result[1][1] = c1 * c2;
+ Result[1][2] = s2;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = c1 * s3 + c3 * s1 * s2;
+ Result[2][1] = s1 * s3 - c1 * c3 * s2;
+ Result[2][2] = c2 * c3;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll
+ (
+ T const& yaw,
+ T const& pitch,
+ T const& roll
+ )
+ {
+ T tmp_ch = glm::cos(yaw);
+ T tmp_sh = glm::sin(yaw);
+ T tmp_cp = glm::cos(pitch);
+ T tmp_sp = glm::sin(pitch);
+ T tmp_cb = glm::cos(roll);
+ T tmp_sb = glm::sin(roll);
+
+ mat<4, 4, T, defaultp> Result;
+ Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb;
+ Result[0][1] = tmp_sb * tmp_cp;
+ Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb;
+ Result[0][3] = static_cast<T>(0);
+ Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb;
+ Result[1][1] = tmp_cb * tmp_cp;
+ Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb;
+ Result[1][3] = static_cast<T>(0);
+ Result[2][0] = tmp_sh * tmp_cp;
+ Result[2][1] = -tmp_sp;
+ Result[2][2] = tmp_ch * tmp_cp;
+ Result[2][3] = static_cast<T>(0);
+ Result[3][0] = static_cast<T>(0);
+ Result[3][1] = static_cast<T>(0);
+ Result[3][2] = static_cast<T>(0);
+ Result[3][3] = static_cast<T>(1);
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> orientate2
+ (
+ T const& angle
+ )
+ {
+ T c = glm::cos(angle);
+ T s = glm::sin(angle);
+
+ mat<2, 2, T, defaultp> Result;
+ Result[0][0] = c;
+ Result[0][1] = s;
+ Result[1][0] = -s;
+ Result[1][1] = c;
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> orientate3
+ (
+ T const& angle
+ )
+ {
+ T c = glm::cos(angle);
+ T s = glm::sin(angle);
+
+ mat<3, 3, T, defaultp> Result;
+ Result[0][0] = c;
+ Result[0][1] = s;
+ Result[0][2] = T(0.0);
+ Result[1][0] = -s;
+ Result[1][1] = c;
+ Result[1][2] = T(0.0);
+ Result[2][0] = T(0.0);
+ Result[2][1] = T(0.0);
+ Result[2][2] = T(1.0);
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orientate3
+ (
+ vec<3, T, Q> const& angles
+ )
+ {
+ return mat<3, 3, T, Q>(yawPitchRoll(angles.z, angles.x, angles.y));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientate4
+ (
+ vec<3, T, Q> const& angles
+ )
+ {
+ return yawPitchRoll(angles.z, angles.x, angles.y);
+ }
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, // was GLM_FUNC_DECL: a definition in the .inl must use GLM_FUNC_QUALIFIER, matching every other extract* function below
+										T & t1,
+										T & t2,
+										T & t3)
+	{
+		T T1 = glm::atan2<T, defaultp>(M[2][1], M[2][2]);
+		T C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]);
+		T T2 = glm::atan2<T, defaultp>(-M[2][0], C2);
+		T S1 = glm::sin(T1);
+		T C1 = glm::cos(T1);
+		T T3 = glm::atan2<T, defaultp>(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2]); // removed stray space inside the M[1][2] subscript
+		t1 = -T1;
+		t2 = -T2;
+		t3 = -T3;
+	}
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[2][0], M[2][2]);
+ T C2 = glm::sqrt(M[0][1]*M[0][1] + M[1][1]*M[1][1]);
+ T T2 = glm::atan2<T, defaultp>(-M[2][1], C2);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(S1*M[1][2] - C1*M[1][0], C1*M[0][0] - S1*M[0][2]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[0][2], M[0][1]);
+ T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]);
+ T T2 = glm::atan2<T, defaultp>(S2, M[0][0]);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(C1*M[1][2] - S1*M[1][1], C1*M[2][2] - S1*M[2][1]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[0][1], -M[0][2]);
+ T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]);
+ T T2 = glm::atan2<T, defaultp>(S2, M[0][0]);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(-C1*M[2][1] - S1*M[2][2], C1*M[1][1] + S1*M[1][2]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[1][0], M[1][2]);
+ T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]);
+ T T2 = glm::atan2<T, defaultp>(S2, M[1][1]);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(C1*M[2][0] - S1*M[2][2], C1*M[0][0] - S1*M[0][2]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[1][2], -M[1][0]);
+ T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]);
+ T T2 = glm::atan2<T, defaultp>(S2, M[1][1]);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(-S1*M[0][0] - C1*M[0][2], S1*M[2][0] + C1*M[2][2]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[2][1], M[2][0]);
+ T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]);
+ T T2 = glm::atan2<T, defaultp>(S2, M[2][2]);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(C1*M[0][1] - S1*M[0][0], C1*M[1][1] - S1*M[1][0]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[2][0], -M[2][1]);
+ T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]);
+ T T2 = glm::atan2<T, defaultp>(S2, M[2][2]);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(-C1*M[1][0] - S1*M[1][1], C1*M[0][0] + S1*M[0][1]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[1][2], M[1][1]);
+ T C2 = glm::sqrt(M[0][0]*M[0][0] + M[2][0]*M[2][0]);
+ T T2 = glm::atan2<T, defaultp>(-M[1][0], C2);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(S1*M[0][1] - C1*M[0][2], C1*M[2][2] - S1*M[2][1]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(-M[0][2], M[0][0]);
+ T C2 = glm::sqrt(M[1][1]*M[1][1] + M[2][1]*M[2][1]);
+ T T2 = glm::atan2<T, defaultp>(M[0][1], C2);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(S1*M[1][0] + C1*M[1][2], S1*M[2][0] + C1*M[2][2]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(M[0][1], M[0][0]);
+ T C2 = glm::sqrt(M[1][2]*M[1][2] + M[2][2]*M[2][2]);
+ T T2 = glm::atan2<T, defaultp>(-M[0][2], C2);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(S1*M[2][0] - C1*M[2][1], C1*M[1][1] - S1*M[1][0]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M,
+ T & t1,
+ T & t2,
+ T & t3)
+ {
+ T T1 = glm::atan2<T, defaultp>(-M[1][0], M[1][1]);
+ T C2 = glm::sqrt(M[0][2]*M[0][2] + M[2][2]*M[2][2]);
+ T T2 = glm::atan2<T, defaultp>(M[1][2], C2);
+ T S1 = glm::sin(T1);
+ T C1 = glm::cos(T1);
+ T T3 = glm::atan2<T, defaultp>(C1*M[2][0] + S1*M[2][1], C1*M[0][0] + S1*M[0][1]);
+ t1 = T1;
+ t2 = T2;
+ t3 = T3;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/extend.hpp b/3rdparty/glm/source/glm/gtx/extend.hpp
new file mode 100644
index 0000000..28b7c5c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/extend.hpp
@@ -0,0 +1,42 @@
+/// @ref gtx_extend
+/// @file glm/gtx/extend.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_extend GLM_GTX_extend
+/// @ingroup gtx
+///
+/// Include <glm/gtx/extend.hpp> to use the features of this extension.
+///
+/// Extend a position from a source to a position at a defined length.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_extend extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_extend
+ /// @{
+
+ /// Extends of Length the Origin position using the (Source - Origin) direction.
+ /// @see gtx_extend
+ template<typename genType>
+ GLM_FUNC_DECL genType extend(
+ genType const& Origin,
+ genType const& Source,
+ typename genType::value_type const Length);
+
+ /// @}
+}//namespace glm
+
+#include "extend.inl"
diff --git a/3rdparty/glm/source/glm/gtx/extend.inl b/3rdparty/glm/source/glm/gtx/extend.inl
new file mode 100644
index 0000000..32128eb
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/extend.inl
@@ -0,0 +1,48 @@
+/// @ref gtx_extend
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType extend
+ (
+ genType const& Origin,
+ genType const& Source,
+ genType const& Distance
+ )
+ {
+ return Origin + (Source - Origin) * Distance;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> extend
+ (
+ vec<2, T, Q> const& Origin,
+ vec<2, T, Q> const& Source,
+ T const& Distance
+ )
+ {
+ return Origin + (Source - Origin) * Distance;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> extend
+ (
+ vec<3, T, Q> const& Origin,
+ vec<3, T, Q> const& Source,
+ T const& Distance
+ )
+ {
+ return Origin + (Source - Origin) * Distance;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> extend
+ (
+ vec<4, T, Q> const& Origin,
+ vec<4, T, Q> const& Source,
+ T const& Distance
+ )
+ {
+ return Origin + (Source - Origin) * Distance;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/extended_min_max.hpp b/3rdparty/glm/source/glm/gtx/extended_min_max.hpp
new file mode 100644
index 0000000..025eda2
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/extended_min_max.hpp
@@ -0,0 +1,137 @@
+/// @ref gtx_extended_min_max
+/// @file glm/gtx/extended_min_max.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_extended_min_max GLM_GTX_extented_min_max
+/// @ingroup gtx
+///
+/// Include <glm/gtx/extended_min_max.hpp> to use the features of this extension.
+///
+/// Min and max functions for 3 to 4 parameters.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../ext/vector_common.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_extented_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_extented_min_max extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_extended_min_max
+ /// @{
+
+ /// Return the minimum component-wise values of 3 inputs
+ /// @see gtx_extented_min_max
+ template<typename T>
+ GLM_FUNC_DECL T min(
+ T const& x,
+ T const& y,
+ T const& z);
+
+ /// Return the minimum component-wise values of 3 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> min(
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z);
+
+ /// Return the minimum component-wise values of 3 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> min(
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z);
+
+ /// Return the minimum component-wise values of 4 inputs
+ /// @see gtx_extented_min_max
+ template<typename T>
+ GLM_FUNC_DECL T min(
+ T const& x,
+ T const& y,
+ T const& z,
+ T const& w);
+
+ /// Return the minimum component-wise values of 4 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> min(
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z,
+ typename C<T>::T const& w);
+
+ /// Return the minimum component-wise values of 4 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> min(
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z,
+ C<T> const& w);
+
+ /// Return the maximum component-wise values of 3 inputs
+ /// @see gtx_extented_min_max
+ template<typename T>
+ GLM_FUNC_DECL T max(
+ T const& x,
+ T const& y,
+ T const& z);
+
+ /// Return the maximum component-wise values of 3 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> max(
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z);
+
+ /// Return the maximum component-wise values of 3 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> max(
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z);
+
+ /// Return the maximum component-wise values of 4 inputs
+ /// @see gtx_extented_min_max
+ template<typename T>
+ GLM_FUNC_DECL T max(
+ T const& x,
+ T const& y,
+ T const& z,
+ T const& w);
+
+ /// Return the maximum component-wise values of 4 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> max(
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z,
+ typename C<T>::T const& w);
+
+ /// Return the maximum component-wise values of 4 inputs
+ /// @see gtx_extented_min_max
+ template<typename T, template<typename> class C>
+ GLM_FUNC_DECL C<T> max(
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z,
+ C<T> const& w);
+
+ /// @}
+}//namespace glm
+
+#include "extended_min_max.inl"
diff --git a/3rdparty/glm/source/glm/gtx/extended_min_max.inl b/3rdparty/glm/source/glm/gtx/extended_min_max.inl
new file mode 100644
index 0000000..de5998f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/extended_min_max.inl
@@ -0,0 +1,138 @@
+/// @ref gtx_extended_min_max
+
+namespace glm
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER T min(
+ T const& x,
+ T const& y,
+ T const& z)
+ {
+ return glm::min(glm::min(x, y), z);
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> min
+ (
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z
+ )
+ {
+ return glm::min(glm::min(x, y), z);
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> min
+ (
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z
+ )
+ {
+ return glm::min(glm::min(x, y), z);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T min
+ (
+ T const& x,
+ T const& y,
+ T const& z,
+ T const& w
+ )
+ {
+ return glm::min(glm::min(x, y), glm::min(z, w));
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> min
+ (
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z,
+ typename C<T>::T const& w
+ )
+ {
+ return glm::min(glm::min(x, y), glm::min(z, w));
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> min
+ (
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z,
+ C<T> const& w
+ )
+ {
+ return glm::min(glm::min(x, y), glm::min(z, w));
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T max(
+ T const& x,
+ T const& y,
+ T const& z)
+ {
+ return glm::max(glm::max(x, y), z);
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> max
+ (
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z
+ )
+ {
+ return glm::max(glm::max(x, y), z);
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> max
+ (
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z
+ )
+ {
+ return glm::max(glm::max(x, y), z);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T max
+ (
+ T const& x,
+ T const& y,
+ T const& z,
+ T const& w
+ )
+ {
+ return glm::max(glm::max(x, y), glm::max(z, w));
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> max
+ (
+ C<T> const& x,
+ typename C<T>::T const& y,
+ typename C<T>::T const& z,
+ typename C<T>::T const& w
+ )
+ {
+ return glm::max(glm::max(x, y), glm::max(z, w));
+ }
+
+ template<typename T, template<typename> class C>
+ GLM_FUNC_QUALIFIER C<T> max
+ (
+ C<T> const& x,
+ C<T> const& y,
+ C<T> const& z,
+ C<T> const& w
+ )
+ {
+ return glm::max(glm::max(x, y), glm::max(z, w));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/exterior_product.hpp b/3rdparty/glm/source/glm/gtx/exterior_product.hpp
new file mode 100644
index 0000000..5522df7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/exterior_product.hpp
@@ -0,0 +1,45 @@
+/// @ref gtx_exterior_product
+/// @file glm/gtx/exterior_product.hpp
+///
+/// @see core (dependence)
+/// @see gtx_exterior_product (dependence)
+///
+/// @defgroup gtx_exterior_product GLM_GTX_exterior_product
+/// @ingroup gtx
+///
+/// Include <glm/gtx/exterior_product.hpp> to use the features of this extension.
+///
+/// @brief Allow to compute the exterior product (2D cross product) of two vectors
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_exterior_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_exterior_product extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_exterior_product
+ /// @{
+
+ /// Returns the cross product of x and y.
+ ///
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="https://en.wikipedia.org/wiki/Exterior_algebra#Cross_and_triple_products">Exterior product</a>
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T cross(vec<2, T, Q> const& v, vec<2, T, Q> const& u);
+
+ /// @}
+} //namespace glm
+
+#include "exterior_product.inl"
diff --git a/3rdparty/glm/source/glm/gtx/exterior_product.inl b/3rdparty/glm/source/glm/gtx/exterior_product.inl
new file mode 100644
index 0000000..93661fd
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/exterior_product.inl
@@ -0,0 +1,26 @@
+/// @ref gtx_exterior_product
+
+#include <limits>
+
+namespace glm {
+namespace detail
+{
+ template<typename T, qualifier Q, bool Aligned>
+ struct compute_cross_vec2
+ {
+ GLM_FUNC_QUALIFIER static T call(vec<2, T, Q> const& v, vec<2, T, Q> const& u)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'cross' accepts only floating-point inputs");
+
+ return v.x * u.y - u.x * v.y;
+ }
+ };
+}//namespace detail
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T cross(vec<2, T, Q> const& x, vec<2, T, Q> const& y)
+ {
+ return detail::compute_cross_vec2<T, Q, detail::is_aligned<Q>::value>::call(x, y);
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtx/fast_exponential.hpp b/3rdparty/glm/source/glm/gtx/fast_exponential.hpp
new file mode 100644
index 0000000..6fb7286
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/fast_exponential.hpp
@@ -0,0 +1,95 @@
+/// @ref gtx_fast_exponential
+/// @file glm/gtx/fast_exponential.hpp
+///
+/// @see core (dependence)
+/// @see gtx_half_float (dependence)
+///
+/// @defgroup gtx_fast_exponential GLM_GTX_fast_exponential
+/// @ingroup gtx
+///
+/// Include <glm/gtx/fast_exponential.hpp> to use the features of this extension.
+///
+/// Fast but less accurate implementations of exponential based functions.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_fast_exponential is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_fast_exponential extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_fast_exponential
+ /// @{
+
+ /// Faster than the common pow function but less accurate.
+ /// @see gtx_fast_exponential
+ template<typename genType>
+ GLM_FUNC_DECL genType fastPow(genType x, genType y);
+
+ /// Faster than the common pow function but less accurate.
+ /// @see gtx_fast_exponential
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastPow(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Faster than the common pow function but less accurate.
+ /// @see gtx_fast_exponential
+ template<typename genTypeT, typename genTypeU>
+ GLM_FUNC_DECL genTypeT fastPow(genTypeT x, genTypeU y);
+
+ /// Faster than the common pow function but less accurate.
+ /// @see gtx_fast_exponential
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastPow(vec<L, T, Q> const& x);
+
+ /// Faster than the common exp function but less accurate.
+ /// @see gtx_fast_exponential
+ template<typename T>
+ GLM_FUNC_DECL T fastExp(T x);
+
+ /// Faster than the common exp function but less accurate.
+ /// @see gtx_fast_exponential
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastExp(vec<L, T, Q> const& x);
+
+ /// Faster than the common log function but less accurate.
+ /// @see gtx_fast_exponential
+ template<typename T>
+ GLM_FUNC_DECL T fastLog(T x);
+
+ /// Faster than the common exp2 function but less accurate.
+ /// @see gtx_fast_exponential
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastLog(vec<L, T, Q> const& x);
+
+ /// Faster than the common exp2 function but less accurate.
+ /// @see gtx_fast_exponential
+ template<typename T>
+ GLM_FUNC_DECL T fastExp2(T x);
+
+ /// Faster than the common exp2 function but less accurate.
+ /// @see gtx_fast_exponential
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastExp2(vec<L, T, Q> const& x);
+
+ /// Faster than the common log2 function but less accurate.
+ /// @see gtx_fast_exponential
+ template<typename T>
+ GLM_FUNC_DECL T fastLog2(T x);
+
+ /// Faster than the common log2 function but less accurate.
+ /// @see gtx_fast_exponential
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastLog2(vec<L, T, Q> const& x);
+
+ /// @}
+}//namespace glm
+
+#include "fast_exponential.inl"
diff --git a/3rdparty/glm/source/glm/gtx/fast_exponential.inl b/3rdparty/glm/source/glm/gtx/fast_exponential.inl
new file mode 100644
index 0000000..5b11742
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/fast_exponential.inl
@@ -0,0 +1,136 @@
+/// @ref gtx_fast_exponential
+
+namespace glm
+{
+ // fastPow:
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastPow(genType x, genType y)
+ {
+ return exp(y * log(x));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastPow(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return exp(y * log(x));
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastPow(T x, int y)
+ {
+ T f = static_cast<T>(1);
+ for(int i = 0; i < y; ++i)
+ f *= x;
+ return f;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastPow(vec<L, T, Q> const& x, vec<L, int, Q> const& y)
+ {
+ vec<L, T, Q> Result;
+ for(length_t i = 0, n = x.length(); i < n; ++i)
+ Result[i] = fastPow(x[i], y[i]);
+ return Result;
+ }
+
+ // fastExp
+ // Note: This function provides accurate results only for value between -1 and 1, else avoid it.
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastExp(T x)
+ {
+ // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower.
+ // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f))));
+ T x2 = x * x;
+ T x3 = x2 * x;
+ T x4 = x3 * x;
+ T x5 = x4 * x;
+ return T(1) + x + (x2 * T(0.5)) + (x3 * T(0.1666666667)) + (x4 * T(0.041666667)) + (x5 * T(0.008333333333));
+ }
+	/* // Try to handle all values of float... but often slower than std::exp, glm::floor and the loop kill the performance
+ GLM_FUNC_QUALIFIER float fastExp(float x)
+ {
+ const float e = 2.718281828f;
+ const float IntegerPart = floor(x);
+ const float FloatPart = x - IntegerPart;
+ float z = 1.f;
+
+ for(int i = 0; i < int(IntegerPart); ++i)
+ z *= e;
+
+ const float x2 = FloatPart * FloatPart;
+ const float x3 = x2 * FloatPart;
+ const float x4 = x3 * FloatPart;
+ const float x5 = x4 * FloatPart;
+ return z * (1.0f + FloatPart + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f));
+ }
+
+ // Increase accuracy on number bigger that 1 and smaller than -1 but it's not enough for high and negative numbers
+ GLM_FUNC_QUALIFIER float fastExp(float x)
+ {
+ // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower.
+ // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f))));
+ float x2 = x * x;
+ float x3 = x2 * x;
+ float x4 = x3 * x;
+ float x5 = x4 * x;
+ float x6 = x5 * x;
+ float x7 = x6 * x;
+ float x8 = x7 * x;
+ return 1.0f + x + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)+ (x6 * 0.00138888888888f) + (x7 * 0.000198412698f) + (x8 * 0.0000248015873f);;
+ }
+ */
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastExp(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastExp, x);
+ }
+
+ // fastLog
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastLog(genType x)
+ {
+ return std::log(x);
+ }
+
+ /* Slower than the VC7.1 function...
+ GLM_FUNC_QUALIFIER float fastLog(float x)
+ {
+ float y1 = (x - 1.0f) / (x + 1.0f);
+ float y2 = y1 * y1;
+ return 2.0f * y1 * (1.0f + y2 * (0.3333333333f + y2 * (0.2f + y2 * 0.1428571429f)));
+ }
+ */
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastLog(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastLog, x);
+ }
+
+ //fastExp2, ln2 = 0.69314718055994530941723212145818f
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastExp2(genType x)
+ {
+ return fastExp(static_cast<genType>(0.69314718055994530941723212145818) * x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastExp2(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastExp2, x);
+ }
+
+ // fastLog2, ln2 = 0.69314718055994530941723212145818f
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastLog2(genType x)
+ {
+ return fastLog(x) / static_cast<genType>(0.69314718055994530941723212145818);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastLog2(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastLog2, x);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/fast_square_root.hpp b/3rdparty/glm/source/glm/gtx/fast_square_root.hpp
new file mode 100644
index 0000000..ac42a9c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/fast_square_root.hpp
@@ -0,0 +1,98 @@
+/// @ref gtx_fast_square_root
+/// @file glm/gtx/fast_square_root.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_fast_square_root GLM_GTX_fast_square_root
+/// @ingroup gtx
+///
+/// Include <glm/gtx/fast_square_root.hpp> to use the features of this extension.
+///
+/// Fast but less accurate implementations of square root based functions.
+/// - Sqrt optimisation based on Newton's method,
+/// www.gamedev.net/community/forums/topic.asp?topic_id=139956
+
+#pragma once
+
+// Dependency:
+#include "../common.hpp"
+#include "../exponential.hpp"
+#include "../geometric.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_fast_square_root is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_fast_square_root extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_fast_square_root
+ /// @{
+
+ /// Faster than the common sqrt function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType fastSqrt(genType x);
+
+ /// Faster than the common sqrt function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastSqrt(vec<L, T, Q> const& x);
+
+ /// Faster than the common inversesqrt function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType fastInverseSqrt(genType x);
+
+ /// Faster than the common inversesqrt function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastInverseSqrt(vec<L, T, Q> const& x);
+
+ /// Faster than the common length function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType fastLength(genType x);
+
+ /// Faster than the common length function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T fastLength(vec<L, T, Q> const& x);
+
+ /// Faster than the common distance function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType fastDistance(genType x, genType y);
+
+ /// Faster than the common distance function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T fastDistance(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Faster than the common normalize function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType fastNormalize(genType x);
+
+ /// Faster than the common normalize function but less accurate.
+ ///
+ /// @see gtx_fast_square_root extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> fastNormalize(vec<L, T, Q> const& x);
+
+ /// @}
+}// namespace glm
+
+#include "fast_square_root.inl"
diff --git a/3rdparty/glm/source/glm/gtx/fast_square_root.inl b/3rdparty/glm/source/glm/gtx/fast_square_root.inl
new file mode 100644
index 0000000..4e6c6de
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/fast_square_root.inl
@@ -0,0 +1,75 @@
+/// @ref gtx_fast_square_root
+
+namespace glm
+{
+ // fastSqrt
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastSqrt(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'fastSqrt' only accept floating-point input");
+
+ return genType(1) / fastInverseSqrt(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastSqrt(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastSqrt, x);
+ }
+
+ // fastInversesqrt
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastInverseSqrt(genType x)
+ {
+ return detail::compute_inversesqrt<1, genType, lowp, detail::is_aligned<lowp>::value>::call(vec<1, genType, lowp>(x)).x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastInverseSqrt(vec<L, T, Q> const& x)
+ {
+ return detail::compute_inversesqrt<L, T, Q, detail::is_aligned<Q>::value>::call(x);
+ }
+
+ // fastLength
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastLength(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'fastLength' only accept floating-point inputs");
+
+ return abs(x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T fastLength(vec<L, T, Q> const& x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fastLength' only accept floating-point inputs");
+
+ return fastSqrt(dot(x, x));
+ }
+
+ // fastDistance
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastDistance(genType x, genType y)
+ {
+ return fastLength(y - x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T fastDistance(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return fastLength(y - x);
+ }
+
+ // fastNormalize
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType fastNormalize(genType x)
+ {
+ return x > genType(0) ? genType(1) : -genType(1);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastNormalize(vec<L, T, Q> const& x)
+ {
+ return x * fastInverseSqrt(dot(x, x));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/fast_trigonometry.hpp b/3rdparty/glm/source/glm/gtx/fast_trigonometry.hpp
new file mode 100644
index 0000000..2650d6e
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/fast_trigonometry.hpp
@@ -0,0 +1,79 @@
+/// @ref gtx_fast_trigonometry
+/// @file glm/gtx/fast_trigonometry.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_fast_trigonometry GLM_GTX_fast_trigonometry
+/// @ingroup gtx
+///
+/// Include <glm/gtx/fast_trigonometry.hpp> to use the features of this extension.
+///
+/// Fast but less accurate implementations of trigonometric functions.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/constants.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_fast_trigonometry is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_fast_trigonometry extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_fast_trigonometry
+ /// @{
+
+ /// Wrap an angle to [0 2pi[
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T wrapAngle(T angle);
+
+ /// Faster than the common sin function but less accurate.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastSin(T angle);
+
+ /// Faster than the common cos function but less accurate.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastCos(T angle);
+
+ /// Faster than the common tan function but less accurate.
+ /// Defined between -2pi and 2pi.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastTan(T angle);
+
+ /// Faster than the common asin function but less accurate.
+ /// Defined between -2pi and 2pi.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastAsin(T angle);
+
+ /// Faster than the common acos function but less accurate.
+ /// Defined between -2pi and 2pi.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastAcos(T angle);
+
+ /// Faster than the common atan function but less accurate.
+ /// Defined between -2pi and 2pi.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastAtan(T y, T x);
+
+ /// Faster than the common atan function but less accurate.
+ /// Defined between -2pi and 2pi.
+ /// From GLM_GTX_fast_trigonometry extension.
+ template<typename T>
+ GLM_FUNC_DECL T fastAtan(T angle);
+
+ /// @}
+}//namespace glm
+
+#include "fast_trigonometry.inl"
diff --git a/3rdparty/glm/source/glm/gtx/fast_trigonometry.inl b/3rdparty/glm/source/glm/gtx/fast_trigonometry.inl
new file mode 100644
index 0000000..1a710cb
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/fast_trigonometry.inl
@@ -0,0 +1,142 @@
+/// @ref gtx_fast_trigonometry
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> taylorCos(vec<L, T, Q> const& x)
+ {
+ return static_cast<T>(1)
+ - (x * x) * (1.f / 2.f)
+ + ((x * x) * (x * x)) * (1.f / 24.f)
+ - (((x * x) * (x * x)) * (x * x)) * (1.f / 720.f)
+ + (((x * x) * (x * x)) * ((x * x) * (x * x))) * (1.f / 40320.f);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T cos_52s(T x)
+ {
+ T const xx(x * x);
+ return (T(0.9999932946) + xx * (T(-0.4999124376) + xx * (T(0.0414877472) + xx * T(-0.0012712095))));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> cos_52s(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(cos_52s, x);
+ }
+}//namespace detail
+
+ // wrapAngle
+ template<typename T>
+ GLM_FUNC_QUALIFIER T wrapAngle(T angle)
+ {
+ return abs<T>(mod<T>(angle, two_pi<T>()));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> wrapAngle(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(wrapAngle, x);
+ }
+
+ // cos
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastCos(T x)
+ {
+ T const angle(wrapAngle<T>(x));
+
+ if(angle < half_pi<T>())
+ return detail::cos_52s(angle);
+ if(angle < pi<T>())
+ return -detail::cos_52s(pi<T>() - angle);
+ if(angle < (T(3) * half_pi<T>()))
+ return -detail::cos_52s(angle - pi<T>());
+
+ return detail::cos_52s(two_pi<T>() - angle);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastCos(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastCos, x);
+ }
+
+ // sin
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastSin(T x)
+ {
+ return fastCos<T>(half_pi<T>() - x);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastSin(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastSin, x);
+ }
+
+ // tan
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastTan(T x)
+ {
+ return x + (x * x * x * T(0.3333333333)) + (x * x * x * x * x * T(0.1333333333333)) + (x * x * x * x * x * x * x * T(0.0539682539));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastTan(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastTan, x);
+ }
+
+ // asin
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastAsin(T x)
+ {
+ return x + (x * x * x * T(0.166666667)) + (x * x * x * x * x * T(0.075)) + (x * x * x * x * x * x * x * T(0.0446428571)) + (x * x * x * x * x * x * x * x * x * T(0.0303819444));// + (x * x * x * x * x * x * x * x * x * x * x * T(0.022372159));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastAsin(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastAsin, x);
+ }
+
+ // acos
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastAcos(T x)
+ {
+ return T(1.5707963267948966192313216916398) - fastAsin(x); //(PI / 2)
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastAcos(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastAcos, x);
+ }
+
+ // atan
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastAtan(T y, T x)
+ {
+ T sgn = sign(y) * sign(x);
+ return abs(fastAtan(y / x)) * sgn;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastAtan(vec<L, T, Q> const& y, vec<L, T, Q> const& x)
+ {
+ return detail::functor2<vec, L, T, Q>::call(fastAtan, y, x);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T fastAtan(T x)
+ {
+ return x - (x * x * x * T(0.333333333333)) + (x * x * x * x * x * T(0.2)) - (x * x * x * x * x * x * x * T(0.1428571429)) + (x * x * x * x * x * x * x * x * x * T(0.111111111111)) - (x * x * x * x * x * x * x * x * x * x * x * T(0.0909090909));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> fastAtan(vec<L, T, Q> const& x)
+ {
+ return detail::functor1<vec, L, T, T, Q>::call(fastAtan, x);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/float_notmalize.inl b/3rdparty/glm/source/glm/gtx/float_notmalize.inl
new file mode 100644
index 0000000..8cdbc5a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/float_notmalize.inl
@@ -0,0 +1,13 @@
+/// @ref gtx_float_normalize
+
+#include <limits>
+
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, float, Q> floatNormalize(vec<L, T, Q> const& v)
+ {
+ return vec<L, float, Q>(v) / static_cast<float>(std::numeric_limits<T>::max());
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/functions.hpp b/3rdparty/glm/source/glm/gtx/functions.hpp
new file mode 100644
index 0000000..9f4166c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/functions.hpp
@@ -0,0 +1,56 @@
+/// @ref gtx_functions
+/// @file glm/gtx/functions.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_functions GLM_GTX_functions
+/// @ingroup gtx
+///
+/// Include <glm/gtx/functions.hpp> to use the features of this extension.
+///
+/// List of useful common functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/type_vec2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_functions is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_functions extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_functions
+ /// @{
+
+ /// 1D gauss function
+ ///
+ /// @see gtc_epsilon
+ template<typename T>
+ GLM_FUNC_DECL T gauss(
+ T x,
+ T ExpectedValue,
+ T StandardDeviation);
+
+ /// 2D gauss function
+ ///
+ /// @see gtc_epsilon
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T gauss(
+ vec<2, T, Q> const& Coord,
+ vec<2, T, Q> const& ExpectedValue,
+ vec<2, T, Q> const& StandardDeviation);
+
+ /// @}
+}//namespace glm
+
+#include "functions.inl"
+
diff --git a/3rdparty/glm/source/glm/gtx/functions.inl b/3rdparty/glm/source/glm/gtx/functions.inl
new file mode 100644
index 0000000..29cbb20
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/functions.inl
@@ -0,0 +1,30 @@
+/// @ref gtx_functions
+
+#include "../exponential.hpp"
+
+namespace glm
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER T gauss
+ (
+ T x,
+ T ExpectedValue,
+ T StandardDeviation
+ )
+ {
+ return exp(-((x - ExpectedValue) * (x - ExpectedValue)) / (static_cast<T>(2) * StandardDeviation * StandardDeviation)) / (StandardDeviation * sqrt(static_cast<T>(6.28318530717958647692528676655900576)));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T gauss
+ (
+ vec<2, T, Q> const& Coord,
+ vec<2, T, Q> const& ExpectedValue,
+ vec<2, T, Q> const& StandardDeviation
+ )
+ {
+ vec<2, T, Q> const Squared = ((Coord - ExpectedValue) * (Coord - ExpectedValue)) / (static_cast<T>(2) * StandardDeviation * StandardDeviation);
+ return exp(-(Squared.x + Squared.y));
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtx/gradient_paint.hpp b/3rdparty/glm/source/glm/gtx/gradient_paint.hpp
new file mode 100644
index 0000000..6f85bf4
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/gradient_paint.hpp
@@ -0,0 +1,53 @@
+/// @ref gtx_gradient_paint
+/// @file glm/gtx/gradient_paint.hpp
+///
+/// @see core (dependence)
+/// @see gtx_optimum_pow (dependence)
+///
+/// @defgroup gtx_gradient_paint GLM_GTX_gradient_paint
+/// @ingroup gtx
+///
+/// Include <glm/gtx/gradient_paint.hpp> to use the features of this extension.
+///
+/// Functions that return the color of procedural gradient for specific coordinates.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/optimum_pow.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_gradient_paint is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_gradient_paint extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_gradient_paint
+ /// @{
+
+ /// Return a color from a radial gradient.
+ /// @see - gtx_gradient_paint
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T radialGradient(
+ vec<2, T, Q> const& Center,
+ T const& Radius,
+ vec<2, T, Q> const& Focal,
+ vec<2, T, Q> const& Position);
+
+ /// Return a color from a linear gradient.
+ /// @see - gtx_gradient_paint
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T linearGradient(
+ vec<2, T, Q> const& Point0,
+ vec<2, T, Q> const& Point1,
+ vec<2, T, Q> const& Position);
+
+ /// @}
+}// namespace glm
+
+#include "gradient_paint.inl"
diff --git a/3rdparty/glm/source/glm/gtx/gradient_paint.inl b/3rdparty/glm/source/glm/gtx/gradient_paint.inl
new file mode 100644
index 0000000..4c495e6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/gradient_paint.inl
@@ -0,0 +1,36 @@
+/// @ref gtx_gradient_paint
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T radialGradient
+ (
+ vec<2, T, Q> const& Center,
+ T const& Radius,
+ vec<2, T, Q> const& Focal,
+ vec<2, T, Q> const& Position
+ )
+ {
+ vec<2, T, Q> F = Focal - Center;
+ vec<2, T, Q> D = Position - Focal;
+ T Radius2 = pow2(Radius);
+ T Fx2 = pow2(F.x);
+ T Fy2 = pow2(F.y);
+
+ T Numerator = (D.x * F.x + D.y * F.y) + sqrt(Radius2 * (pow2(D.x) + pow2(D.y)) - pow2(D.x * F.y - D.y * F.x));
+ T Denominator = Radius2 - (Fx2 + Fy2);
+ return Numerator / Denominator;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T linearGradient
+ (
+ vec<2, T, Q> const& Point0,
+ vec<2, T, Q> const& Point1,
+ vec<2, T, Q> const& Position
+ )
+ {
+ vec<2, T, Q> Dist = Point1 - Point0;
+ return (Dist.x * (Position.x - Point0.x) + Dist.y * (Position.y - Point0.y)) / glm::dot(Dist, Dist);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/handed_coordinate_space.hpp b/3rdparty/glm/source/glm/gtx/handed_coordinate_space.hpp
new file mode 100644
index 0000000..3c85968
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/handed_coordinate_space.hpp
@@ -0,0 +1,50 @@
+/// @ref gtx_handed_coordinate_space
+/// @file glm/gtx/handed_coordinate_space.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_handed_coordinate_space GLM_GTX_handed_coordinate_space
+/// @ingroup gtx
+///
+/// Include <glm/gtx/handed_coordinate_system.hpp> to use the features of this extension.
+///
+/// To know if a set of three basis vectors defines a right or left-handed coordinate system.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_handed_coordinate_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_handed_coordinate_space extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_handed_coordinate_space
+ /// @{
+
+	//! Return whether the trihedron is right-handed.
+ //! From GLM_GTX_handed_coordinate_space extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool rightHanded(
+ vec<3, T, Q> const& tangent,
+ vec<3, T, Q> const& binormal,
+ vec<3, T, Q> const& normal);
+
+	//! Return whether the trihedron is left-handed.
+ //! From GLM_GTX_handed_coordinate_space extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool leftHanded(
+ vec<3, T, Q> const& tangent,
+ vec<3, T, Q> const& binormal,
+ vec<3, T, Q> const& normal);
+
+ /// @}
+}// namespace glm
+
+#include "handed_coordinate_space.inl"
diff --git a/3rdparty/glm/source/glm/gtx/handed_coordinate_space.inl b/3rdparty/glm/source/glm/gtx/handed_coordinate_space.inl
new file mode 100644
index 0000000..e43c17b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/handed_coordinate_space.inl
@@ -0,0 +1,26 @@
+/// @ref gtx_handed_coordinate_space
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool rightHanded
+ (
+ vec<3, T, Q> const& tangent,
+ vec<3, T, Q> const& binormal,
+ vec<3, T, Q> const& normal
+ )
+ {
+ return dot(cross(normal, tangent), binormal) > T(0);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool leftHanded
+ (
+ vec<3, T, Q> const& tangent,
+ vec<3, T, Q> const& binormal,
+ vec<3, T, Q> const& normal
+ )
+ {
+ return dot(cross(normal, tangent), binormal) < T(0);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/hash.hpp b/3rdparty/glm/source/glm/gtx/hash.hpp
new file mode 100644
index 0000000..05dae9f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/hash.hpp
@@ -0,0 +1,142 @@
+/// @ref gtx_hash
+/// @file glm/gtx/hash.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_hash GLM_GTX_hash
+/// @ingroup gtx
+///
+/// Include <glm/gtx/hash.hpp> to use the features of this extension.
+///
+/// Add std::hash support for glm types
+
+#pragma once
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_hash is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_hash extension included")
+# endif
+#endif
+
+#include <functional>
+
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../gtc/vec1.hpp"
+
+#include "../gtc/quaternion.hpp"
+#include "../gtx/dual_quaternion.hpp"
+
+#include "../mat2x2.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x4.hpp"
+
+#include "../mat3x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x4.hpp"
+
+#include "../mat4x2.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x4.hpp"
+
+#if !GLM_HAS_CXX11_STL
+# error "GLM_GTX_hash requires C++11 standard library support"
+#endif
+
+namespace std
+{
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::vec<1, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::vec<1, T, Q> const& v) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::vec<2, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::vec<2, T, Q> const& v) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::vec<3, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::vec<3, T, Q> const& v) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::vec<4, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::vec<4, T, Q> const& v) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::qua<T,Q>>
+ {
+ GLM_FUNC_DECL size_t operator()(glm::qua<T, Q> const& q) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::tdualquat<T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::tdualquat<T,Q> const& q) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<2, 2, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<2, 2, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<2, 3, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<2, 3, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<2, 4, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<2, 4, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<3, 2, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<3, 2, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<3, 3, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<3, 3, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<3, 4, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<3, 4, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<4, 2, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<4, 2, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<4, 3, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<4, 3, T,Q> const& m) const;
+ };
+
+ template<typename T, glm::qualifier Q>
+ struct hash<glm::mat<4, 4, T,Q> >
+ {
+ GLM_FUNC_DECL size_t operator()(glm::mat<4, 4, T,Q> const& m) const;
+ };
+} // namespace std
+
+#include "hash.inl"
diff --git a/3rdparty/glm/source/glm/gtx/hash.inl b/3rdparty/glm/source/glm/gtx/hash.inl
new file mode 100644
index 0000000..ff71ca9
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/hash.inl
@@ -0,0 +1,184 @@
+/// @ref gtx_hash
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_hash GLM_GTX_hash
+/// @ingroup gtx
+///
+/// @brief Add std::hash support for glm types
+///
+/// <glm/gtx/hash.inl> needs to be included to use the features of this extension.
+
+namespace glm {
+namespace detail
+{
+ GLM_INLINE void hash_combine(size_t &seed, size_t hash)
+ {
+ hash += 0x9e3779b9 + (seed << 6) + (seed >> 2);
+ seed ^= hash;
+ }
+}}
+
+namespace std
+{
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::vec<1, T, Q>>::operator()(glm::vec<1, T, Q> const& v) const
+ {
+ hash<T> hasher;
+ return hasher(v.x);
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::vec<2, T, Q>>::operator()(glm::vec<2, T, Q> const& v) const
+ {
+ size_t seed = 0;
+ hash<T> hasher;
+ glm::detail::hash_combine(seed, hasher(v.x));
+ glm::detail::hash_combine(seed, hasher(v.y));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::vec<3, T, Q>>::operator()(glm::vec<3, T, Q> const& v) const
+ {
+ size_t seed = 0;
+ hash<T> hasher;
+ glm::detail::hash_combine(seed, hasher(v.x));
+ glm::detail::hash_combine(seed, hasher(v.y));
+ glm::detail::hash_combine(seed, hasher(v.z));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::vec<4, T, Q>>::operator()(glm::vec<4, T, Q> const& v) const
+ {
+ size_t seed = 0;
+ hash<T> hasher;
+ glm::detail::hash_combine(seed, hasher(v.x));
+ glm::detail::hash_combine(seed, hasher(v.y));
+ glm::detail::hash_combine(seed, hasher(v.z));
+ glm::detail::hash_combine(seed, hasher(v.w));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::qua<T, Q>>::operator()(glm::qua<T,Q> const& q) const
+ {
+ size_t seed = 0;
+ hash<T> hasher;
+ glm::detail::hash_combine(seed, hasher(q.x));
+ glm::detail::hash_combine(seed, hasher(q.y));
+ glm::detail::hash_combine(seed, hasher(q.z));
+ glm::detail::hash_combine(seed, hasher(q.w));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::tdualquat<T, Q>>::operator()(glm::tdualquat<T, Q> const& q) const
+ {
+ size_t seed = 0;
+ hash<glm::qua<T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(q.real));
+ glm::detail::hash_combine(seed, hasher(q.dual));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<2, 2, T, Q>>::operator()(glm::mat<2, 2, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<2, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<2, 3, T, Q>>::operator()(glm::mat<2, 3, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<3, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<2, 4, T, Q>>::operator()(glm::mat<2, 4, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<4, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<3, 2, T, Q>>::operator()(glm::mat<3, 2, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<2, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ glm::detail::hash_combine(seed, hasher(m[2]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<3, 3, T, Q>>::operator()(glm::mat<3, 3, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<3, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ glm::detail::hash_combine(seed, hasher(m[2]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<3, 4, T, Q>>::operator()(glm::mat<3, 4, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<4, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ glm::detail::hash_combine(seed, hasher(m[2]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<4, 2, T,Q>>::operator()(glm::mat<4, 2, T,Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<2, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ glm::detail::hash_combine(seed, hasher(m[2]));
+ glm::detail::hash_combine(seed, hasher(m[3]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<4, 3, T,Q>>::operator()(glm::mat<4, 3, T,Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<3, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ glm::detail::hash_combine(seed, hasher(m[2]));
+ glm::detail::hash_combine(seed, hasher(m[3]));
+ return seed;
+ }
+
+ template<typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER size_t hash<glm::mat<4, 4, T,Q>>::operator()(glm::mat<4, 4, T, Q> const& m) const
+ {
+ size_t seed = 0;
+ hash<glm::vec<4, T, Q>> hasher;
+ glm::detail::hash_combine(seed, hasher(m[0]));
+ glm::detail::hash_combine(seed, hasher(m[1]));
+ glm::detail::hash_combine(seed, hasher(m[2]));
+ glm::detail::hash_combine(seed, hasher(m[3]));
+ return seed;
+ }
+}
diff --git a/3rdparty/glm/source/glm/gtx/integer.hpp b/3rdparty/glm/source/glm/gtx/integer.hpp
new file mode 100644
index 0000000..d0b4c61
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/integer.hpp
@@ -0,0 +1,76 @@
+/// @ref gtx_integer
+/// @file glm/gtx/integer.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_integer GLM_GTX_integer
+/// @ingroup gtx
+///
+/// Include <glm/gtx/integer.hpp> to use the features of this extension.
+///
+/// Add support for integer for core functions
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/integer.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_integer is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_integer extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_integer
+ /// @{
+
+ //! Returns x raised to the y power.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL int pow(int x, uint y);
+
+ //! Returns the positive square root of x.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL int sqrt(int x);
+
+ //! Returns the floor log2 of x.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL unsigned int floor_log2(unsigned int x);
+
+	//! Modulus. Returns x - y * floor(x / y) using the signed integer value y.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL int mod(int x, int y);
+
+ //! Return the factorial value of a number (!12 max, integer only)
+ //! From GLM_GTX_integer extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType factorial(genType const& x);
+
+ //! 32bit signed integer.
+ //! From GLM_GTX_integer extension.
+ typedef signed int sint;
+
+ //! Returns x raised to the y power.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL uint pow(uint x, uint y);
+
+ //! Returns the positive square root of x.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL uint sqrt(uint x);
+
+	//! Modulus. Returns x - y * floor(x / y) using the unsigned integer value y.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL uint mod(uint x, uint y);
+
+ //! Returns the number of leading zeros.
+ //! From GLM_GTX_integer extension.
+ GLM_FUNC_DECL uint nlz(uint x);
+
+ /// @}
+}//namespace glm
+
+#include "integer.inl"
diff --git a/3rdparty/glm/source/glm/gtx/integer.inl b/3rdparty/glm/source/glm/gtx/integer.inl
new file mode 100644
index 0000000..956366b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/integer.inl
@@ -0,0 +1,185 @@
+/// @ref gtx_integer
+
+namespace glm
+{
+ // pow
+ GLM_FUNC_QUALIFIER int pow(int x, uint y)
+ {
+ if(y == 0)
+ return x >= 0 ? 1 : -1;
+
+ int result = x;
+ for(uint i = 1; i < y; ++i)
+ result *= x;
+ return result;
+ }
+
+ // sqrt: From Christopher J. Musial, An integer square root, Graphics Gems, 1990, page 387
+ GLM_FUNC_QUALIFIER int sqrt(int x)
+ {
+ if(x <= 1) return x;
+
+ int NextTrial = x >> 1;
+ int CurrentAnswer;
+
+ do
+ {
+ CurrentAnswer = NextTrial;
+ NextTrial = (NextTrial + x / NextTrial) >> 1;
+ } while(NextTrial < CurrentAnswer);
+
+ return CurrentAnswer;
+ }
+
+// Henry Gordon Dietz: http://aggregate.org/MAGIC/
+namespace detail
+{
+ GLM_FUNC_QUALIFIER unsigned int ones32(unsigned int x)
+ {
+ /* 32-bit recursive reduction using SWAR...
+ but first step is mapping 2-bit values
+ into sum of 2 1-bit values in sneaky way
+ */
+ x -= ((x >> 1) & 0x55555555);
+ x = (((x >> 2) & 0x33333333) + (x & 0x33333333));
+ x = (((x >> 4) + x) & 0x0f0f0f0f);
+ x += (x >> 8);
+ x += (x >> 16);
+ return(x & 0x0000003f);
+ }
+}//namespace detail
+
+ // Henry Gordon Dietz: http://aggregate.org/MAGIC/
+/*
+ GLM_FUNC_QUALIFIER unsigned int floor_log2(unsigned int x)
+ {
+ x |= (x >> 1);
+ x |= (x >> 2);
+ x |= (x >> 4);
+ x |= (x >> 8);
+ x |= (x >> 16);
+
+ return _detail::ones32(x) >> 1;
+ }
+*/
+ // mod
+ GLM_FUNC_QUALIFIER int mod(int x, int y)
+ {
+ return ((x % y) + y) % y;
+ }
+
+ // factorial (!12 max, integer only)
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType factorial(genType const& x)
+ {
+ genType Temp = x;
+ genType Result;
+ for(Result = 1; Temp > 1; --Temp)
+ Result *= Temp;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> factorial(
+ vec<2, T, Q> const& x)
+ {
+ return vec<2, T, Q>(
+ factorial(x.x),
+ factorial(x.y));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> factorial(
+ vec<3, T, Q> const& x)
+ {
+ return vec<3, T, Q>(
+ factorial(x.x),
+ factorial(x.y),
+ factorial(x.z));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> factorial(
+ vec<4, T, Q> const& x)
+ {
+ return vec<4, T, Q>(
+ factorial(x.x),
+ factorial(x.y),
+ factorial(x.z),
+ factorial(x.w));
+ }
+
+ GLM_FUNC_QUALIFIER uint pow(uint x, uint y)
+ {
+ if (y == 0)
+ return 1u;
+
+ uint result = x;
+ for(uint i = 1; i < y; ++i)
+ result *= x;
+ return result;
+ }
+
+ GLM_FUNC_QUALIFIER uint sqrt(uint x)
+ {
+ if(x <= 1) return x;
+
+ uint NextTrial = x >> 1;
+ uint CurrentAnswer;
+
+ do
+ {
+ CurrentAnswer = NextTrial;
+ NextTrial = (NextTrial + x / NextTrial) >> 1;
+ } while(NextTrial < CurrentAnswer);
+
+ return CurrentAnswer;
+ }
+
+ GLM_FUNC_QUALIFIER uint mod(uint x, uint y)
+ {
+ return x - y * (x / y);
+ }
+
+#if(GLM_COMPILER & (GLM_COMPILER_VC | GLM_COMPILER_GCC))
+
+ GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x)
+ {
+ return 31u - findMSB(x);
+ }
+
+#else
+
+ // Hackers Delight: http://www.hackersdelight.org/HDcode/nlz.c.txt
+ GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x)
+ {
+ int y, m, n;
+
+ y = -int(x >> 16); // If left half of x is 0,
+ m = (y >> 16) & 16; // set n = 16. If left half
+ n = 16 - m; // is nonzero, set n = 0 and
+ x = x >> m; // shift x right 16.
+ // Now x is of the form 0000xxxx.
+ y = x - 0x100; // If positions 8-15 are 0,
+ m = (y >> 16) & 8; // add 8 to n and shift x left 8.
+ n = n + m;
+ x = x << m;
+
+ y = x - 0x1000; // If positions 12-15 are 0,
+ m = (y >> 16) & 4; // add 4 to n and shift x left 4.
+ n = n + m;
+ x = x << m;
+
+ y = x - 0x4000; // If positions 14-15 are 0,
+ m = (y >> 16) & 2; // add 2 to n and shift x left 2.
+ n = n + m;
+ x = x << m;
+
+ y = x >> 14; // Set y = 0, 1, 2, or 3.
+ m = y & ~(y >> 1); // Set m = 0, 1, 2, or 2 resp.
+ return unsigned(n + 2 - m);
+ }
+
+#endif//(GLM_COMPILER)
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/intersect.hpp b/3rdparty/glm/source/glm/gtx/intersect.hpp
new file mode 100644
index 0000000..f5c0621
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/intersect.hpp
@@ -0,0 +1,92 @@
+/// @ref gtx_intersect
+/// @file glm/gtx/intersect.hpp
+///
+/// @see core (dependence)
+/// @see gtx_closest_point (dependence)
+///
+/// @defgroup gtx_intersect GLM_GTX_intersect
+/// @ingroup gtx
+///
+/// Include <glm/gtx/intersect.hpp> to use the features of this extension.
+///
+/// Add intersection functions
+
+#pragma once
+
+// Dependency:
+#include <cfloat>
+#include <limits>
+#include "../glm.hpp"
+#include "../geometric.hpp"
+#include "../gtx/closest_point.hpp"
+#include "../gtx/vector_query.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_intersect is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_intersect extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_intersect
+ /// @{
+
+ //! Compute the intersection of a ray and a plane.
+ //! Ray direction and plane normal must be unit length.
+ //! From GLM_GTX_intersect extension.
+ template<typename genType>
+ GLM_FUNC_DECL bool intersectRayPlane(
+ genType const& orig, genType const& dir,
+ genType const& planeOrig, genType const& planeNormal,
+ typename genType::value_type & intersectionDistance);
+
+ //! Compute the intersection of a ray and a triangle.
+	/// Based on the Tomas Möller implementation http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/
+ //! From GLM_GTX_intersect extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool intersectRayTriangle(
+ vec<3, T, Q> const& orig, vec<3, T, Q> const& dir,
+ vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, vec<3, T, Q> const& v2,
+ vec<2, T, Q>& baryPosition, T& distance);
+
+ //! Compute the intersection of a line and a triangle.
+ //! From GLM_GTX_intersect extension.
+ template<typename genType>
+ GLM_FUNC_DECL bool intersectLineTriangle(
+ genType const& orig, genType const& dir,
+ genType const& vert0, genType const& vert1, genType const& vert2,
+ genType & position);
+
+ //! Compute the intersection distance of a ray and a sphere.
+ //! The ray direction vector is unit length.
+ //! From GLM_GTX_intersect extension.
+ template<typename genType>
+ GLM_FUNC_DECL bool intersectRaySphere(
+ genType const& rayStarting, genType const& rayNormalizedDirection,
+ genType const& sphereCenter, typename genType::value_type const sphereRadiusSquared,
+ typename genType::value_type & intersectionDistance);
+
+ //! Compute the intersection of a ray and a sphere.
+ //! From GLM_GTX_intersect extension.
+ template<typename genType>
+ GLM_FUNC_DECL bool intersectRaySphere(
+ genType const& rayStarting, genType const& rayNormalizedDirection,
+ genType const& sphereCenter, const typename genType::value_type sphereRadius,
+ genType & intersectionPosition, genType & intersectionNormal);
+
+ //! Compute the intersection of a line and a sphere.
+ //! From GLM_GTX_intersect extension
+ template<typename genType>
+ GLM_FUNC_DECL bool intersectLineSphere(
+ genType const& point0, genType const& point1,
+ genType const& sphereCenter, typename genType::value_type sphereRadius,
+ genType & intersectionPosition1, genType & intersectionNormal1,
+ genType & intersectionPosition2 = genType(), genType & intersectionNormal2 = genType());
+
+ /// @}
+}//namespace glm
+
+#include "intersect.inl"
diff --git a/3rdparty/glm/source/glm/gtx/intersect.inl b/3rdparty/glm/source/glm/gtx/intersect.inl
new file mode 100644
index 0000000..d242a61
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/intersect.inl
@@ -0,0 +1,200 @@
+/// @ref gtx_intersect
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool intersectRayPlane
+ (
+ genType const& orig, genType const& dir,
+ genType const& planeOrig, genType const& planeNormal,
+ typename genType::value_type & intersectionDistance
+ )
+ {
+ typename genType::value_type d = glm::dot(dir, planeNormal);
+ typename genType::value_type Epsilon = std::numeric_limits<typename genType::value_type>::epsilon();
+
+ if(glm::abs(d) > Epsilon) // if dir and planeNormal are not perpendicular
+ {
+ typename genType::value_type const tmp_intersectionDistance = glm::dot(planeOrig - orig, planeNormal) / d;
+ if (tmp_intersectionDistance > static_cast<typename genType::value_type>(0)) { // allow only intersections
+ intersectionDistance = tmp_intersectionDistance;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool intersectRayTriangle
+ (
+ vec<3, T, Q> const& orig, vec<3, T, Q> const& dir,
+ vec<3, T, Q> const& vert0, vec<3, T, Q> const& vert1, vec<3, T, Q> const& vert2,
+ vec<2, T, Q>& baryPosition, T& distance
+ )
+ {
+ // find vectors for two edges sharing vert0
+ vec<3, T, Q> const edge1 = vert1 - vert0;
+ vec<3, T, Q> const edge2 = vert2 - vert0;
+
+ // begin calculating determinant - also used to calculate U parameter
+ vec<3, T, Q> const p = glm::cross(dir, edge2);
+
+ // if determinant is near zero, ray lies in plane of triangle
+ T const det = glm::dot(edge1, p);
+
+ vec<3, T, Q> Perpendicular(0);
+
+ if(det > std::numeric_limits<T>::epsilon())
+ {
+ // calculate distance from vert0 to ray origin
+ vec<3, T, Q> const dist = orig - vert0;
+
+ // calculate U parameter and test bounds
+ baryPosition.x = glm::dot(dist, p);
+ if(baryPosition.x < static_cast<T>(0) || baryPosition.x > det)
+ return false;
+
+ // prepare to test V parameter
+ Perpendicular = glm::cross(dist, edge1);
+
+ // calculate V parameter and test bounds
+ baryPosition.y = glm::dot(dir, Perpendicular);
+ if((baryPosition.y < static_cast<T>(0)) || ((baryPosition.x + baryPosition.y) > det))
+ return false;
+ }
+ else if(det < -std::numeric_limits<T>::epsilon())
+ {
+ // calculate distance from vert0 to ray origin
+ vec<3, T, Q> const dist = orig - vert0;
+
+ // calculate U parameter and test bounds
+ baryPosition.x = glm::dot(dist, p);
+ if((baryPosition.x > static_cast<T>(0)) || (baryPosition.x < det))
+ return false;
+
+ // prepare to test V parameter
+ Perpendicular = glm::cross(dist, edge1);
+
+ // calculate V parameter and test bounds
+ baryPosition.y = glm::dot(dir, Perpendicular);
+ if((baryPosition.y > static_cast<T>(0)) || (baryPosition.x + baryPosition.y < det))
+ return false;
+ }
+ else
+ return false; // ray is parallel to the plane of the triangle
+
+ T inv_det = static_cast<T>(1) / det;
+
+ // calculate distance, ray intersects triangle
+ distance = glm::dot(edge2, Perpendicular) * inv_det;
+ baryPosition *= inv_det;
+
+ return true;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool intersectLineTriangle
+ (
+ genType const& orig, genType const& dir,
+ genType const& vert0, genType const& vert1, genType const& vert2,
+ genType & position
+ )
+ {
+ typename genType::value_type Epsilon = std::numeric_limits<typename genType::value_type>::epsilon();
+
+ genType edge1 = vert1 - vert0;
+ genType edge2 = vert2 - vert0;
+
+ genType Perpendicular = cross(dir, edge2);
+
+ typename genType::value_type det = dot(edge1, Perpendicular);
+
+ if (det > -Epsilon && det < Epsilon)
+ return false;
+ typename genType::value_type inv_det = typename genType::value_type(1) / det;
+
+ genType Tengant = orig - vert0;
+
+ position.y = dot(Tengant, Perpendicular) * inv_det;
+ if (position.y < typename genType::value_type(0) || position.y > typename genType::value_type(1))
+ return false;
+
+ genType Cotengant = cross(Tengant, edge1);
+
+ position.z = dot(dir, Cotengant) * inv_det;
+ if (position.z < typename genType::value_type(0) || position.y + position.z > typename genType::value_type(1))
+ return false;
+
+ position.x = dot(edge2, Cotengant) * inv_det;
+
+ return true;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool intersectRaySphere
+ (
+ genType const& rayStarting, genType const& rayNormalizedDirection,
+ genType const& sphereCenter, const typename genType::value_type sphereRadiusSquared,
+ typename genType::value_type & intersectionDistance
+ )
+ {
+ typename genType::value_type Epsilon = std::numeric_limits<typename genType::value_type>::epsilon();
+ genType diff = sphereCenter - rayStarting;
+ typename genType::value_type t0 = dot(diff, rayNormalizedDirection);
+ typename genType::value_type dSquared = dot(diff, diff) - t0 * t0;
+ if( dSquared > sphereRadiusSquared )
+ {
+ return false;
+ }
+ typename genType::value_type t1 = sqrt( sphereRadiusSquared - dSquared );
+ intersectionDistance = t0 > t1 + Epsilon ? t0 - t1 : t0 + t1;
+ return intersectionDistance > Epsilon;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool intersectRaySphere
+ (
+ genType const& rayStarting, genType const& rayNormalizedDirection,
+ genType const& sphereCenter, const typename genType::value_type sphereRadius,
+ genType & intersectionPosition, genType & intersectionNormal
+ )
+ {
+ typename genType::value_type distance;
+ if( intersectRaySphere( rayStarting, rayNormalizedDirection, sphereCenter, sphereRadius * sphereRadius, distance ) )
+ {
+ intersectionPosition = rayStarting + rayNormalizedDirection * distance;
+ intersectionNormal = (intersectionPosition - sphereCenter) / sphereRadius;
+ return true;
+ }
+ return false;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER bool intersectLineSphere
+ (
+ genType const& point0, genType const& point1,
+ genType const& sphereCenter, typename genType::value_type sphereRadius,
+ genType & intersectionPoint1, genType & intersectionNormal1,
+ genType & intersectionPoint2, genType & intersectionNormal2
+ )
+ {
+ typename genType::value_type Epsilon = std::numeric_limits<typename genType::value_type>::epsilon();
+ genType dir = normalize(point1 - point0);
+ genType diff = sphereCenter - point0;
+ typename genType::value_type t0 = dot(diff, dir);
+ typename genType::value_type dSquared = dot(diff, diff) - t0 * t0;
+ if( dSquared > sphereRadius * sphereRadius )
+ {
+ return false;
+ }
+ typename genType::value_type t1 = sqrt( sphereRadius * sphereRadius - dSquared );
+ if( t0 < t1 + Epsilon )
+ t1 = -t1;
+ intersectionPoint1 = point0 + dir * (t0 - t1);
+ intersectionNormal1 = (intersectionPoint1 - sphereCenter) / sphereRadius;
+ intersectionPoint2 = point0 + dir * (t0 + t1);
+ intersectionNormal2 = (intersectionPoint2 - sphereCenter) / sphereRadius;
+ return true;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/io.hpp b/3rdparty/glm/source/glm/gtx/io.hpp
new file mode 100644
index 0000000..8d974f0
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/io.hpp
@@ -0,0 +1,201 @@
+/// @ref gtx_io
+/// @file glm/gtx/io.hpp
+/// @author Jan P Springer ([email protected])
+///
+/// @see core (dependence)
+/// @see gtc_matrix_access (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_io GLM_GTX_io
+/// @ingroup gtx
+///
+/// Include <glm/gtx/io.hpp> to use the features of this extension.
+///
+/// std::[w]ostream support for glm types
+///
+/// std::[w]ostream support for glm types + qualifier/width/etc. manipulators
+/// based on howard hinnant's std::chrono io proposal
+/// [http://home.roadrunner.com/~hinnant/bloomington/chrono_io.html]
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_io is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_io extension included")
+# endif
+#endif
+
+#include <iosfwd> // std::basic_ostream<> (fwd)
+#include <locale> // std::locale, std::locale::facet, std::locale::id
+#include <utility> // std::pair<>
+
+namespace glm
+{
+ /// @addtogroup gtx_io
+ /// @{
+
+ namespace io
+ {
+ enum order_type { column_major, row_major};
+
+ template<typename CTy>
+ class format_punct : public std::locale::facet
+ {
+ typedef CTy char_type;
+
+ public:
+
+ static std::locale::id id;
+
+ bool formatted;
+ unsigned precision;
+ unsigned width;
+ char_type separator;
+ char_type delim_left;
+ char_type delim_right;
+ char_type space;
+ char_type newline;
+ order_type order;
+
+ GLM_FUNC_DECL explicit format_punct(size_t a = 0);
+ GLM_FUNC_DECL explicit format_punct(format_punct const&);
+ };
+
+ template<typename CTy, typename CTr = std::char_traits<CTy> >
+ class basic_state_saver {
+
+ public:
+
+ GLM_FUNC_DECL explicit basic_state_saver(std::basic_ios<CTy,CTr>&);
+ GLM_FUNC_DECL ~basic_state_saver();
+
+ private:
+
+ typedef ::std::basic_ios<CTy,CTr> state_type;
+ typedef typename state_type::char_type char_type;
+ typedef ::std::ios_base::fmtflags flags_type;
+ typedef ::std::streamsize streamsize_type;
+ typedef ::std::locale const locale_type;
+
+ state_type& state_;
+ flags_type flags_;
+ streamsize_type precision_;
+ streamsize_type width_;
+ char_type fill_;
+ locale_type locale_;
+
+ GLM_FUNC_DECL basic_state_saver& operator=(basic_state_saver const&);
+ };
+
+ typedef basic_state_saver<char> state_saver;
+ typedef basic_state_saver<wchar_t> wstate_saver;
+
+ template<typename CTy, typename CTr = std::char_traits<CTy> >
+ class basic_format_saver
+ {
+ public:
+
+ GLM_FUNC_DECL explicit basic_format_saver(std::basic_ios<CTy,CTr>&);
+ GLM_FUNC_DECL ~basic_format_saver();
+
+ private:
+
+ basic_state_saver<CTy> const bss_;
+
+ GLM_FUNC_DECL basic_format_saver& operator=(basic_format_saver const&);
+ };
+
+ typedef basic_format_saver<char> format_saver;
+ typedef basic_format_saver<wchar_t> wformat_saver;
+
+ struct precision
+ {
+ unsigned value;
+
+ GLM_FUNC_DECL explicit precision(unsigned);
+ };
+
+ struct width
+ {
+ unsigned value;
+
+ GLM_FUNC_DECL explicit width(unsigned);
+ };
+
+ template<typename CTy>
+ struct delimeter
+ {
+ CTy value[3];
+
+ GLM_FUNC_DECL explicit delimeter(CTy /* left */, CTy /* right */, CTy /* separator */ = ',');
+ };
+
+ struct order
+ {
+ order_type value;
+
+ GLM_FUNC_DECL explicit order(order_type);
+ };
+
+ // functions, inlined (inline)
+
+ template<typename FTy, typename CTy, typename CTr>
+ FTy const& get_facet(std::basic_ios<CTy,CTr>&);
+ template<typename FTy, typename CTy, typename CTr>
+ std::basic_ios<CTy,CTr>& formatted(std::basic_ios<CTy,CTr>&);
+ template<typename FTy, typename CTy, typename CTr>
+ std::basic_ios<CTy,CTr>& unformattet(std::basic_ios<CTy,CTr>&);
+
+ template<typename CTy, typename CTr>
+ std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, precision const&);
+ template<typename CTy, typename CTr>
+ std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, width const&);
+ template<typename CTy, typename CTr>
+ std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, delimeter<CTy> const&);
+ template<typename CTy, typename CTr>
+ std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, order const&);
+ }//namespace io
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, qua<T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<1, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<2, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<3, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<4, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<2, 2, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<2, 3, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<2, 4, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<3, 2, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<3, 3, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<3, 4, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<4, 2, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<4, 3, T, Q> const&);
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<4, 4, T, Q> const&);
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_DECL std::basic_ostream<CTy,CTr> & operator<<(std::basic_ostream<CTy,CTr> &,
+ std::pair<mat<4, 4, T, Q> const, mat<4, 4, T, Q> const> const&);
+
+ /// @}
+}//namespace glm
+
+#include "io.inl"
diff --git a/3rdparty/glm/source/glm/gtx/io.inl b/3rdparty/glm/source/glm/gtx/io.inl
new file mode 100644
index 0000000..a3a1bb6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/io.inl
@@ -0,0 +1,440 @@
+/// @ref gtx_io
+/// @author Jan P Springer ([email protected])
+
+#include <iomanip> // std::fixed, std::setfill<>, std::setprecision, std::right, std::setw
+#include <ostream> // std::basic_ostream<>
+#include "../gtc/matrix_access.hpp" // glm::col, glm::row
+#include "../gtx/type_trait.hpp" // glm::type<>
+
+namespace glm{
+namespace io
+{
+ template<typename CTy>
+ GLM_FUNC_QUALIFIER format_punct<CTy>::format_punct(size_t a)
+ : std::locale::facet(a)
+ , formatted(true)
+ , precision(3)
+ , width(1 + 4 + 1 + precision)
+ , separator(',')
+ , delim_left('[')
+ , delim_right(']')
+ , space(' ')
+ , newline('\n')
+ , order(column_major)
+ {}
+
+ template<typename CTy>
+ GLM_FUNC_QUALIFIER format_punct<CTy>::format_punct(format_punct const& a)
+ : std::locale::facet(0)
+ , formatted(a.formatted)
+ , precision(a.precision)
+ , width(a.width)
+ , separator(a.separator)
+ , delim_left(a.delim_left)
+ , delim_right(a.delim_right)
+ , space(a.space)
+ , newline(a.newline)
+ , order(a.order)
+ {}
+
+ template<typename CTy> std::locale::id format_punct<CTy>::id;
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER basic_state_saver<CTy, CTr>::basic_state_saver(std::basic_ios<CTy, CTr>& a)
+ : state_(a)
+ , flags_(a.flags())
+ , precision_(a.precision())
+ , width_(a.width())
+ , fill_(a.fill())
+ , locale_(a.getloc())
+ {}
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER basic_state_saver<CTy, CTr>::~basic_state_saver()
+ {
+ state_.imbue(locale_);
+ state_.fill(fill_);
+ state_.width(width_);
+ state_.precision(precision_);
+ state_.flags(flags_);
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER basic_format_saver<CTy, CTr>::basic_format_saver(std::basic_ios<CTy, CTr>& a)
+ : bss_(a)
+ {
+ a.imbue(std::locale(a.getloc(), new format_punct<CTy>(get_facet<format_punct<CTy> >(a))));
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER
+ basic_format_saver<CTy, CTr>::~basic_format_saver()
+ {}
+
+ GLM_FUNC_QUALIFIER precision::precision(unsigned a)
+ : value(a)
+ {}
+
+ GLM_FUNC_QUALIFIER width::width(unsigned a)
+ : value(a)
+ {}
+
+ template<typename CTy>
+ GLM_FUNC_QUALIFIER delimeter<CTy>::delimeter(CTy a, CTy b, CTy c)
+ : value()
+ {
+ value[0] = a;
+ value[1] = b;
+ value[2] = c;
+ }
+
+ GLM_FUNC_QUALIFIER order::order(order_type a)
+ : value(a)
+ {}
+
+ template<typename FTy, typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER FTy const& get_facet(std::basic_ios<CTy, CTr>& ios)
+ {
+ if(!std::has_facet<FTy>(ios.getloc()))
+ ios.imbue(std::locale(ios.getloc(), new FTy));
+
+ return std::use_facet<FTy>(ios.getloc());
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER std::basic_ios<CTy, CTr>& formatted(std::basic_ios<CTy, CTr>& ios)
+ {
+ const_cast<format_punct<CTy>&>(get_facet<format_punct<CTy> >(ios)).formatted = true;
+ return ios;
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER std::basic_ios<CTy, CTr>& unformatted(std::basic_ios<CTy, CTr>& ios)
+ {
+ const_cast<format_punct<CTy>&>(get_facet<format_punct<CTy> >(ios)).formatted = false;
+ return ios;
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>& os, precision const& a)
+ {
+ const_cast<format_punct<CTy>&>(get_facet<format_punct<CTy> >(os)).precision = a.value;
+ return os;
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>& os, width const& a)
+ {
+ const_cast<format_punct<CTy>&>(get_facet<format_punct<CTy> >(os)).width = a.value;
+ return os;
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>& os, delimeter<CTy> const& a)
+ {
+ format_punct<CTy> & fmt(const_cast<format_punct<CTy>&>(get_facet<format_punct<CTy> >(os)));
+
+ fmt.delim_left = a.value[0];
+ fmt.delim_right = a.value[1];
+ fmt.separator = a.value[2];
+
+ return os;
+ }
+
+ template<typename CTy, typename CTr>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>& os, order const& a)
+ {
+ const_cast<format_punct<CTy>&>(get_facet<format_punct<CTy> >(os)).order = a.value;
+ return os;
+ }
+} // namespace io
+
+namespace detail
+{
+ template<typename CTy, typename CTr, typename V>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>&
+ print_vector_on(std::basic_ostream<CTy, CTr>& os, V const& a)
+ {
+ typename std::basic_ostream<CTy, CTr>::sentry const cerberus(os);
+
+ if(cerberus)
+ {
+ io::format_punct<CTy> const& fmt(io::get_facet<io::format_punct<CTy> >(os));
+
+ length_t const& components(type<V>::components);
+
+ if(fmt.formatted)
+ {
+ io::basic_state_saver<CTy> const bss(os);
+
+ os << std::fixed << std::right << std::setprecision(fmt.precision) << std::setfill(fmt.space) << fmt.delim_left;
+
+ for(length_t i(0); i < components; ++i)
+ {
+ os << std::setw(fmt.width) << a[i];
+ if(components-1 != i)
+ os << fmt.separator;
+ }
+
+ os << fmt.delim_right;
+ }
+ else
+ {
+ for(length_t i(0); i < components; ++i)
+ {
+ os << a[i];
+
+ if(components-1 != i)
+ os << fmt.space;
+ }
+ }
+ }
+
+ return os;
+ }
+}//namespace detail
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, qua<T, Q> const& a)
+ {
+ return detail::print_vector_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, vec<1, T, Q> const& a)
+ {
+ return detail::print_vector_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, vec<2, T, Q> const& a)
+ {
+ return detail::print_vector_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, vec<3, T, Q> const& a)
+ {
+ return detail::print_vector_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, vec<4, T, Q> const& a)
+ {
+ return detail::print_vector_on(os, a);
+ }
+
+namespace detail
+{
+ template<typename CTy, typename CTr, template<length_t, length_t, typename, qualifier> class M, length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& print_matrix_on(std::basic_ostream<CTy, CTr>& os, M<C, R, T, Q> const& a)
+ {
+ typename std::basic_ostream<CTy,CTr>::sentry const cerberus(os);
+
+ if(cerberus)
+ {
+ io::format_punct<CTy> const& fmt(io::get_facet<io::format_punct<CTy> >(os));
+
+ length_t const& cols(type<M<C, R, T, Q> >::cols);
+ length_t const& rows(type<M<C, R, T, Q> >::rows);
+
+ if(fmt.formatted)
+ {
+ os << fmt.newline << fmt.delim_left;
+
+ switch(fmt.order)
+ {
+ case io::column_major:
+ {
+ for(length_t i(0); i < rows; ++i)
+ {
+ if (0 != i)
+ os << fmt.space;
+
+ os << row(a, i);
+
+ if(rows-1 != i)
+ os << fmt.newline;
+ }
+ }
+ break;
+
+ case io::row_major:
+ {
+ for(length_t i(0); i < cols; ++i)
+ {
+ if(0 != i)
+ os << fmt.space;
+
+ os << column(a, i);
+
+ if(cols-1 != i)
+ os << fmt.newline;
+ }
+ }
+ break;
+ }
+
+ os << fmt.delim_right;
+ }
+ else
+ {
+ switch (fmt.order)
+ {
+ case io::column_major:
+ {
+ for(length_t i(0); i < cols; ++i)
+ {
+ os << column(a, i);
+
+ if(cols - 1 != i)
+ os << fmt.space;
+ }
+ }
+ break;
+
+ case io::row_major:
+ {
+ for (length_t i(0); i < rows; ++i)
+ {
+ os << row(a, i);
+
+ if (rows-1 != i)
+ os << fmt.space;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ return os;
+ }
+}//namespace detail
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, mat<2, 2, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, mat<2, 3, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, mat<2, 4, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, mat<3, 2, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, mat<3, 3, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr> & operator<<(std::basic_ostream<CTy,CTr>& os, mat<3, 4, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr> & operator<<(std::basic_ostream<CTy,CTr>& os, mat<4, 2, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr> & operator<<(std::basic_ostream<CTy,CTr>& os, mat<4, 3, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy,CTr> & operator<<(std::basic_ostream<CTy,CTr>& os, mat<4, 4, T, Q> const& a)
+ {
+ return detail::print_matrix_on(os, a);
+ }
+
+namespace detail
+{
+ template<typename CTy, typename CTr, template<length_t, length_t, typename, qualifier> class M, length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& print_matrix_pair_on(std::basic_ostream<CTy, CTr>& os, std::pair<M<C, R, T, Q> const, M<C, R, T, Q> const> const& a)
+ {
+ typename std::basic_ostream<CTy,CTr>::sentry const cerberus(os);
+
+ if(cerberus)
+ {
+ io::format_punct<CTy> const& fmt(io::get_facet<io::format_punct<CTy> >(os));
+ M<C, R, T, Q> const& ml(a.first);
+ M<C, R, T, Q> const& mr(a.second);
+ length_t const& cols(type<M<C, R, T, Q> >::cols);
+ length_t const& rows(type<M<C, R, T, Q> >::rows);
+
+ if(fmt.formatted)
+ {
+ os << fmt.newline << fmt.delim_left;
+
+ switch(fmt.order)
+ {
+ case io::column_major:
+ {
+ for(length_t i(0); i < rows; ++i)
+ {
+ if(0 != i)
+ os << fmt.space;
+
+ os << row(ml, i) << ((rows-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << row(mr, i);
+
+ if(rows-1 != i)
+ os << fmt.newline;
+ }
+ }
+ break;
+ case io::row_major:
+ {
+ for(length_t i(0); i < cols; ++i)
+ {
+ if(0 != i)
+ os << fmt.space;
+
+ os << column(ml, i) << ((cols-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << column(mr, i);
+
+ if(cols-1 != i)
+ os << fmt.newline;
+ }
+ }
+ break;
+ }
+
+ os << fmt.delim_right;
+ }
+ else
+ {
+ os << ml << fmt.space << mr;
+ }
+ }
+
+ return os;
+ }
+}//namespace detail
+
+ template<typename CTy, typename CTr, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(
+ std::basic_ostream<CTy, CTr> & os,
+ std::pair<mat<4, 4, T, Q> const,
+ mat<4, 4, T, Q> const> const& a)
+ {
+ return detail::print_matrix_pair_on(os, a);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/log_base.hpp b/3rdparty/glm/source/glm/gtx/log_base.hpp
new file mode 100644
index 0000000..ba28c9d
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/log_base.hpp
@@ -0,0 +1,48 @@
+/// @ref gtx_log_base
+/// @file glm/gtx/log_base.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_log_base GLM_GTX_log_base
+/// @ingroup gtx
+///
+/// Include <glm/gtx/log_base.hpp> to use the features of this extension.
+///
+/// Logarithm for any base. base can be a vector or a scalar.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_log_base is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_log_base extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_log_base
+ /// @{
+
+ /// Logarithm for any base.
+ /// From GLM_GTX_log_base.
+ template<typename genType>
+ GLM_FUNC_DECL genType log(
+ genType const& x,
+ genType const& base);
+
+ /// Logarithm for any base.
+ /// From GLM_GTX_log_base.
+ template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> log(
+ vec<L, T, Q> const& x,
+ vec<L, T, Q> const& base);
+
+ /// @}
+}//namespace glm
+
+#include "log_base.inl"
diff --git a/3rdparty/glm/source/glm/gtx/log_base.inl b/3rdparty/glm/source/glm/gtx/log_base.inl
new file mode 100644
index 0000000..4bbb8e8
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/log_base.inl
@@ -0,0 +1,16 @@
+/// @ref gtx_log_base
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType log(genType const& x, genType const& base)
+ {
+ return glm::log(x) / glm::log(base);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, T, Q> log(vec<L, T, Q> const& x, vec<L, T, Q> const& base)
+ {
+ return glm::log(x) / glm::log(base);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_cross_product.hpp b/3rdparty/glm/source/glm/gtx/matrix_cross_product.hpp
new file mode 100644
index 0000000..1e585f9
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_cross_product.hpp
@@ -0,0 +1,47 @@
+/// @ref gtx_matrix_cross_product
+/// @file glm/gtx/matrix_cross_product.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_matrix_cross_product GLM_GTX_matrix_cross_product
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_cross_product.hpp> to use the features of this extension.
+///
+/// Build cross product matrices
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_cross_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_cross_product extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_cross_product
+ /// @{
+
+ //! Build a cross product matrix.
+ //! From GLM_GTX_matrix_cross_product extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> matrixCross3(
+ vec<3, T, Q> const& x);
+
+ //! Build a cross product matrix.
+ //! From GLM_GTX_matrix_cross_product extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> matrixCross4(
+ vec<3, T, Q> const& x);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_cross_product.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_cross_product.inl b/3rdparty/glm/source/glm/gtx/matrix_cross_product.inl
new file mode 100644
index 0000000..3a15397
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_cross_product.inl
@@ -0,0 +1,37 @@
+/// @ref gtx_matrix_cross_product
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> matrixCross3
+ (
+ vec<3, T, Q> const& x
+ )
+ {
+ mat<3, 3, T, Q> Result(T(0));
+ Result[0][1] = x.z;
+ Result[1][0] = -x.z;
+ Result[0][2] = -x.y;
+ Result[2][0] = x.y;
+ Result[1][2] = x.x;
+ Result[2][1] = -x.x;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> matrixCross4
+ (
+ vec<3, T, Q> const& x
+ )
+ {
+ mat<4, 4, T, Q> Result(T(0));
+ Result[0][1] = x.z;
+ Result[1][0] = -x.z;
+ Result[0][2] = -x.y;
+ Result[2][0] = x.y;
+ Result[1][2] = x.x;
+ Result[2][1] = -x.x;
+ return Result;
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_decompose.hpp b/3rdparty/glm/source/glm/gtx/matrix_decompose.hpp
new file mode 100644
index 0000000..acd7a7f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_decompose.hpp
@@ -0,0 +1,46 @@
+/// @ref gtx_matrix_decompose
+/// @file glm/gtx/matrix_decompose.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_matrix_decompose GLM_GTX_matrix_decompose
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_decompose.hpp> to use the features of this extension.
+///
+/// Decomposes a model matrix to translations, rotation and scale components
+
+#pragma once
+
+// Dependencies
+#include "../mat4x4.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../geometric.hpp"
+#include "../gtc/quaternion.hpp"
+#include "../gtc/matrix_transform.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_decompose is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_decompose extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_decompose
+ /// @{
+
+ /// Decomposes a model matrix to translations, rotation and scale components
+ /// @see gtx_matrix_decompose
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool decompose(
+ mat<4, 4, T, Q> const& modelMatrix,
+ vec<3, T, Q> & scale, qua<T, Q> & orientation, vec<3, T, Q> & translation, vec<3, T, Q> & skew, vec<4, T, Q> & perspective);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_decompose.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_decompose.inl b/3rdparty/glm/source/glm/gtx/matrix_decompose.inl
new file mode 100644
index 0000000..aa4386a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_decompose.inl
@@ -0,0 +1,192 @@
+/// @ref gtx_matrix_decompose
+
+#include "../gtc/constants.hpp"
+#include "../gtc/epsilon.hpp"
+
+namespace glm{
+namespace detail
+{
+ /// Make a linear combination of two vectors and return the result.
+ // result = (a * ascl) + (b * bscl)
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> combine(
+ vec<3, T, Q> const& a,
+ vec<3, T, Q> const& b,
+ T ascl, T bscl)
+ {
+ return (a * ascl) + (b * bscl);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> scale(vec<3, T, Q> const& v, T desiredLength)
+ {
+ return v * desiredLength / length(v);
+ }
+}//namespace detail
+
+ // Matrix decompose
+ // http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp
+	// Decomposes the model matrix to translation, rotation and scale components
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool decompose(mat<4, 4, T, Q> const& ModelMatrix, vec<3, T, Q> & Scale, qua<T, Q> & Orientation, vec<3, T, Q> & Translation, vec<3, T, Q> & Skew, vec<4, T, Q> & Perspective)
+ {
+ mat<4, 4, T, Q> LocalMatrix(ModelMatrix);
+
+ // Normalize the matrix.
+ if(epsilonEqual(LocalMatrix[3][3], static_cast<T>(0), epsilon<T>()))
+ return false;
+
+ for(length_t i = 0; i < 4; ++i)
+ for(length_t j = 0; j < 4; ++j)
+ LocalMatrix[i][j] /= LocalMatrix[3][3];
+
+ // perspectiveMatrix is used to solve for perspective, but it also provides
+ // an easy way to test for singularity of the upper 3x3 component.
+ mat<4, 4, T, Q> PerspectiveMatrix(LocalMatrix);
+
+ for(length_t i = 0; i < 3; i++)
+ PerspectiveMatrix[i][3] = static_cast<T>(0);
+ PerspectiveMatrix[3][3] = static_cast<T>(1);
+
+ /// TODO: Fixme!
+ if(epsilonEqual(determinant(PerspectiveMatrix), static_cast<T>(0), epsilon<T>()))
+ return false;
+
+ // First, isolate perspective. This is the messiest.
+ if(
+ epsilonNotEqual(LocalMatrix[0][3], static_cast<T>(0), epsilon<T>()) ||
+ epsilonNotEqual(LocalMatrix[1][3], static_cast<T>(0), epsilon<T>()) ||
+ epsilonNotEqual(LocalMatrix[2][3], static_cast<T>(0), epsilon<T>()))
+ {
+ // rightHandSide is the right hand side of the equation.
+ vec<4, T, Q> RightHandSide;
+ RightHandSide[0] = LocalMatrix[0][3];
+ RightHandSide[1] = LocalMatrix[1][3];
+ RightHandSide[2] = LocalMatrix[2][3];
+ RightHandSide[3] = LocalMatrix[3][3];
+
+ // Solve the equation by inverting PerspectiveMatrix and multiplying
+ // rightHandSide by the inverse. (This is the easiest way, not
+ // necessarily the best.)
+ mat<4, 4, T, Q> InversePerspectiveMatrix = glm::inverse(PerspectiveMatrix);// inverse(PerspectiveMatrix, inversePerspectiveMatrix);
+ mat<4, 4, T, Q> TransposedInversePerspectiveMatrix = glm::transpose(InversePerspectiveMatrix);// transposeMatrix4(inversePerspectiveMatrix, transposedInversePerspectiveMatrix);
+
+ Perspective = TransposedInversePerspectiveMatrix * RightHandSide;
+ // v4MulPointByMatrix(rightHandSide, transposedInversePerspectiveMatrix, perspectivePoint);
+
+ // Clear the perspective partition
+ LocalMatrix[0][3] = LocalMatrix[1][3] = LocalMatrix[2][3] = static_cast<T>(0);
+ LocalMatrix[3][3] = static_cast<T>(1);
+ }
+ else
+ {
+ // No perspective.
+ Perspective = vec<4, T, Q>(0, 0, 0, 1);
+ }
+
+ // Next take care of translation (easy).
+ Translation = vec<3, T, Q>(LocalMatrix[3]);
+ LocalMatrix[3] = vec<4, T, Q>(0, 0, 0, LocalMatrix[3].w);
+
+ vec<3, T, Q> Row[3], Pdum3;
+
+ // Now get scale and shear.
+ for(length_t i = 0; i < 3; ++i)
+ for(length_t j = 0; j < 3; ++j)
+ Row[i][j] = LocalMatrix[i][j];
+
+ // Compute X scale factor and normalize first row.
+ Scale.x = length(Row[0]);// v3Length(Row[0]);
+
+ Row[0] = detail::scale(Row[0], static_cast<T>(1));
+
+ // Compute XY shear factor and make 2nd row orthogonal to 1st.
+ Skew.z = dot(Row[0], Row[1]);
+ Row[1] = detail::combine(Row[1], Row[0], static_cast<T>(1), -Skew.z);
+
+ // Now, compute Y scale and normalize 2nd row.
+ Scale.y = length(Row[1]);
+ Row[1] = detail::scale(Row[1], static_cast<T>(1));
+ Skew.z /= Scale.y;
+
+ // Compute XZ and YZ shears, orthogonalize 3rd row.
+ Skew.y = glm::dot(Row[0], Row[2]);
+ Row[2] = detail::combine(Row[2], Row[0], static_cast<T>(1), -Skew.y);
+ Skew.x = glm::dot(Row[1], Row[2]);
+ Row[2] = detail::combine(Row[2], Row[1], static_cast<T>(1), -Skew.x);
+
+ // Next, get Z scale and normalize 3rd row.
+ Scale.z = length(Row[2]);
+ Row[2] = detail::scale(Row[2], static_cast<T>(1));
+ Skew.y /= Scale.z;
+ Skew.x /= Scale.z;
+
+ // At this point, the matrix (in rows[]) is orthonormal.
+ // Check for a coordinate system flip. If the determinant
+ // is -1, then negate the matrix and the scaling factors.
+ Pdum3 = cross(Row[1], Row[2]); // v3Cross(row[1], row[2], Pdum3);
+ if(dot(Row[0], Pdum3) < 0)
+ {
+ for(length_t i = 0; i < 3; i++)
+ {
+ Scale[i] *= static_cast<T>(-1);
+ Row[i] *= static_cast<T>(-1);
+ }
+ }
+
+ // Now, get the rotations out, as described in the gem.
+
+ // FIXME - Add the ability to return either quaternions (which are
+ // easier to recompose with) or Euler angles (rx, ry, rz), which
+ // are easier for authors to deal with. The latter will only be useful
+ // when we fix https://bugs.webkit.org/show_bug.cgi?id=23799, so I
+ // will leave the Euler angle code here for now.
+
+ // ret.rotateY = asin(-Row[0][2]);
+ // if (cos(ret.rotateY) != 0) {
+ // ret.rotateX = atan2(Row[1][2], Row[2][2]);
+ // ret.rotateZ = atan2(Row[0][1], Row[0][0]);
+ // } else {
+ // ret.rotateX = atan2(-Row[2][0], Row[1][1]);
+ // ret.rotateZ = 0;
+ // }
+
+ int i, j, k = 0;
+ T root, trace = Row[0].x + Row[1].y + Row[2].z;
+ if(trace > static_cast<T>(0))
+ {
+ root = sqrt(trace + static_cast<T>(1.0));
+ Orientation.w = static_cast<T>(0.5) * root;
+ root = static_cast<T>(0.5) / root;
+ Orientation.x = root * (Row[1].z - Row[2].y);
+ Orientation.y = root * (Row[2].x - Row[0].z);
+ Orientation.z = root * (Row[0].y - Row[1].x);
+ } // End if > 0
+ else
+ {
+ static int Next[3] = {1, 2, 0};
+ i = 0;
+ if(Row[1].y > Row[0].x) i = 1;
+ if(Row[2].z > Row[i][i]) i = 2;
+ j = Next[i];
+ k = Next[j];
+
+# ifdef GLM_FORCE_QUAT_DATA_XYZW
+ int off = 0;
+# else
+ int off = 1;
+# endif
+
+ root = sqrt(Row[i][i] - Row[j][j] - Row[k][k] + static_cast<T>(1.0));
+
+ Orientation[i + off] = static_cast<T>(0.5) * root;
+ root = static_cast<T>(0.5) / root;
+ Orientation[j + off] = root * (Row[i][j] + Row[j][i]);
+ Orientation[k + off] = root * (Row[i][k] + Row[k][i]);
+ Orientation.w = root * (Row[j][k] - Row[k][j]);
+ } // End if <= 0
+
+ return true;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_factorisation.hpp b/3rdparty/glm/source/glm/gtx/matrix_factorisation.hpp
new file mode 100644
index 0000000..5a975d6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_factorisation.hpp
@@ -0,0 +1,69 @@
+/// @ref gtx_matrix_factorisation
+/// @file glm/gtx/matrix_factorisation.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_matrix_factorisation GLM_GTX_matrix_factorisation
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_factorisation.hpp> to use the features of this extension.
+///
+/// Functions to factor matrices in various forms
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_factorisation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_factorisation extension included")
+# endif
+#endif
+
+/*
+Suggestions:
+ - Move helper functions flipud and fliplr to another file: They may be helpful in more general circumstances.
+ - Implement other types of matrix factorisation, such as: QL and LQ, L(D)U, eigendecompositions, etc...
+*/
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_factorisation
+ /// @{
+
+ /// Flips the matrix rows up and down.
+ ///
+ /// From GLM_GTX_matrix_factorisation extension.
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> flipud(mat<C, R, T, Q> const& in);
+
+ /// Flips the matrix columns right and left.
+ ///
+ /// From GLM_GTX_matrix_factorisation extension.
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> fliplr(mat<C, R, T, Q> const& in);
+
+ /// Performs QR factorisation of a matrix.
+	/// Returns 2 matrices, q and r, such that the columns of q are orthonormal and span the same subspace as those of the input matrix, r is an upper triangular matrix, and q*r=in.
+ /// Given an n-by-m input matrix, q has dimensions min(n,m)-by-m, and r has dimensions n-by-min(n,m).
+ ///
+ /// From GLM_GTX_matrix_factorisation extension.
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL void qr_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& q, mat<C, (C < R ? C : R), T, Q>& r);
+
+ /// Performs RQ factorisation of a matrix.
+	/// Returns 2 matrices, r and q, such that r is an upper triangular matrix, the rows of q are orthonormal and span the same subspace as those of the input matrix, and r*q=in.
+ /// Note that in the context of RQ factorisation, the diagonal is seen as starting in the lower-right corner of the matrix, instead of the usual upper-left.
+ /// Given an n-by-m input matrix, r has dimensions min(n,m)-by-m, and q has dimensions n-by-min(n,m).
+ ///
+ /// From GLM_GTX_matrix_factorisation extension.
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL void rq_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& r, mat<C, (C < R ? C : R), T, Q>& q);
+
+ /// @}
+}
+
+#include "matrix_factorisation.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_factorisation.inl b/3rdparty/glm/source/glm/gtx/matrix_factorisation.inl
new file mode 100644
index 0000000..c479b8a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_factorisation.inl
@@ -0,0 +1,84 @@
+/// @ref gtx_matrix_factorisation
+
+namespace glm
+{
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<C, R, T, Q> flipud(mat<C, R, T, Q> const& in)
+ {
+ mat<R, C, T, Q> tin = transpose(in);
+ tin = fliplr(tin);
+ mat<C, R, T, Q> out = transpose(tin);
+
+ return out;
+ }
+
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<C, R, T, Q> fliplr(mat<C, R, T, Q> const& in)
+ {
+ mat<C, R, T, Q> out;
+ for (length_t i = 0; i < C; i++)
+ {
+ out[i] = in[(C - i) - 1];
+ }
+
+ return out;
+ }
+
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER void qr_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& q, mat<C, (C < R ? C : R), T, Q>& r)
+ {
+ // Uses modified Gram-Schmidt method
+		// Source: https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
+ // And https://en.wikipedia.org/wiki/QR_decomposition
+
+		//For all the linearly independent columns of the input...
+		// (there can be no more linearly independent columns than there are rows.)
+ for (length_t i = 0; i < (C < R ? C : R); i++)
+ {
+ //Copy in Q the input's i-th column.
+ q[i] = in[i];
+
+ //j = [0,i[
+			// Make that column orthogonal to all the previous ones by subtracting from it the non-orthogonal projection of all the previous columns.
+ // Also: Fill the zero elements of R
+ for (length_t j = 0; j < i; j++)
+ {
+ q[i] -= dot(q[i], q[j])*q[j];
+ r[j][i] = 0;
+ }
+
+ //Now, Q i-th column is orthogonal to all the previous columns. Normalize it.
+ q[i] = normalize(q[i]);
+
+ //j = [i,C[
+ //Finally, compute the corresponding coefficients of R by computing the projection of the resulting column on the other columns of the input.
+ for (length_t j = i; j < C; j++)
+ {
+ r[j][i] = dot(in[j], q[i]);
+ }
+ }
+ }
+
+ template <length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER void rq_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& r, mat<C, (C < R ? C : R), T, Q>& q)
+ {
+ // From https://en.wikipedia.org/wiki/QR_decomposition:
+ // The RQ decomposition transforms a matrix A into the product of an upper triangular matrix R (also known as right-triangular) and an orthogonal matrix Q. The only difference from QR decomposition is the order of these matrices.
+		// QR decomposition is Gram–Schmidt orthogonalization of columns of A, started from the first column.
+		// RQ decomposition is Gram–Schmidt orthogonalization of rows of A, started from the last row.
+
+ mat<R, C, T, Q> tin = transpose(in);
+ tin = fliplr(tin);
+
+ mat<R, (C < R ? C : R), T, Q> tr;
+ mat<(C < R ? C : R), C, T, Q> tq;
+ qr_decompose(tin, tq, tr);
+
+ tr = fliplr(tr);
+ r = transpose(tr);
+ r = fliplr(r);
+
+ tq = fliplr(tq);
+ q = transpose(tq);
+ }
+} //namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_interpolation.hpp b/3rdparty/glm/source/glm/gtx/matrix_interpolation.hpp
new file mode 100644
index 0000000..7d5ad4c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_interpolation.hpp
@@ -0,0 +1,60 @@
+/// @ref gtx_matrix_interpolation
+/// @file glm/gtx/matrix_interpolation.hpp
+/// @author Ghenadii Ursachi ([email protected])
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_matrix_interpolation GLM_GTX_matrix_interpolation
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_interpolation.hpp> to use the features of this extension.
+///
+/// Allows to directly interpolate two matrices.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_interpolation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_interpolation extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_interpolation
+ /// @{
+
+ /// Get the axis and angle of the rotation from a matrix.
+ /// From GLM_GTX_matrix_interpolation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL void axisAngle(
+ mat<4, 4, T, Q> const& Mat, vec<3, T, Q> & Axis, T & Angle);
+
+ /// Build a matrix from axis and angle.
+ /// From GLM_GTX_matrix_interpolation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> axisAngleMatrix(
+ vec<3, T, Q> const& Axis, T const Angle);
+
+ /// Extracts the rotation part of a matrix.
+ /// From GLM_GTX_matrix_interpolation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> extractMatrixRotation(
+ mat<4, 4, T, Q> const& Mat);
+
+ /// Build an interpolation of 4 * 4 matrices.
+ /// From GLM_GTX_matrix_interpolation extension.
+ /// Warning! Works only with rotation and/or translation matrices; scale will generate unexpected results.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> interpolate(
+ mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const Delta);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_interpolation.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_interpolation.inl b/3rdparty/glm/source/glm/gtx/matrix_interpolation.inl
new file mode 100644
index 0000000..f4ba3a6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_interpolation.inl
@@ -0,0 +1,146 @@
+/// @ref gtx_matrix_interpolation
+
+#include "../ext/scalar_constants.hpp"
+
+#include <limits>
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER void axisAngle(mat<4, 4, T, Q> const& m, vec<3, T, Q>& axis, T& angle)
+ {
+ T const epsilon =
+ std::numeric_limits<T>::epsilon() * static_cast<T>(1e2);
+
+ bool const nearSymmetrical =
+ abs(m[1][0] - m[0][1]) < epsilon &&
+ abs(m[2][0] - m[0][2]) < epsilon &&
+ abs(m[2][1] - m[1][2]) < epsilon;
+
+ if(nearSymmetrical)
+ {
+ bool const nearIdentity =
+ abs(m[1][0] + m[0][1]) < epsilon &&
+ abs(m[2][0] + m[0][2]) < epsilon &&
+ abs(m[2][1] + m[1][2]) < epsilon &&
+ abs(m[0][0] + m[1][1] + m[2][2] - T(3.0)) < epsilon;
+ if (nearIdentity)
+ {
+ angle = static_cast<T>(0.0);
+ axis = vec<3, T, Q>(
+ static_cast<T>(1.0), static_cast<T>(0.0), static_cast<T>(0.0));
+ return;
+ }
+ angle = pi<T>();
+ T xx = (m[0][0] + static_cast<T>(1.0)) * static_cast<T>(0.5);
+ T yy = (m[1][1] + static_cast<T>(1.0)) * static_cast<T>(0.5);
+ T zz = (m[2][2] + static_cast<T>(1.0)) * static_cast<T>(0.5);
+ T xy = (m[1][0] + m[0][1]) * static_cast<T>(0.25);
+ T xz = (m[2][0] + m[0][2]) * static_cast<T>(0.25);
+ T yz = (m[2][1] + m[1][2]) * static_cast<T>(0.25);
+ if((xx > yy) && (xx > zz))
+ {
+ if(xx < epsilon)
+ {
+ axis.x = static_cast<T>(0.0);
+ axis.y = static_cast<T>(0.7071);
+ axis.z = static_cast<T>(0.7071);
+ }
+ else
+ {
+ axis.x = sqrt(xx);
+ axis.y = xy / axis.x;
+ axis.z = xz / axis.x;
+ }
+ }
+ else if (yy > zz)
+ {
+ if(yy < epsilon)
+ {
+ axis.x = static_cast<T>(0.7071);
+ axis.y = static_cast<T>(0.0);
+ axis.z = static_cast<T>(0.7071);
+ }
+ else
+ {
+ axis.y = sqrt(yy);
+ axis.x = xy / axis.y;
+ axis.z = yz / axis.y;
+ }
+ }
+ else
+ {
+ if (zz < epsilon)
+ {
+ axis.x = static_cast<T>(0.7071);
+ axis.y = static_cast<T>(0.7071);
+ axis.z = static_cast<T>(0.0);
+ }
+ else
+ {
+ axis.z = sqrt(zz);
+ axis.x = xz / axis.z;
+ axis.y = yz / axis.z;
+ }
+ }
+ return;
+ }
+
+ T const angleCos = (m[0][0] + m[1][1] + m[2][2] - static_cast<T>(1)) * static_cast<T>(0.5);
+ if(angleCos >= static_cast<T>(1.0))
+ {
+ angle = static_cast<T>(0.0);
+ }
+ else if (angleCos <= static_cast<T>(-1.0))
+ {
+ angle = pi<T>();
+ }
+ else
+ {
+ angle = acos(angleCos);
+ }
+
+ axis = glm::normalize(glm::vec<3, T, Q>(
+ m[1][2] - m[2][1], m[2][0] - m[0][2], m[0][1] - m[1][0]));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> axisAngleMatrix(vec<3, T, Q> const& axis, T const angle)
+ {
+ T c = cos(angle);
+ T s = sin(angle);
+ T t = static_cast<T>(1) - c;
+ vec<3, T, Q> n = normalize(axis);
+
+ return mat<4, 4, T, Q>(
+ t * n.x * n.x + c, t * n.x * n.y + n.z * s, t * n.x * n.z - n.y * s, static_cast<T>(0.0),
+ t * n.x * n.y - n.z * s, t * n.y * n.y + c, t * n.y * n.z + n.x * s, static_cast<T>(0.0),
+ t * n.x * n.z + n.y * s, t * n.y * n.z - n.x * s, t * n.z * n.z + c, static_cast<T>(0.0),
+ static_cast<T>(0.0), static_cast<T>(0.0), static_cast<T>(0.0), static_cast<T>(1.0));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> extractMatrixRotation(mat<4, 4, T, Q> const& m)
+ {
+ return mat<4, 4, T, Q>(
+ m[0][0], m[0][1], m[0][2], static_cast<T>(0.0),
+ m[1][0], m[1][1], m[1][2], static_cast<T>(0.0),
+ m[2][0], m[2][1], m[2][2], static_cast<T>(0.0),
+ static_cast<T>(0.0), static_cast<T>(0.0), static_cast<T>(0.0), static_cast<T>(1.0));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> interpolate(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const delta)
+ {
+ mat<4, 4, T, Q> m1rot = extractMatrixRotation(m1);
+ mat<4, 4, T, Q> dltRotation = m2 * transpose(m1rot);
+ vec<3, T, Q> dltAxis;
+ T dltAngle;
+ axisAngle(dltRotation, dltAxis, dltAngle);
+ mat<4, 4, T, Q> out = axisAngleMatrix(dltAxis, dltAngle * delta) * m1rot;
+ out[3][0] = m1[3][0] + delta * (m2[3][0] - m1[3][0]);
+ out[3][1] = m1[3][1] + delta * (m2[3][1] - m1[3][1]);
+ out[3][2] = m1[3][2] + delta * (m2[3][2] - m1[3][2]);
+ return out;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_major_storage.hpp b/3rdparty/glm/source/glm/gtx/matrix_major_storage.hpp
new file mode 100644
index 0000000..8c6bc22
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_major_storage.hpp
@@ -0,0 +1,119 @@
+/// @ref gtx_matrix_major_storage
+/// @file glm/gtx/matrix_major_storage.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_matrix_major_storage GLM_GTX_matrix_major_storage
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_major_storage.hpp> to use the features of this extension.
+///
+/// Build matrices with specific matrix order, row or column
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_major_storage is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_major_storage extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_major_storage
+ /// @{
+
+ //! Build a row major matrix from row vectors.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2(
+ vec<2, T, Q> const& v1,
+ vec<2, T, Q> const& v2);
+
+ //! Build a row major matrix from other matrix.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2(
+ mat<2, 2, T, Q> const& m);
+
+ //! Build a row major matrix from row vectors.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3(
+ vec<3, T, Q> const& v1,
+ vec<3, T, Q> const& v2,
+ vec<3, T, Q> const& v3);
+
+ //! Build a row major matrix from other matrix.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3(
+ mat<3, 3, T, Q> const& m);
+
+ //! Build a row major matrix from row vectors.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4(
+ vec<4, T, Q> const& v1,
+ vec<4, T, Q> const& v2,
+ vec<4, T, Q> const& v3,
+ vec<4, T, Q> const& v4);
+
+ //! Build a row major matrix from other matrix.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4(
+ mat<4, 4, T, Q> const& m);
+
+ //! Build a column major matrix from column vectors.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2(
+ vec<2, T, Q> const& v1,
+ vec<2, T, Q> const& v2);
+
+ //! Build a column major matrix from other matrix.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2(
+ mat<2, 2, T, Q> const& m);
+
+ //! Build a column major matrix from column vectors.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3(
+ vec<3, T, Q> const& v1,
+ vec<3, T, Q> const& v2,
+ vec<3, T, Q> const& v3);
+
+ //! Build a column major matrix from other matrix.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3(
+ mat<3, 3, T, Q> const& m);
+
+ //! Build a column major matrix from column vectors.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4(
+ vec<4, T, Q> const& v1,
+ vec<4, T, Q> const& v2,
+ vec<4, T, Q> const& v3,
+ vec<4, T, Q> const& v4);
+
+ //! Build a column major matrix from other matrix.
+ //! From GLM_GTX_matrix_major_storage extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4(
+ mat<4, 4, T, Q> const& m);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_major_storage.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_major_storage.inl b/3rdparty/glm/source/glm/gtx/matrix_major_storage.inl
new file mode 100644
index 0000000..279dd34
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_major_storage.inl
@@ -0,0 +1,166 @@
+/// @ref gtx_matrix_major_storage
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2
+ (
+ vec<2, T, Q> const& v1,
+ vec<2, T, Q> const& v2
+ )
+ {
+ mat<2, 2, T, Q> Result;
+ Result[0][0] = v1.x;
+ Result[1][0] = v1.y;
+ Result[0][1] = v2.x;
+ Result[1][1] = v2.y;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2(
+ const mat<2, 2, T, Q>& m)
+ {
+ mat<2, 2, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3(
+ const vec<3, T, Q>& v1,
+ const vec<3, T, Q>& v2,
+ const vec<3, T, Q>& v3)
+ {
+ mat<3, 3, T, Q> Result;
+ Result[0][0] = v1.x;
+ Result[1][0] = v1.y;
+ Result[2][0] = v1.z;
+ Result[0][1] = v2.x;
+ Result[1][1] = v2.y;
+ Result[2][1] = v2.z;
+ Result[0][2] = v3.x;
+ Result[1][2] = v3.y;
+ Result[2][2] = v3.z;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3(
+ const mat<3, 3, T, Q>& m)
+ {
+ mat<3, 3, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[2][2] = m[2][2];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4(
+ const vec<4, T, Q>& v1,
+ const vec<4, T, Q>& v2,
+ const vec<4, T, Q>& v3,
+ const vec<4, T, Q>& v4)
+ {
+ mat<4, 4, T, Q> Result;
+ Result[0][0] = v1.x;
+ Result[1][0] = v1.y;
+ Result[2][0] = v1.z;
+ Result[3][0] = v1.w;
+ Result[0][1] = v2.x;
+ Result[1][1] = v2.y;
+ Result[2][1] = v2.z;
+ Result[3][1] = v2.w;
+ Result[0][2] = v3.x;
+ Result[1][2] = v3.y;
+ Result[2][2] = v3.z;
+ Result[3][2] = v3.w;
+ Result[0][3] = v4.x;
+ Result[1][3] = v4.y;
+ Result[2][3] = v4.z;
+ Result[3][3] = v4.w;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4(
+ const mat<4, 4, T, Q>& m)
+ {
+ mat<4, 4, T, Q> Result;
+ Result[0][0] = m[0][0];
+ Result[0][1] = m[1][0];
+ Result[0][2] = m[2][0];
+ Result[0][3] = m[3][0];
+ Result[1][0] = m[0][1];
+ Result[1][1] = m[1][1];
+ Result[1][2] = m[2][1];
+ Result[1][3] = m[3][1];
+ Result[2][0] = m[0][2];
+ Result[2][1] = m[1][2];
+ Result[2][2] = m[2][2];
+ Result[2][3] = m[3][2];
+ Result[3][0] = m[0][3];
+ Result[3][1] = m[1][3];
+ Result[3][2] = m[2][3];
+ Result[3][3] = m[3][3];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2(
+ const vec<2, T, Q>& v1,
+ const vec<2, T, Q>& v2)
+ {
+ return mat<2, 2, T, Q>(v1, v2);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2(
+ const mat<2, 2, T, Q>& m)
+ {
+ return mat<2, 2, T, Q>(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3(
+ const vec<3, T, Q>& v1,
+ const vec<3, T, Q>& v2,
+ const vec<3, T, Q>& v3)
+ {
+ return mat<3, 3, T, Q>(v1, v2, v3);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3(
+ const mat<3, 3, T, Q>& m)
+ {
+ return mat<3, 3, T, Q>(m);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4(
+ const vec<4, T, Q>& v1,
+ const vec<4, T, Q>& v2,
+ const vec<4, T, Q>& v3,
+ const vec<4, T, Q>& v4)
+ {
+ return mat<4, 4, T, Q>(v1, v2, v3, v4);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4(
+ const mat<4, 4, T, Q>& m)
+ {
+ return mat<4, 4, T, Q>(m);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_operation.hpp b/3rdparty/glm/source/glm/gtx/matrix_operation.hpp
new file mode 100644
index 0000000..de6ff1f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_operation.hpp
@@ -0,0 +1,103 @@
+/// @ref gtx_matrix_operation
+/// @file glm/gtx/matrix_operation.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_matrix_operation GLM_GTX_matrix_operation
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_operation.hpp> to use the features of this extension.
+///
+/// Build diagonal matrices from vectors.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_operation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_operation extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_operation
+ /// @{
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> diagonal2x2(
+ vec<2, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 3, T, Q> diagonal2x3(
+ vec<2, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 4, T, Q> diagonal2x4(
+ vec<2, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 2, T, Q> diagonal3x2(
+ vec<2, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> diagonal3x3(
+ vec<3, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 4, T, Q> diagonal3x4(
+ vec<3, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 2, T, Q> diagonal4x2(
+ vec<2, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 3, T, Q> diagonal4x3(
+ vec<3, T, Q> const& v);
+
+ //! Build a diagonal matrix.
+ //! From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> diagonal4x4(
+ vec<4, T, Q> const& v);
+
+ /// Build an adjugate matrix.
+ /// From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m);
+
+ /// Build an adjugate matrix.
+ /// From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m);
+
+ /// Build an adjugate matrix.
+ /// From GLM_GTX_matrix_operation extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_operation.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_operation.inl b/3rdparty/glm/source/glm/gtx/matrix_operation.inl
new file mode 100644
index 0000000..a4f4a85
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_operation.inl
@@ -0,0 +1,176 @@
+/// @ref gtx_matrix_operation
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> diagonal2x2
+ (
+ vec<2, T, Q> const& v
+ )
+ {
+ mat<2, 2, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 3, T, Q> diagonal2x3
+ (
+ vec<2, T, Q> const& v
+ )
+ {
+ mat<2, 3, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 4, T, Q> diagonal2x4
+ (
+ vec<2, T, Q> const& v
+ )
+ {
+ mat<2, 4, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 2, T, Q> diagonal3x2
+ (
+ vec<2, T, Q> const& v
+ )
+ {
+ mat<3, 2, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> diagonal3x3
+ (
+ vec<3, T, Q> const& v
+ )
+ {
+ mat<3, 3, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ Result[2][2] = v[2];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 4, T, Q> diagonal3x4
+ (
+ vec<3, T, Q> const& v
+ )
+ {
+ mat<3, 4, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ Result[2][2] = v[2];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> diagonal4x4
+ (
+ vec<4, T, Q> const& v
+ )
+ {
+ mat<4, 4, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ Result[2][2] = v[2];
+ Result[3][3] = v[3];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 3, T, Q> diagonal4x3
+ (
+ vec<3, T, Q> const& v
+ )
+ {
+ mat<4, 3, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ Result[2][2] = v[2];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 2, T, Q> diagonal4x2
+ (
+ vec<2, T, Q> const& v
+ )
+ {
+ mat<4, 2, T, Q> Result(static_cast<T>(1));
+ Result[0][0] = v[0];
+ Result[1][1] = v[1];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m)
+ {
+ return mat<2, 2, T, Q>(
+ +m[1][1], -m[0][1],
+ -m[1][0], +m[0][0]);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m)
+ {
+ T const m00 = determinant(mat<2, 2, T, Q>(m[1][1], m[2][1], m[1][2], m[2][2]));
+ T const m01 = determinant(mat<2, 2, T, Q>(m[0][1], m[2][1], m[0][2], m[2][2]));
+ T const m02 = determinant(mat<2, 2, T, Q>(m[0][1], m[1][1], m[0][2], m[1][2]));
+
+ T const m10 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][2], m[2][2]));
+ T const m11 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][2], m[2][2]));
+ T const m12 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][2], m[1][2]));
+
+ T const m20 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][1], m[2][1]));
+ T const m21 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][1], m[2][1]));
+ T const m22 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][1], m[1][1]));
+
+ return mat<3, 3, T, Q>(
+ +m00, -m01, +m02,
+ -m10, +m11, -m12,
+ +m20, -m21, +m22);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m)
+ {
+ T const m00 = determinant(mat<3, 3, T, Q>(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]));
+ T const m01 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]));
+ T const m02 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]));
+ T const m03 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]));
+
+ T const m10 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]));
+ T const m11 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]));
+ T const m12 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]));
+ T const m13 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]));
+
+ T const m20 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]));
+ T const m21 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]));
+ T const m22 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]));
+ T const m23 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]));
+
+ T const m30 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]));
+ T const m31 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]));
+ T const m32 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]));
+ T const m33 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]));
+
+ return mat<4, 4, T, Q>(
+ +m00, -m10, +m20, -m30,
+ -m01, +m11, -m21, +m31,
+ +m02, -m12, +m22, -m32,
+ -m03, +m13, -m23, +m33);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_query.hpp b/3rdparty/glm/source/glm/gtx/matrix_query.hpp
new file mode 100644
index 0000000..8011b2b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_query.hpp
@@ -0,0 +1,77 @@
+/// @ref gtx_matrix_query
+/// @file glm/gtx/matrix_query.hpp
+///
+/// @see core (dependence)
+/// @see gtx_vector_query (dependence)
+///
+/// @defgroup gtx_matrix_query GLM_GTX_matrix_query
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_query.hpp> to use the features of this extension.
+///
+/// Query to evaluate matrix properties
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/vector_query.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_query extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_query
+ /// @{
+
+ /// Return whether a matrix is a null matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is a null matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is a null matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is an identity matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<length_t C, length_t R, typename T, qualifier Q, template<length_t, length_t, typename, qualifier> class matType>
+ GLM_FUNC_DECL bool isIdentity(matType<C, R, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is a normalized matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is a normalized matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is a normalized matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon);
+
+ /// Return whether a matrix is an orthonormalized matrix.
+ /// From GLM_GTX_matrix_query extension.
+ template<length_t C, length_t R, typename T, qualifier Q, template<length_t, length_t, typename, qualifier> class matType>
+ GLM_FUNC_DECL bool isOrthogonal(matType<C, R, T, Q> const& m, T const& epsilon);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_query.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_query.inl b/3rdparty/glm/source/glm/gtx/matrix_query.inl
new file mode 100644
index 0000000..b763c1a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_query.inl
@@ -0,0 +1,113 @@
+/// @ref gtx_matrix_query
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon)
+ {
+ bool result = true;
+ for(length_t i = 0; result && i < m.length() ; ++i)
+ result = isNull(m[i], epsilon);
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon)
+ {
+ bool result = true;
+ for(length_t i = 0; result && i < m.length() ; ++i)
+ result = isNull(m[i], epsilon);
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon)
+ {
+ bool result = true;
+ for(length_t i = 0; result && i < m.length() ; ++i)
+ result = isNull(m[i], epsilon);
+ return result;
+ }
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isIdentity(mat<C, R, T, Q> const& m, T const& epsilon)
+ {
+ bool result = true;
+ for(length_t i = 0; result && i < m.length(); ++i)
+ {
+ for(length_t j = 0; result && j < glm::min(i, m[0].length()); ++j)
+ result = abs(m[i][j]) <= epsilon;
+ if(result && i < m[0].length())
+ result = abs(m[i][i] - 1) <= epsilon;
+ for(length_t j = i + 1; result && j < m[0].length(); ++j)
+ result = abs(m[i][j]) <= epsilon;
+ }
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon)
+ {
+ bool result(true);
+ for(length_t i = 0; result && i < m.length(); ++i)
+ result = isNormalized(m[i], epsilon);
+ for(length_t i = 0; result && i < m.length(); ++i)
+ {
+ typename mat<2, 2, T, Q>::col_type v;
+ for(length_t j = 0; j < m.length(); ++j)
+ v[j] = m[j][i];
+ result = isNormalized(v, epsilon);
+ }
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon)
+ {
+ bool result(true);
+ for(length_t i = 0; result && i < m.length(); ++i)
+ result = isNormalized(m[i], epsilon);
+ for(length_t i = 0; result && i < m.length(); ++i)
+ {
+ typename mat<3, 3, T, Q>::col_type v;
+ for(length_t j = 0; j < m.length(); ++j)
+ v[j] = m[j][i];
+ result = isNormalized(v, epsilon);
+ }
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon)
+ {
+ bool result(true);
+ for(length_t i = 0; result && i < m.length(); ++i)
+ result = isNormalized(m[i], epsilon);
+ for(length_t i = 0; result && i < m.length(); ++i)
+ {
+ typename mat<4, 4, T, Q>::col_type v;
+ for(length_t j = 0; j < m.length(); ++j)
+ v[j] = m[j][i];
+ result = isNormalized(v, epsilon);
+ }
+ return result;
+ }
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isOrthogonal(mat<C, R, T, Q> const& m, T const& epsilon)
+ {
+ bool result = true;
+ for(length_t i(0); result && i < m.length() - 1; ++i)
+ for(length_t j(i + 1); result && j < m.length(); ++j)
+ result = areOrthogonal(m[i], m[j], epsilon);
+
+ if(result)
+ {
+ mat<C, R, T, Q> tmp = transpose(m);
+ for(length_t i(0); result && i < m.length() - 1 ; ++i)
+ for(length_t j(i + 1); result && j < m.length(); ++j)
+ result = areOrthogonal(tmp[i], tmp[j], epsilon);
+ }
+ return result;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/matrix_transform_2d.hpp b/3rdparty/glm/source/glm/gtx/matrix_transform_2d.hpp
new file mode 100644
index 0000000..5f9c540
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_transform_2d.hpp
@@ -0,0 +1,81 @@
+/// @ref gtx_matrix_transform_2d
+/// @file glm/gtx/matrix_transform_2d.hpp
+/// @author Miguel Ángel Pérez Martínez
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_matrix_transform_2d GLM_GTX_matrix_transform_2d
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_transform_2d.hpp> to use the features of this extension.
+///
+/// Defines functions that generate common 2d transformation matrices.
+
+#pragma once
+
+// Dependency:
+#include "../mat3x3.hpp"
+#include "../vec2.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_transform_2d is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_transform_2d extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_matrix_transform_2d
+ /// @{
+
+ /// Builds a translation 3 * 3 matrix created from a vector of 2 components.
+ ///
+ /// @param m Input matrix multiplied by this translation matrix.
+ /// @param v Coordinates of a translation vector.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate(
+ mat<3, 3, T, Q> const& m,
+ vec<2, T, Q> const& v);
+
+ /// Builds a rotation 3 * 3 matrix created from an angle.
+ ///
+ /// @param m Input matrix multiplied by this translation matrix.
+ /// @param angle Rotation angle expressed in radians.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate(
+ mat<3, 3, T, Q> const& m,
+ T angle);
+
+ /// Builds a scale 3 * 3 matrix created from a vector of 2 components.
+ ///
+ /// @param m Input matrix multiplied by this translation matrix.
+ /// @param v Coordinates of a scale vector.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale(
+ mat<3, 3, T, Q> const& m,
+ vec<2, T, Q> const& v);
+
+ /// Builds a horizontal (parallel to the x axis) shear 3 * 3 matrix.
+ ///
+ /// @param m Input matrix multiplied by this translation matrix.
+ /// @param y Shear factor.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX(
+ mat<3, 3, T, Q> const& m,
+ T y);
+
+ /// Builds a vertical (parallel to the y axis) shear 3 * 3 matrix.
+ ///
+ /// @param m Input matrix multiplied by this translation matrix.
+ /// @param x Shear factor.
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY(
+ mat<3, 3, T, Q> const& m,
+ T x);
+
+ /// @}
+}//namespace glm
+
+#include "matrix_transform_2d.inl"
diff --git a/3rdparty/glm/source/glm/gtx/matrix_transform_2d.inl b/3rdparty/glm/source/glm/gtx/matrix_transform_2d.inl
new file mode 100644
index 0000000..a68d24d
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/matrix_transform_2d.inl
@@ -0,0 +1,68 @@
+/// @ref gtx_matrix_transform_2d
+/// @author Miguel Ángel Pérez Martínez
+
+#include "../trigonometric.hpp"
+
+namespace glm
+{
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate(
+ mat<3, 3, T, Q> const& m,
+ vec<2, T, Q> const& v)
+ {
+ mat<3, 3, T, Q> Result(m);
+ Result[2] = m[0] * v[0] + m[1] * v[1] + m[2];
+ return Result;
+ }
+
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate(
+ mat<3, 3, T, Q> const& m,
+ T angle)
+ {
+ T const a = angle;
+ T const c = cos(a);
+ T const s = sin(a);
+
+ mat<3, 3, T, Q> Result;
+ Result[0] = m[0] * c + m[1] * s;
+ Result[1] = m[0] * -s + m[1] * c;
+ Result[2] = m[2];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale(
+ mat<3, 3, T, Q> const& m,
+ vec<2, T, Q> const& v)
+ {
+ mat<3, 3, T, Q> Result;
+ Result[0] = m[0] * v[0];
+ Result[1] = m[1] * v[1];
+ Result[2] = m[2];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX(
+ mat<3, 3, T, Q> const& m,
+ T y)
+ {
+ mat<3, 3, T, Q> Result(1);
+ Result[0][1] = y;
+ return m * Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY(
+ mat<3, 3, T, Q> const& m,
+ T x)
+ {
+ mat<3, 3, T, Q> Result(1);
+ Result[1][0] = x;
+ return m * Result;
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/mixed_product.hpp b/3rdparty/glm/source/glm/gtx/mixed_product.hpp
new file mode 100644
index 0000000..b242e35
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/mixed_product.hpp
@@ -0,0 +1,41 @@
+/// @ref gtx_mixed_product
+/// @file glm/gtx/mixed_product.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_mixed_product GLM_GTX_mixed_product
+/// @ingroup gtx
+///
+/// Include <glm/gtx/mixed_product.hpp> to use the features of this extension.
+///
+/// Mixed product of 3 vectors.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_mixed_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_mixed_product extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_mixed_product
+ /// @{
+
+ /// @brief Mixed product of 3 vectors (from GLM_GTX_mixed_product extension)
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T mixedProduct(
+ vec<3, T, Q> const& v1,
+ vec<3, T, Q> const& v2,
+ vec<3, T, Q> const& v3);
+
+ /// @}
+}// namespace glm
+
+#include "mixed_product.inl"
diff --git a/3rdparty/glm/source/glm/gtx/mixed_product.inl b/3rdparty/glm/source/glm/gtx/mixed_product.inl
new file mode 100644
index 0000000..e5cdbdb
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/mixed_product.inl
@@ -0,0 +1,15 @@
+/// @ref gtx_mixed_product
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T mixedProduct
+ (
+ vec<3, T, Q> const& v1,
+ vec<3, T, Q> const& v2,
+ vec<3, T, Q> const& v3
+ )
+ {
+ return dot(cross(v1, v2), v3);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/norm.hpp b/3rdparty/glm/source/glm/gtx/norm.hpp
new file mode 100644
index 0000000..dfaebb7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/norm.hpp
@@ -0,0 +1,88 @@
+/// @ref gtx_norm
+/// @file glm/gtx/norm.hpp
+///
+/// @see core (dependence)
+/// @see gtx_quaternion (dependence)
+/// @see gtx_component_wise (dependence)
+///
+/// @defgroup gtx_norm GLM_GTX_norm
+/// @ingroup gtx
+///
+/// Include <glm/gtx/norm.hpp> to use the features of this extension.
+///
+/// Various ways to compute vector norms.
+
+#pragma once
+
+// Dependency:
+#include "../geometric.hpp"
+#include "../gtx/quaternion.hpp"
+#include "../gtx/component_wise.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_norm is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_norm extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_norm
+ /// @{
+
+ /// Returns the squared length of x.
+ /// From GLM_GTX_norm extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T length2(vec<L, T, Q> const& x);
+
+ /// Returns the squared distance between p0 and p1, i.e., length2(p0 - p1).
+ /// From GLM_GTX_norm extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T distance2(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1);
+
+ //! Returns the L1 norm between x and y.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
+
+ //! Returns the L1 norm of v.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& v);
+
+ //! Returns the L2 norm between x and y.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
+
+ //! Returns the L2 norm of v.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x);
+
+ //! Returns the L norm between x and y.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth);
+
+ //! Returns the L norm of v.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, unsigned int Depth);
+
+ //! Returns the LMax norm between x and y.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
+
+ //! Returns the LMax norm of v.
+ //! From GLM_GTX_norm extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x);
+
+ /// @}
+}//namespace glm
+
+#include "norm.inl"
diff --git a/3rdparty/glm/source/glm/gtx/norm.inl b/3rdparty/glm/source/glm/gtx/norm.inl
new file mode 100644
index 0000000..6db561b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/norm.inl
@@ -0,0 +1,95 @@
+/// @ref gtx_norm
+
+#include "../detail/qualifier.hpp"
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q, bool Aligned>
+ struct compute_length2
+ {
+ GLM_FUNC_QUALIFIER static T call(vec<L, T, Q> const& v)
+ {
+ return dot(v, v);
+ }
+ };
+}//namespace detail
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType length2(genType x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'length2' accepts only floating-point inputs");
+ return x * x;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T length2(vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'length2' accepts only floating-point inputs");
+ return detail::compute_length2<L, T, Q, detail::is_aligned<Q>::value>::call(v);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T distance2(T p0, T p1)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'distance2' accepts only floating-point inputs");
+ return length2(p1 - p0);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T distance2(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'distance2' accepts only floating-point inputs");
+ return length2(p1 - p0);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b)
+ {
+ return abs(b.x - a.x) + abs(b.y - a.y) + abs(b.z - a.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& v)
+ {
+ return abs(v.x) + abs(v.y) + abs(v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b
+ )
+ {
+ return length(b - a);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& v)
+ {
+ return length(v);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth)
+ {
+ return pow(pow(abs(y.x - x.x), T(Depth)) + pow(abs(y.y - x.y), T(Depth)) + pow(abs(y.z - x.z), T(Depth)), T(1) / T(Depth));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& v, unsigned int Depth)
+ {
+ return pow(pow(abs(v.x), T(Depth)) + pow(abs(v.y), T(Depth)) + pow(abs(v.z), T(Depth)), T(1) / T(Depth));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& a, vec<3, T, Q> const& b)
+ {
+ return compMax(abs(b - a));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& v)
+ {
+ return compMax(abs(v));
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/normal.hpp b/3rdparty/glm/source/glm/gtx/normal.hpp
new file mode 100644
index 0000000..068682f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/normal.hpp
@@ -0,0 +1,41 @@
+/// @ref gtx_normal
+/// @file glm/gtx/normal.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extended_min_max (dependence)
+///
+/// @defgroup gtx_normal GLM_GTX_normal
+/// @ingroup gtx
+///
+/// Include <glm/gtx/normal.hpp> to use the features of this extension.
+///
+/// Compute the normal of a triangle.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_normal is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_normal extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_normal
+ /// @{
+
+ /// Computes triangle normal from triangle points.
+ ///
+ /// @see gtx_normal
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> triangleNormal(vec<3, T, Q> const& p1, vec<3, T, Q> const& p2, vec<3, T, Q> const& p3);
+
+ /// @}
+}//namespace glm
+
+#include "normal.inl"
diff --git a/3rdparty/glm/source/glm/gtx/normal.inl b/3rdparty/glm/source/glm/gtx/normal.inl
new file mode 100644
index 0000000..74f9fc9
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/normal.inl
@@ -0,0 +1,15 @@
+/// @ref gtx_normal
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> triangleNormal
+ (
+ vec<3, T, Q> const& p1,
+ vec<3, T, Q> const& p2,
+ vec<3, T, Q> const& p3
+ )
+ {
+ return normalize(cross(p1 - p2, p1 - p3));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/normalize_dot.hpp b/3rdparty/glm/source/glm/gtx/normalize_dot.hpp
new file mode 100644
index 0000000..5195802
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/normalize_dot.hpp
@@ -0,0 +1,49 @@
+/// @ref gtx_normalize_dot
+/// @file glm/gtx/normalize_dot.hpp
+///
+/// @see core (dependence)
+/// @see gtx_fast_square_root (dependence)
+///
+/// @defgroup gtx_normalize_dot GLM_GTX_normalize_dot
+/// @ingroup gtx
+///
+/// Include <glm/gtx/normalize_dot.hpp> to use the features of this extension.
+///
+/// Dot product of vectors that need to be normalized, with a single square root.
+
+#pragma once
+
+// Dependency:
+#include "../gtx/fast_square_root.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_normalize_dot is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_normalize_dot extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_normalize_dot
+ /// @{
+
+ /// Normalizes the parameters and returns the dot product of x and y.
+ /// It's faster than dot(normalize(x), normalize(y)).
+ ///
+ /// @see gtx_normalize_dot extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T normalizeDot(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Normalizes the parameters and returns the dot product of x and y.
+ /// Faster than dot(fastNormalize(x), fastNormalize(y)).
+ ///
+ /// @see gtx_normalize_dot extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T fastNormalizeDot(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// @}
+}//namespace glm
+
+#include "normalize_dot.inl"
diff --git a/3rdparty/glm/source/glm/gtx/normalize_dot.inl b/3rdparty/glm/source/glm/gtx/normalize_dot.inl
new file mode 100644
index 0000000..7bcd9a5
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/normalize_dot.inl
@@ -0,0 +1,16 @@
+/// @ref gtx_normalize_dot
+
+namespace glm
+{
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T normalizeDot(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return glm::dot(x, y) * glm::inversesqrt(glm::dot(x, x) * glm::dot(y, y));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T fastNormalizeDot(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ return glm::dot(x, y) * glm::fastInverseSqrt(glm::dot(x, x) * glm::dot(y, y));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/number_precision.hpp b/3rdparty/glm/source/glm/gtx/number_precision.hpp
new file mode 100644
index 0000000..3a606bd
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/number_precision.hpp
@@ -0,0 +1,61 @@
+/// @ref gtx_number_precision
+/// @file glm/gtx/number_precision.hpp
+///
+/// @see core (dependence)
+/// @see gtc_type_precision (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_number_precision GLM_GTX_number_precision
+/// @ingroup gtx
+///
+/// Include <glm/gtx/number_precision.hpp> to use the features of this extension.
+///
+/// Defined size types.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/type_precision.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_number_precision is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_number_precision extension included")
+# endif
+#endif
+
+namespace glm{
+namespace gtx
+{
+ /////////////////////////////
+ // Unsigned int vector types
+
+ /// @addtogroup gtx_number_precision
+ /// @{
+
+ typedef u8 u8vec1; //!< \brief 8bit unsigned integer scalar. (from GLM_GTX_number_precision extension)
+ typedef u16 u16vec1; //!< \brief 16bit unsigned integer scalar. (from GLM_GTX_number_precision extension)
+ typedef u32 u32vec1; //!< \brief 32bit unsigned integer scalar. (from GLM_GTX_number_precision extension)
+ typedef u64 u64vec1; //!< \brief 64bit unsigned integer scalar. (from GLM_GTX_number_precision extension)
+
+ //////////////////////
+ // Float vector types
+
+ typedef f32 f32vec1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension)
+ typedef f64 f64vec1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension)
+
+ //////////////////////
+ // Float matrix types
+
+ typedef f32 f32mat1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension)
+ typedef f32 f32mat1x1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension)
+ typedef f64 f64mat1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension)
+ typedef f64 f64mat1x1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension)
+
+ /// @}
+}//namespace gtx
+}//namespace glm
+
+#include "number_precision.inl"
diff --git a/3rdparty/glm/source/glm/gtx/number_precision.inl b/3rdparty/glm/source/glm/gtx/number_precision.inl
new file mode 100644
index 0000000..b39d71c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/number_precision.inl
@@ -0,0 +1,6 @@
+/// @ref gtx_number_precision
+
+namespace glm
+{
+
+}
diff --git a/3rdparty/glm/source/glm/gtx/optimum_pow.hpp b/3rdparty/glm/source/glm/gtx/optimum_pow.hpp
new file mode 100644
index 0000000..9284a47
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/optimum_pow.hpp
@@ -0,0 +1,54 @@
+/// @ref gtx_optimum_pow
+/// @file glm/gtx/optimum_pow.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_optimum_pow GLM_GTX_optimum_pow
+/// @ingroup gtx
+///
+/// Include <glm/gtx/optimum_pow.hpp> to use the features of this extension.
+///
+/// Integer exponentiation of power functions.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_optimum_pow is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_optimum_pow extension included")
+# endif
+#endif
+
+namespace glm{
+namespace gtx
+{
+ /// @addtogroup gtx_optimum_pow
+ /// @{
+
+ /// Returns x raised to the power of 2.
+ ///
+ /// @see gtx_optimum_pow
+ template<typename genType>
+ GLM_FUNC_DECL genType pow2(genType const& x);
+
+ /// Returns x raised to the power of 3.
+ ///
+ /// @see gtx_optimum_pow
+ template<typename genType>
+ GLM_FUNC_DECL genType pow3(genType const& x);
+
+ /// Returns x raised to the power of 4.
+ ///
+ /// @see gtx_optimum_pow
+ template<typename genType>
+ GLM_FUNC_DECL genType pow4(genType const& x);
+
+ /// @}
+}//namespace gtx
+}//namespace glm
+
+#include "optimum_pow.inl"
diff --git a/3rdparty/glm/source/glm/gtx/optimum_pow.inl b/3rdparty/glm/source/glm/gtx/optimum_pow.inl
new file mode 100644
index 0000000..a26c19c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/optimum_pow.inl
@@ -0,0 +1,22 @@
+/// @ref gtx_optimum_pow
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType pow2(genType const& x)
+ {
+ return x * x;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType pow3(genType const& x)
+ {
+ return x * x * x;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType pow4(genType const& x)
+ {
+ return (x * x) * (x * x);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/orthonormalize.hpp b/3rdparty/glm/source/glm/gtx/orthonormalize.hpp
new file mode 100644
index 0000000..3e004fb
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/orthonormalize.hpp
@@ -0,0 +1,49 @@
+/// @ref gtx_orthonormalize
+/// @file glm/gtx/orthonormalize.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extended_min_max (dependence)
+///
+/// @defgroup gtx_orthonormalize GLM_GTX_orthonormalize
+/// @ingroup gtx
+///
+/// Include <glm/gtx/orthonormalize.hpp> to use the features of this extension.
+///
+/// Orthonormalize matrices.
+
+#pragma once
+
+// Dependency:
+#include "../vec3.hpp"
+#include "../mat3x3.hpp"
+#include "../geometric.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_orthonormalize is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_orthonormalize extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_orthonormalize
+ /// @{
+
+ /// Returns the orthonormalized matrix of m.
+ ///
+ /// @see gtx_orthonormalize
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m);
+
+ /// Orthonormalizes x according to y.
+ ///
+ /// @see gtx_orthonormalize
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
+
+ /// @}
+}//namespace glm
+
+#include "orthonormalize.inl"
diff --git a/3rdparty/glm/source/glm/gtx/orthonormalize.inl b/3rdparty/glm/source/glm/gtx/orthonormalize.inl
new file mode 100644
index 0000000..cb553ba
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/orthonormalize.inl
@@ -0,0 +1,29 @@
+/// @ref gtx_orthonormalize
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m)
+ {
+ mat<3, 3, T, Q> r = m;
+
+ r[0] = normalize(r[0]);
+
+ T d0 = dot(r[0], r[1]);
+ r[1] -= r[0] * d0;
+ r[1] = normalize(r[1]);
+
+ T d1 = dot(r[1], r[2]);
+ d0 = dot(r[0], r[2]);
+ r[2] -= r[0] * d0 + r[1] * d1;
+ r[2] = normalize(r[2]);
+
+ return r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y)
+ {
+ return normalize(x - y * dot(y, x));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/pca.hpp b/3rdparty/glm/source/glm/gtx/pca.hpp
new file mode 100644
index 0000000..93da745
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/pca.hpp
@@ -0,0 +1,111 @@
+/// @ref gtx_pca
+/// @file glm/gtx/pca.hpp
+///
+/// @see core (dependence)
+/// @see ext_scalar_relational (dependence)
+///
+/// @defgroup gtx_pca GLM_GTX_pca
+/// @ingroup gtx
+///
+/// Include <glm/gtx/pca.hpp> to use the features of this extension.
+///
+/// Implements functions required for fundamental 'principal component analysis' in 2D, 3D, and 4D:
+/// 1) Computing a covariance matrix from a list of _relative_ position vectors
+/// 2) Compute the eigenvalues and eigenvectors of the covariance matrix
+/// This is useful, e.g., to compute an object-aligned bounding box from vertices of an object.
+/// https://en.wikipedia.org/wiki/Principal_component_analysis
+///
+/// Example:
+/// ```
+/// std::vector<glm::dvec3> ptData;
+/// // ... fill ptData with some point data, e.g. vertices
+///
+/// glm::dvec3 center = computeCenter(ptData);
+///
+/// glm::dmat3 covarMat = glm::computeCovarianceMatrix(ptData.data(), ptData.size(), center);
+///
+/// glm::dvec3 evals;
+/// glm::dmat3 evecs;
+/// int evcnt = glm::findEigenvaluesSymReal(covarMat, evals, evecs);
+///
+/// if(evcnt != 3)
+/// // ... error handling
+///
+/// glm::sortEigenvalues(evals, evecs);
+///
+/// // ... now evecs[0] points in the direction (symmetric) of the largest spatial distribution within ptData
+/// ```
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../ext/scalar_relational.hpp"
+
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_pca is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_pca extension included")
+# endif
+#endif
+
+namespace glm {
+ /// @addtogroup gtx_pca
+ /// @{
+
+ /// Compute a covariance matrix from an array of relative coordinates `v` (e.g., relative to the center of gravity of the object)
+ /// @param v Points to a memory holding `n` times vectors
+ template<length_t D, typename T, qualifier Q>
+ GLM_INLINE mat<D, D, T, Q> computeCovarianceMatrix(vec<D, T, Q> const* v, size_t n);
+
+ /// Compute a covariance matrix from an array of absolute coordinates `v` and a precomputed center of gravity `c`
+ /// @param v Points to a memory holding `n` times vectors
+ template<length_t D, typename T, qualifier Q>
+ GLM_INLINE mat<D, D, T, Q> computeCovarianceMatrix(vec<D, T, Q> const* v, size_t n, vec<D, T, Q> const& c);
+
+ /// Compute a covariance matrix from a pair of iterators `b` (begin) and `e` (end) of a container with relative coordinates (e.g., relative to the center of gravity of the object)
+ /// Dereferencing an iterator of type I must yield a `vec&lt;D, T, Q&gt;`
+ template<length_t D, typename T, qualifier Q, typename I>
+ GLM_FUNC_DECL mat<D, D, T, Q> computeCovarianceMatrix(I const& b, I const& e);
+
+ /// Compute a covariance matrix from a pair of iterators `b` (begin) and `e` (end) of a container with absolute coordinates and a precomputed center of gravity `c`
+ /// Dereferencing an iterator of type I must yield a `vec&lt;D, T, Q&gt;`
+ template<length_t D, typename T, qualifier Q, typename I>
+ GLM_FUNC_DECL mat<D, D, T, Q> computeCovarianceMatrix(I const& b, I const& e, vec<D, T, Q> const& c);
+
+ /// Assuming the provided covariance matrix `covarMat` is symmetric and real-valued, this function find the `D` Eigenvalues of the matrix, and also provides the corresponding Eigenvectors.
+ /// Note: the data in `outEigenvalues` and `outEigenvectors` are in matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`.
+ /// This is a numeric implementation to find the Eigenvalues, using 'QL decomposition' (variant of QR decomposition: https://en.wikipedia.org/wiki/QR_decomposition).
+ /// @param covarMat A symmetric, real-valued covariance matrix, e.g. computed from `computeCovarianceMatrix`.
+ /// @param outEigenvalues Vector to receive the found eigenvalues
+ /// @param outEigenvectors Matrix to receive the found eigenvectors corresponding to the found eigenvalues, as column vectors
+ /// @return The number of eigenvalues found, usually D if the precondition of the covariance matrix is met.
+ template<length_t D, typename T, qualifier Q>
+ GLM_FUNC_DECL unsigned int findEigenvaluesSymReal
+ (
+ mat<D, D, T, Q> const& covarMat,
+ vec<D, T, Q>& outEigenvalues,
+ mat<D, D, T, Q>& outEigenvectors
+ );
+
+ /// Sorts a group of Eigenvalues&Eigenvectors, from largest Eigenvalue to smallest Eigenvalue.
+ /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`.
+ template<typename T, qualifier Q>
+ GLM_INLINE void sortEigenvalues(vec<2, T, Q>& eigenvalues, mat<2, 2, T, Q>& eigenvectors);
+
+ /// Sorts a group of Eigenvalues&Eigenvectors, from largest Eigenvalue to smallest Eigenvalue.
+ /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`.
+ template<typename T, qualifier Q>
+ GLM_INLINE void sortEigenvalues(vec<3, T, Q>& eigenvalues, mat<3, 3, T, Q>& eigenvectors);
+
+ /// Sorts a group of Eigenvalues&Eigenvectors, from largest Eigenvalue to smallest Eigenvalue.
+ /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`.
+ template<typename T, qualifier Q>
+ GLM_INLINE void sortEigenvalues(vec<4, T, Q>& eigenvalues, mat<4, 4, T, Q>& eigenvectors);
+
+ /// @}
+}//namespace glm
+
+#include "pca.inl"
diff --git a/3rdparty/glm/source/glm/gtx/pca.inl b/3rdparty/glm/source/glm/gtx/pca.inl
new file mode 100644
index 0000000..d5a24b7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/pca.inl
@@ -0,0 +1,343 @@
+/// @ref gtx_pca
+
+#ifndef GLM_HAS_CXX11_STL
+#include <algorithm>
+#else
+#include <utility>
+#endif
+
+namespace glm {
+
+
+ template<length_t D, typename T, qualifier Q>
+ GLM_INLINE mat<D, D, T, Q> computeCovarianceMatrix(vec<D, T, Q> const* v, size_t n)
+ {
+ return computeCovarianceMatrix<D, T, Q, vec<D, T, Q> const*>(v, v + n);
+ }
+
+
+ template<length_t D, typename T, qualifier Q>
+ GLM_INLINE mat<D, D, T, Q> computeCovarianceMatrix(vec<D, T, Q> const* v, size_t n, vec<D, T, Q> const& c)
+ {
+ return computeCovarianceMatrix<D, T, Q, vec<D, T, Q> const*>(v, v + n, c);
+ }
+
+
+ template<length_t D, typename T, qualifier Q, typename I>
+ GLM_FUNC_DECL mat<D, D, T, Q> computeCovarianceMatrix(I const& b, I const& e)
+ {
+ glm::mat<D, D, T, Q> m(0);
+
+ size_t cnt = 0;
+ for(I i = b; i != e; i++)
+ {
+ vec<D, T, Q> const& v = *i;
+ for(length_t x = 0; x < D; ++x)
+ for(length_t y = 0; y < D; ++y)
+ m[x][y] += static_cast<T>(v[x] * v[y]);
+ cnt++;
+ }
+ if(cnt > 0)
+ m /= static_cast<T>(cnt);
+
+ return m;
+ }
+
+
+ template<length_t D, typename T, qualifier Q, typename I>
+ GLM_FUNC_DECL mat<D, D, T, Q> computeCovarianceMatrix(I const& b, I const& e, vec<D, T, Q> const& c)
+ {
+ glm::mat<D, D, T, Q> m(0);
+ glm::vec<D, T, Q> v;
+
+ size_t cnt = 0;
+ for(I i = b; i != e; i++)
+ {
+ v = *i - c;
+ for(length_t x = 0; x < D; ++x)
+ for(length_t y = 0; y < D; ++y)
+ m[x][y] += static_cast<T>(v[x] * v[y]);
+ cnt++;
+ }
+ if(cnt > 0)
+ m /= static_cast<T>(cnt);
+
+ return m;
+ }
+
+ namespace _internal_
+ {
+
+ template<typename T>
+ GLM_INLINE T transferSign(T const& v, T const& s)
+ {
+ return ((s) >= 0 ? glm::abs(v) : -glm::abs(v));
+ }
+
+ template<typename T>
+ GLM_INLINE T pythag(T const& a, T const& b) {
+ static const T epsilon = static_cast<T>(0.0000001);
+ T absa = glm::abs(a);
+ T absb = glm::abs(b);
+ if(absa > absb) {
+ absb /= absa;
+ absb *= absb;
+ return absa * glm::sqrt(static_cast<T>(1) + absb);
+ }
+ if(glm::equal<T>(absb, 0, epsilon)) return static_cast<T>(0);
+ absa /= absb;
+ absa *= absa;
+ return absb * glm::sqrt(static_cast<T>(1) + absa);
+ }
+
+ }
+
+ template<length_t D, typename T, qualifier Q>
+ GLM_FUNC_DECL unsigned int findEigenvaluesSymReal
+ (
+ mat<D, D, T, Q> const& covarMat,
+ vec<D, T, Q>& outEigenvalues,
+ mat<D, D, T, Q>& outEigenvectors
+ )
+ {
+ using _internal_::transferSign;
+ using _internal_::pythag;
+
+ T a[D * D]; // matrix -- input and workspace for algorithm (will be changed inplace)
+ T d[D]; // diagonal elements
+ T e[D]; // off-diagonal elements
+
+ for(length_t r = 0; r < D; r++)
+ for(length_t c = 0; c < D; c++)
+ a[(r) * D + (c)] = covarMat[c][r];
+
+ // 1. Householder reduction.
+ length_t l, k, j, i;
+ T scale, hh, h, g, f;
+ static const T epsilon = static_cast<T>(0.0000001);
+
+ for(i = D; i >= 2; i--)
+ {
+ l = i - 1;
+ h = scale = 0;
+ if(l > 1)
+ {
+ for(k = 1; k <= l; k++)
+ {
+ scale += glm::abs(a[(i - 1) * D + (k - 1)]);
+ }
+ if(glm::equal<T>(scale, 0, epsilon))
+ {
+ e[i - 1] = a[(i - 1) * D + (l - 1)];
+ }
+ else
+ {
+ for(k = 1; k <= l; k++)
+ {
+ a[(i - 1) * D + (k - 1)] /= scale;
+ h += a[(i - 1) * D + (k - 1)] * a[(i - 1) * D + (k - 1)];
+ }
+ f = a[(i - 1) * D + (l - 1)];
+ g = ((f >= 0) ? -glm::sqrt(h) : glm::sqrt(h));
+ e[i - 1] = scale * g;
+ h -= f * g;
+ a[(i - 1) * D + (l - 1)] = f - g;
+ f = 0;
+ for(j = 1; j <= l; j++)
+ {
+ a[(j - 1) * D + (i - 1)] = a[(i - 1) * D + (j - 1)] / h;
+ g = 0;
+ for(k = 1; k <= j; k++)
+ {
+ g += a[(j - 1) * D + (k - 1)] * a[(i - 1) * D + (k - 1)];
+ }
+ for(k = j + 1; k <= l; k++)
+ {
+ g += a[(k - 1) * D + (j - 1)] * a[(i - 1) * D + (k - 1)];
+ }
+ e[j - 1] = g / h;
+ f += e[j - 1] * a[(i - 1) * D + (j - 1)];
+ }
+ hh = f / (h + h);
+ for(j = 1; j <= l; j++)
+ {
+ f = a[(i - 1) * D + (j - 1)];
+ e[j - 1] = g = e[j - 1] - hh * f;
+ for(k = 1; k <= j; k++)
+ {
+ a[(j - 1) * D + (k - 1)] -= (f * e[k - 1] + g * a[(i - 1) * D + (k - 1)]);
+ }
+ }
+ }
+ }
+ else
+ {
+ e[i - 1] = a[(i - 1) * D + (l - 1)];
+ }
+ d[i - 1] = h;
+ }
+ d[0] = 0;
+ e[0] = 0;
+ for(i = 1; i <= D; i++)
+ {
+ l = i - 1;
+ if(!glm::equal<T>(d[i - 1], 0, epsilon))
+ {
+ for(j = 1; j <= l; j++)
+ {
+ g = 0;
+ for(k = 1; k <= l; k++)
+ {
+ g += a[(i - 1) * D + (k - 1)] * a[(k - 1) * D + (j - 1)];
+ }
+ for(k = 1; k <= l; k++)
+ {
+ a[(k - 1) * D + (j - 1)] -= g * a[(k - 1) * D + (i - 1)];
+ }
+ }
+ }
+ d[i - 1] = a[(i - 1) * D + (i - 1)];
+ a[(i - 1) * D + (i - 1)] = 1;
+ for(j = 1; j <= l; j++)
+ {
+ a[(j - 1) * D + (i - 1)] = a[(i - 1) * D + (j - 1)] = 0;
+ }
+ }
+
+ // 2. Calculation of eigenvalues and eigenvectors (QL algorithm)
+ length_t m, iter;
+ T s, r, p, dd, c, b;
+ const length_t MAX_ITER = 30;
+
+ for(i = 2; i <= D; i++)
+ {
+ e[i - 2] = e[i - 1];
+ }
+ e[D - 1] = 0;
+
+ for(l = 1; l <= D; l++)
+ {
+ iter = 0;
+ do
+ {
+ for(m = l; m <= D - 1; m++)
+ {
+ dd = glm::abs(d[m - 1]) + glm::abs(d[m - 1 + 1]);
+ if(glm::equal<T>(glm::abs(e[m - 1]) + dd, dd, epsilon))
+ break;
+ }
+ if(m != l)
+ {
+ if(iter++ == MAX_ITER)
+ {
+ return 0; // Too many iterations in FindEigenvalues
+ }
+ g = (d[l - 1 + 1] - d[l - 1]) / (2 * e[l - 1]);
+ r = pythag<T>(g, 1);
+ g = d[m - 1] - d[l - 1] + e[l - 1] / (g + transferSign(r, g));
+ s = c = 1;
+ p = 0;
+ for(i = m - 1; i >= l; i--)
+ {
+ f = s * e[i - 1];
+ b = c * e[i - 1];
+ e[i - 1 + 1] = r = pythag(f, g);
+ if(glm::equal<T>(r, 0, epsilon))
+ {
+ d[i - 1 + 1] -= p;
+ e[m - 1] = 0;
+ break;
+ }
+ s = f / r;
+ c = g / r;
+ g = d[i - 1 + 1] - p;
+ r = (d[i - 1] - g) * s + 2 * c * b;
+ d[i - 1 + 1] = g + (p = s * r);
+ g = c * r - b;
+ for(k = 1; k <= D; k++)
+ {
+ f = a[(k - 1) * D + (i - 1 + 1)];
+ a[(k - 1) * D + (i - 1 + 1)] = s * a[(k - 1) * D + (i - 1)] + c * f;
+ a[(k - 1) * D + (i - 1)] = c * a[(k - 1) * D + (i - 1)] - s * f;
+ }
+ }
+ if(glm::equal<T>(r, 0, epsilon) && (i >= l))
+ continue;
+ d[l - 1] -= p;
+ e[l - 1] = g;
+ e[m - 1] = 0;
+ }
+ } while(m != l);
+ }
+
+ // 3. output
+ for(i = 0; i < D; i++)
+ outEigenvalues[i] = d[i];
+ for(i = 0; i < D; i++)
+ for(j = 0; j < D; j++)
+ outEigenvectors[i][j] = a[(j) * D + (i)];
+
+ return D;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE void sortEigenvalues(vec<2, T, Q>& eigenvalues, mat<2, 2, T, Q>& eigenvectors)
+ {
+ if (eigenvalues[0] < eigenvalues[1])
+ {
+ std::swap(eigenvalues[0], eigenvalues[1]);
+ std::swap(eigenvectors[0], eigenvectors[1]);
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE void sortEigenvalues(vec<3, T, Q>& eigenvalues, mat<3, 3, T, Q>& eigenvectors)
+ {
+ if (eigenvalues[0] < eigenvalues[1])
+ {
+ std::swap(eigenvalues[0], eigenvalues[1]);
+ std::swap(eigenvectors[0], eigenvectors[1]);
+ }
+ if (eigenvalues[0] < eigenvalues[2])
+ {
+ std::swap(eigenvalues[0], eigenvalues[2]);
+ std::swap(eigenvectors[0], eigenvectors[2]);
+ }
+ if (eigenvalues[1] < eigenvalues[2])
+ {
+ std::swap(eigenvalues[1], eigenvalues[2]);
+ std::swap(eigenvectors[1], eigenvectors[2]);
+ }
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE void sortEigenvalues(vec<4, T, Q>& eigenvalues, mat<4, 4, T, Q>& eigenvectors)
+ {
+ if (eigenvalues[0] < eigenvalues[2])
+ {
+ std::swap(eigenvalues[0], eigenvalues[2]);
+ std::swap(eigenvectors[0], eigenvectors[2]);
+ }
+ if (eigenvalues[1] < eigenvalues[3])
+ {
+ std::swap(eigenvalues[1], eigenvalues[3]);
+ std::swap(eigenvectors[1], eigenvectors[3]);
+ }
+ if (eigenvalues[0] < eigenvalues[1])
+ {
+ std::swap(eigenvalues[0], eigenvalues[1]);
+ std::swap(eigenvectors[0], eigenvectors[1]);
+ }
+ if (eigenvalues[2] < eigenvalues[3])
+ {
+ std::swap(eigenvalues[2], eigenvalues[3]);
+ std::swap(eigenvectors[2], eigenvectors[3]);
+ }
+ if (eigenvalues[1] < eigenvalues[2])
+ {
+ std::swap(eigenvalues[1], eigenvalues[2]);
+ std::swap(eigenvectors[1], eigenvectors[2]);
+ }
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/perpendicular.hpp b/3rdparty/glm/source/glm/gtx/perpendicular.hpp
new file mode 100644
index 0000000..72b77b6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/perpendicular.hpp
@@ -0,0 +1,41 @@
+/// @ref gtx_perpendicular
+/// @file glm/gtx/perpendicular.hpp
+///
+/// @see core (dependence)
+/// @see gtx_projection (dependence)
+///
+/// @defgroup gtx_perpendicular GLM_GTX_perpendicular
+/// @ingroup gtx
+///
+/// Include <glm/gtx/perpendicular.hpp> to use the features of this extension.
+///
+/// Perpendicular of a vector with respect to another one
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/projection.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_perpendicular is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_perpendicular extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_perpendicular
+ /// @{
+
+	//! Projects x onto a perpendicular axis of Normal.
+ //! From GLM_GTX_perpendicular extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType perp(genType const& x, genType const& Normal);
+
+ /// @}
+}//namespace glm
+
+#include "perpendicular.inl"
diff --git a/3rdparty/glm/source/glm/gtx/perpendicular.inl b/3rdparty/glm/source/glm/gtx/perpendicular.inl
new file mode 100644
index 0000000..1e72f33
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/perpendicular.inl
@@ -0,0 +1,10 @@
+/// @ref gtx_perpendicular
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType perp(genType const& x, genType const& Normal)
+ {
+ return x - proj(x, Normal);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/polar_coordinates.hpp b/3rdparty/glm/source/glm/gtx/polar_coordinates.hpp
new file mode 100644
index 0000000..76beb82
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/polar_coordinates.hpp
@@ -0,0 +1,48 @@
+/// @ref gtx_polar_coordinates
+/// @file glm/gtx/polar_coordinates.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_polar_coordinates GLM_GTX_polar_coordinates
+/// @ingroup gtx
+///
+/// Include <glm/gtx/polar_coordinates.hpp> to use the features of this extension.
+///
+/// Conversion from Euclidean space to polar space and revert.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_polar_coordinates is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_polar_coordinates extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_polar_coordinates
+ /// @{
+
+ /// Convert Euclidean to Polar coordinates, x is the latitude, y the longitude and z the xz distance.
+ ///
+ /// @see gtx_polar_coordinates
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> polar(
+ vec<3, T, Q> const& euclidean);
+
+ /// Convert Polar to Euclidean coordinates.
+ ///
+ /// @see gtx_polar_coordinates
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> euclidean(
+ vec<2, T, Q> const& polar);
+
+ /// @}
+}//namespace glm
+
+#include "polar_coordinates.inl"
diff --git a/3rdparty/glm/source/glm/gtx/polar_coordinates.inl b/3rdparty/glm/source/glm/gtx/polar_coordinates.inl
new file mode 100644
index 0000000..371c8dd
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/polar_coordinates.inl
@@ -0,0 +1,36 @@
+/// @ref gtx_polar_coordinates
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> polar
+ (
+ vec<3, T, Q> const& euclidean
+ )
+ {
+ T const Length(length(euclidean));
+ vec<3, T, Q> const tmp(euclidean / Length);
+ T const xz_dist(sqrt(tmp.x * tmp.x + tmp.z * tmp.z));
+
+ return vec<3, T, Q>(
+ asin(tmp.y), // latitude
+ atan(tmp.x, tmp.z), // longitude
+ xz_dist); // xz distance
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> euclidean
+ (
+ vec<2, T, Q> const& polar
+ )
+ {
+ T const latitude(polar.x);
+ T const longitude(polar.y);
+
+ return vec<3, T, Q>(
+ cos(latitude) * sin(longitude),
+ sin(latitude),
+ cos(latitude) * cos(longitude));
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/projection.hpp b/3rdparty/glm/source/glm/gtx/projection.hpp
new file mode 100644
index 0000000..678f3ad
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/projection.hpp
@@ -0,0 +1,43 @@
+/// @ref gtx_projection
+/// @file glm/gtx/projection.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_projection GLM_GTX_projection
+/// @ingroup gtx
+///
+/// Include <glm/gtx/projection.hpp> to use the features of this extension.
+///
+/// Projection of a vector onto another one
+
+#pragma once
+
+// Dependency:
+#include "../geometric.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_projection is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_projection extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_projection
+ /// @{
+
+ /// Projects x on Normal.
+ ///
+ /// @param[in] x A vector to project
+ /// @param[in] Normal A normal that doesn't need to be of unit length.
+ ///
+ /// @see gtx_projection
+ template<typename genType>
+ GLM_FUNC_DECL genType proj(genType const& x, genType const& Normal);
+
+ /// @}
+}//namespace glm
+
+#include "projection.inl"
diff --git a/3rdparty/glm/source/glm/gtx/projection.inl b/3rdparty/glm/source/glm/gtx/projection.inl
new file mode 100644
index 0000000..f23f884
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/projection.inl
@@ -0,0 +1,10 @@
+/// @ref gtx_projection
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType proj(genType const& x, genType const& Normal)
+ {
+ return glm::dot(x, Normal) / glm::dot(Normal, Normal) * Normal;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/quaternion.hpp b/3rdparty/glm/source/glm/gtx/quaternion.hpp
new file mode 100644
index 0000000..5c2b5ad
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/quaternion.hpp
@@ -0,0 +1,174 @@
+/// @ref gtx_quaternion
+/// @file glm/gtx/quaternion.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_quaternion GLM_GTX_quaternion
+/// @ingroup gtx
+///
+/// Include <glm/gtx/quaternion.hpp> to use the features of this extension.
+///
+/// Extended quaternion types and functions
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/constants.hpp"
+#include "../gtc/quaternion.hpp"
+#include "../ext/quaternion_exponential.hpp"
+#include "../gtx/norm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_quaternion extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_quaternion
+ /// @{
+
+ /// Create an identity quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> quat_identity();
+
+ /// Compute a cross product between a quaternion and a vector.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> cross(
+ qua<T, Q> const& q,
+ vec<3, T, Q> const& v);
+
+ //! Compute a cross product between a vector and a quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> cross(
+ vec<3, T, Q> const& v,
+ qua<T, Q> const& q);
+
+ //! Compute a point on a path according squad equation.
+ //! q1 and q2 are control points; s1 and s2 are intermediate control points.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> squad(
+ qua<T, Q> const& q1,
+ qua<T, Q> const& q2,
+ qua<T, Q> const& s1,
+ qua<T, Q> const& s2,
+ T const& h);
+
+ //! Returns an intermediate control point for squad interpolation.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> intermediate(
+ qua<T, Q> const& prev,
+ qua<T, Q> const& curr,
+ qua<T, Q> const& next);
+
+	//! Returns the quaternion square root.
+ ///
+ /// @see gtx_quaternion
+ //template<typename T, qualifier Q>
+ //qua<T, Q> sqrt(
+ // qua<T, Q> const& q);
+
+ //! Rotates a 3 components vector by a quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rotate(
+ qua<T, Q> const& q,
+ vec<3, T, Q> const& v);
+
+ /// Rotates a 4 components vector by a quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> rotate(
+ qua<T, Q> const& q,
+ vec<4, T, Q> const& v);
+
+ /// Extract the real component of a quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T extractRealComponent(
+ qua<T, Q> const& q);
+
+ /// Converts a quaternion to a 3 * 3 matrix.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> toMat3(
+ qua<T, Q> const& x){return mat3_cast(x);}
+
+ /// Converts a quaternion to a 4 * 4 matrix.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> toMat4(
+ qua<T, Q> const& x){return mat4_cast(x);}
+
+ /// Converts a 3 * 3 matrix to a quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> toQuat(
+ mat<3, 3, T, Q> const& x){return quat_cast(x);}
+
+ /// Converts a 4 * 4 matrix to a quaternion.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> toQuat(
+ mat<4, 4, T, Q> const& x){return quat_cast(x);}
+
+ /// Quaternion interpolation using the rotation short path.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> shortMix(
+ qua<T, Q> const& x,
+ qua<T, Q> const& y,
+ T const& a);
+
+ /// Quaternion normalized linear interpolation.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> fastMix(
+ qua<T, Q> const& x,
+ qua<T, Q> const& y,
+ T const& a);
+
+ /// Compute the rotation between two vectors.
+ /// @param orig vector, needs to be normalized
+ /// @param dest vector, needs to be normalized
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> rotation(
+ vec<3, T, Q> const& orig,
+ vec<3, T, Q> const& dest);
+
+ /// Returns the squared length of x.
+ ///
+ /// @see gtx_quaternion
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR T length2(qua<T, Q> const& q);
+
+ /// @}
+}//namespace glm
+
+#include "quaternion.inl"
diff --git a/3rdparty/glm/source/glm/gtx/quaternion.inl b/3rdparty/glm/source/glm/gtx/quaternion.inl
new file mode 100644
index 0000000..d125bcc
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/quaternion.inl
@@ -0,0 +1,159 @@
+/// @ref gtx_quaternion
+
+#include <limits>
+#include "../gtc/constants.hpp"
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> quat_identity()
+ {
+ return qua<T, Q>(static_cast<T>(1), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> cross(vec<3, T, Q> const& v, qua<T, Q> const& q)
+ {
+ return inverse(q) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> cross(qua<T, Q> const& q, vec<3, T, Q> const& v)
+ {
+ return q * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> squad
+ (
+ qua<T, Q> const& q1,
+ qua<T, Q> const& q2,
+ qua<T, Q> const& s1,
+ qua<T, Q> const& s2,
+ T const& h)
+ {
+ return mix(mix(q1, q2, h), mix(s1, s2, h), static_cast<T>(2) * (static_cast<T>(1) - h) * h);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> intermediate
+ (
+ qua<T, Q> const& prev,
+ qua<T, Q> const& curr,
+ qua<T, Q> const& next
+ )
+ {
+ qua<T, Q> invQuat = inverse(curr);
+ return exp((log(next * invQuat) + log(prev * invQuat)) / static_cast<T>(-4)) * curr;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rotate(qua<T, Q> const& q, vec<3, T, Q> const& v)
+ {
+ return q * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> rotate(qua<T, Q> const& q, vec<4, T, Q> const& v)
+ {
+ return q * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T extractRealComponent(qua<T, Q> const& q)
+ {
+ T w = static_cast<T>(1) - q.x * q.x - q.y * q.y - q.z * q.z;
+ if(w < T(0))
+ return T(0);
+ else
+ return -sqrt(w);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR T length2(qua<T, Q> const& q)
+ {
+ return q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> shortMix(qua<T, Q> const& x, qua<T, Q> const& y, T const& a)
+ {
+ if(a <= static_cast<T>(0)) return x;
+ if(a >= static_cast<T>(1)) return y;
+
+ T fCos = dot(x, y);
+ qua<T, Q> y2(y); //BUG!!! qua<T> y2;
+ if(fCos < static_cast<T>(0))
+ {
+ y2 = -y;
+ fCos = -fCos;
+ }
+
+ //if(fCos > 1.0f) // problem
+ T k0, k1;
+ if(fCos > (static_cast<T>(1) - epsilon<T>()))
+ {
+ k0 = static_cast<T>(1) - a;
+ k1 = static_cast<T>(0) + a; //BUG!!! 1.0f + a;
+ }
+ else
+ {
+ T fSin = sqrt(T(1) - fCos * fCos);
+ T fAngle = atan(fSin, fCos);
+ T fOneOverSin = static_cast<T>(1) / fSin;
+ k0 = sin((static_cast<T>(1) - a) * fAngle) * fOneOverSin;
+ k1 = sin((static_cast<T>(0) + a) * fAngle) * fOneOverSin;
+ }
+
+ return qua<T, Q>(
+ k0 * x.w + k1 * y2.w,
+ k0 * x.x + k1 * y2.x,
+ k0 * x.y + k1 * y2.y,
+ k0 * x.z + k1 * y2.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> fastMix(qua<T, Q> const& x, qua<T, Q> const& y, T const& a)
+ {
+ return glm::normalize(x * (static_cast<T>(1) - a) + (y * a));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> rotation(vec<3, T, Q> const& orig, vec<3, T, Q> const& dest)
+ {
+ T cosTheta = dot(orig, dest);
+ vec<3, T, Q> rotationAxis;
+
+ if(cosTheta >= static_cast<T>(1) - epsilon<T>()) {
+ // orig and dest point in the same direction
+ return quat_identity<T,Q>();
+ }
+
+ if(cosTheta < static_cast<T>(-1) + epsilon<T>())
+ {
+ // special case when vectors in opposite directions :
+ // there is no "ideal" rotation axis
+ // So guess one; any will do as long as it's perpendicular to start
+ // This implementation favors a rotation around the Up axis (Y),
+ // since it's often what you want to do.
+ rotationAxis = cross(vec<3, T, Q>(0, 0, 1), orig);
+ if(length2(rotationAxis) < epsilon<T>()) // bad luck, they were parallel, try again!
+ rotationAxis = cross(vec<3, T, Q>(1, 0, 0), orig);
+
+ rotationAxis = normalize(rotationAxis);
+ return angleAxis(pi<T>(), rotationAxis);
+ }
+
+ // Implementation from Stan Melax's Game Programming Gems 1 article
+ rotationAxis = cross(orig, dest);
+
+ T s = sqrt((T(1) + cosTheta) * static_cast<T>(2));
+ T invs = static_cast<T>(1) / s;
+
+ return qua<T, Q>(
+ s * static_cast<T>(0.5f),
+ rotationAxis.x * invs,
+ rotationAxis.y * invs,
+ rotationAxis.z * invs);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/range.hpp b/3rdparty/glm/source/glm/gtx/range.hpp
new file mode 100644
index 0000000..93bcb9a
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/range.hpp
@@ -0,0 +1,98 @@
+/// @ref gtx_range
+/// @file glm/gtx/range.hpp
+/// @author Joshua Moerman
+///
+/// @defgroup gtx_range GLM_GTX_range
+/// @ingroup gtx
+///
+/// Include <glm/gtx/range.hpp> to use the features of this extension.
+///
+/// Defines begin and end for vectors and matrices. Useful for range-based for loop.
+/// The range is defined over the elements, not over columns or rows (e.g. mat4 has 16 elements).
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_range is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_range extension included")
+# endif
+#endif
+
+#include "../gtc/type_ptr.hpp"
+#include "../gtc/vec1.hpp"
+
+namespace glm
+{
+ /// @addtogroup gtx_range
+ /// @{
+
+# if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable : 4100) // unreferenced formal parameter
+# endif
+
+ template<typename T, qualifier Q>
+ inline length_t components(vec<1, T, Q> const& v)
+ {
+ return v.length();
+ }
+
+ template<typename T, qualifier Q>
+ inline length_t components(vec<2, T, Q> const& v)
+ {
+ return v.length();
+ }
+
+ template<typename T, qualifier Q>
+ inline length_t components(vec<3, T, Q> const& v)
+ {
+ return v.length();
+ }
+
+ template<typename T, qualifier Q>
+ inline length_t components(vec<4, T, Q> const& v)
+ {
+ return v.length();
+ }
+
+ template<typename genType>
+ inline length_t components(genType const& m)
+ {
+ return m.length() * m[0].length();
+ }
+
+ template<typename genType>
+ inline typename genType::value_type const * begin(genType const& v)
+ {
+ return value_ptr(v);
+ }
+
+ template<typename genType>
+ inline typename genType::value_type const * end(genType const& v)
+ {
+ return begin(v) + components(v);
+ }
+
+ template<typename genType>
+ inline typename genType::value_type * begin(genType& v)
+ {
+ return value_ptr(v);
+ }
+
+ template<typename genType>
+ inline typename genType::value_type * end(genType& v)
+ {
+ return begin(v) + components(v);
+ }
+
+# if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+
+ /// @}
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/raw_data.hpp b/3rdparty/glm/source/glm/gtx/raw_data.hpp
new file mode 100644
index 0000000..86cbe77
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/raw_data.hpp
@@ -0,0 +1,51 @@
+/// @ref gtx_raw_data
+/// @file glm/gtx/raw_data.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_raw_data GLM_GTX_raw_data
+/// @ingroup gtx
+///
+/// Include <glm/gtx/raw_data.hpp> to use the features of this extension.
+///
+/// Raw data types: byte, word, dword and qword
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_raw_data is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_raw_data extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_raw_data
+ /// @{
+
+ //! Type for byte numbers.
+ //! From GLM_GTX_raw_data extension.
+ typedef detail::uint8 byte;
+
+ //! Type for word numbers.
+ //! From GLM_GTX_raw_data extension.
+ typedef detail::uint16 word;
+
+ //! Type for dword numbers.
+ //! From GLM_GTX_raw_data extension.
+ typedef detail::uint32 dword;
+
+ //! Type for qword numbers.
+ //! From GLM_GTX_raw_data extension.
+ typedef detail::uint64 qword;
+
+ /// @}
+}// namespace glm
+
+#include "raw_data.inl"
diff --git a/3rdparty/glm/source/glm/gtx/raw_data.inl b/3rdparty/glm/source/glm/gtx/raw_data.inl
new file mode 100644
index 0000000..c740317
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/raw_data.inl
@@ -0,0 +1,2 @@
+/// @ref gtx_raw_data
+
diff --git a/3rdparty/glm/source/glm/gtx/rotate_normalized_axis.hpp b/3rdparty/glm/source/glm/gtx/rotate_normalized_axis.hpp
new file mode 100644
index 0000000..2103ca0
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/rotate_normalized_axis.hpp
@@ -0,0 +1,68 @@
+/// @ref gtx_rotate_normalized_axis
+/// @file glm/gtx/rotate_normalized_axis.hpp
+///
+/// @see core (dependence)
+/// @see gtc_matrix_transform
+/// @see gtc_quaternion
+///
+/// @defgroup gtx_rotate_normalized_axis GLM_GTX_rotate_normalized_axis
+/// @ingroup gtx
+///
+/// Include <glm/gtx/rotate_normalized_axis.hpp> to use the features of this extension.
+///
+/// Quaternions and matrices rotations around normalized axis.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/epsilon.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_rotate_normalized_axis is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_rotate_normalized_axis extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_rotate_normalized_axis
+ /// @{
+
+ /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle.
+ ///
+ /// @param m Input matrix multiplied by this rotation matrix.
+ /// @param angle Rotation angle expressed in radians.
+ /// @param axis Rotation axis, must be normalized.
+ /// @tparam T Value type used to build the matrix. Currently supported: half (not recommended), float or double.
+ ///
+ /// @see gtx_rotate_normalized_axis
+ /// @see - rotate(T angle, T x, T y, T z)
+ /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z)
+ /// @see - rotate(T angle, vec<3, T, Q> const& v)
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> rotateNormalizedAxis(
+ mat<4, 4, T, Q> const& m,
+ T const& angle,
+ vec<3, T, Q> const& axis);
+
+ /// Rotates a quaternion from a vector of 3 components normalized axis and an angle.
+ ///
+ /// @param q Source orientation
+ /// @param angle Angle expressed in radians.
+ /// @param axis Normalized axis of the rotation, must be normalized.
+ ///
+ /// @see gtx_rotate_normalized_axis
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL qua<T, Q> rotateNormalizedAxis(
+ qua<T, Q> const& q,
+ T const& angle,
+ vec<3, T, Q> const& axis);
+
+ /// @}
+}//namespace glm
+
+#include "rotate_normalized_axis.inl"
diff --git a/3rdparty/glm/source/glm/gtx/rotate_normalized_axis.inl b/3rdparty/glm/source/glm/gtx/rotate_normalized_axis.inl
new file mode 100644
index 0000000..b2e9278
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/rotate_normalized_axis.inl
@@ -0,0 +1,58 @@
+/// @ref gtx_rotate_normalized_axis
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotateNormalizedAxis
+ (
+ mat<4, 4, T, Q> const& m,
+ T const& angle,
+ vec<3, T, Q> const& v
+ )
+ {
+ T const a = angle;
+ T const c = cos(a);
+ T const s = sin(a);
+
+ vec<3, T, Q> const axis(v);
+
+ vec<3, T, Q> const temp((static_cast<T>(1) - c) * axis);
+
+ mat<4, 4, T, Q> Rotate;
+ Rotate[0][0] = c + temp[0] * axis[0];
+ Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2];
+ Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1];
+
+ Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2];
+ Rotate[1][1] = c + temp[1] * axis[1];
+ Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0];
+
+ Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1];
+ Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0];
+ Rotate[2][2] = c + temp[2] * axis[2];
+
+ mat<4, 4, T, Q> Result;
+ Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
+ Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
+ Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
+ Result[3] = m[3];
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER qua<T, Q> rotateNormalizedAxis
+ (
+ qua<T, Q> const& q,
+ T const& angle,
+ vec<3, T, Q> const& v
+ )
+ {
+ vec<3, T, Q> const Tmp(v);
+
+ T const AngleRad(angle);
+ T const Sin = sin(AngleRad * T(0.5));
+
+ return q * qua<T, Q>(cos(AngleRad * static_cast<T>(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin);
+ //return gtc::quaternion::cross(q, tquat<T, Q>(cos(AngleRad * T(0.5)), Tmp.x * fSin, Tmp.y * fSin, Tmp.z * fSin));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/rotate_vector.hpp b/3rdparty/glm/source/glm/gtx/rotate_vector.hpp
new file mode 100644
index 0000000..dcd5b95
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/rotate_vector.hpp
@@ -0,0 +1,123 @@
+/// @ref gtx_rotate_vector
+/// @file glm/gtx/rotate_vector.hpp
+///
+/// @see core (dependence)
+/// @see gtx_transform (dependence)
+///
+/// @defgroup gtx_rotate_vector GLM_GTX_rotate_vector
+/// @ingroup gtx
+///
+/// Include <glm/gtx/rotate_vector.hpp> to use the features of this extension.
+///
+/// Function to directly rotate a vector
+
+#pragma once
+
+// Dependency:
+#include "../gtx/transform.hpp"
+#include "../gtc/epsilon.hpp"
+#include "../ext/vector_relational.hpp"
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_rotate_vector is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_rotate_vector extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_rotate_vector
+ /// @{
+
+ /// Returns Spherical interpolation between two vectors
+ ///
+ /// @param x A first vector
+ /// @param y A second vector
+ /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1].
+ ///
+ /// @see gtx_rotate_vector
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> slerp(
+ vec<3, T, Q> const& x,
+ vec<3, T, Q> const& y,
+ T const& a);
+
+ //! Rotate a two dimensional vector.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<2, T, Q> rotate(
+ vec<2, T, Q> const& v,
+ T const& angle);
+
+ //! Rotate a three dimensional vector around an axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rotate(
+ vec<3, T, Q> const& v,
+ T const& angle,
+ vec<3, T, Q> const& normal);
+
+ //! Rotate a four dimensional vector around an axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> rotate(
+ vec<4, T, Q> const& v,
+ T const& angle,
+ vec<3, T, Q> const& normal);
+
+ //! Rotate a three dimensional vector around the X axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rotateX(
+ vec<3, T, Q> const& v,
+ T const& angle);
+
+ //! Rotate a three dimensional vector around the Y axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rotateY(
+ vec<3, T, Q> const& v,
+ T const& angle);
+
+ //! Rotate a three dimensional vector around the Z axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<3, T, Q> rotateZ(
+ vec<3, T, Q> const& v,
+ T const& angle);
+
+ //! Rotate a four dimensional vector around the X axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> rotateX(
+ vec<4, T, Q> const& v,
+ T const& angle);
+
+ //! Rotate a four dimensional vector around the Y axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> rotateY(
+ vec<4, T, Q> const& v,
+ T const& angle);
+
+ //! Rotate a four dimensional vector around the Z axis.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL vec<4, T, Q> rotateZ(
+ vec<4, T, Q> const& v,
+ T const& angle);
+
+ //! Build a rotation matrix from a normal and a up vector.
+ //! From GLM_GTX_rotate_vector extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> orientation(
+ vec<3, T, Q> const& Normal,
+ vec<3, T, Q> const& Up);
+
+ /// @}
+}//namespace glm
+
+#include "rotate_vector.inl"
diff --git a/3rdparty/glm/source/glm/gtx/rotate_vector.inl b/3rdparty/glm/source/glm/gtx/rotate_vector.inl
new file mode 100644
index 0000000..f8136e7
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/rotate_vector.inl
@@ -0,0 +1,187 @@
+/// @ref gtx_rotate_vector
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> slerp
+ (
+ vec<3, T, Q> const& x,
+ vec<3, T, Q> const& y,
+ T const& a
+ )
+ {
+ // get cosine of angle between vectors (-1 -> 1)
+ T CosAlpha = dot(x, y);
+ // get angle (0 -> pi)
+ T Alpha = acos(CosAlpha);
+ // get sine of angle between vectors (0 -> 1)
+ T SinAlpha = sin(Alpha);
+ // this breaks down when SinAlpha = 0, i.e. Alpha = 0 or pi
+ T t1 = sin((static_cast<T>(1) - a) * Alpha) / SinAlpha;
+ T t2 = sin(a * Alpha) / SinAlpha;
+
+ // interpolate src vectors
+ return x * t1 + y * t2;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, T, Q> rotate
+ (
+ vec<2, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<2, T, Q> Result;
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.x = v.x * Cos - v.y * Sin;
+ Result.y = v.x * Sin + v.y * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rotate
+ (
+ vec<3, T, Q> const& v,
+ T const& angle,
+ vec<3, T, Q> const& normal
+ )
+ {
+ return mat<3, 3, T, Q>(glm::rotate(angle, normal)) * v;
+ }
+ /*
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rotateGTX(
+ const vec<3, T, Q>& x,
+ T angle,
+ const vec<3, T, Q>& normal)
+ {
+ const T Cos = cos(radians(angle));
+ const T Sin = sin(radians(angle));
+ return x * Cos + ((x * normal) * (T(1) - Cos)) * normal + cross(x, normal) * Sin;
+ }
+ */
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> rotate
+ (
+ vec<4, T, Q> const& v,
+ T const& angle,
+ vec<3, T, Q> const& normal
+ )
+ {
+ return rotate(angle, normal) * v;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rotateX
+ (
+ vec<3, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<3, T, Q> Result(v);
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.y = v.y * Cos - v.z * Sin;
+ Result.z = v.y * Sin + v.z * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rotateY
+ (
+ vec<3, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<3, T, Q> Result = v;
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.x = v.x * Cos + v.z * Sin;
+ Result.z = -v.x * Sin + v.z * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, T, Q> rotateZ
+ (
+ vec<3, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<3, T, Q> Result = v;
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.x = v.x * Cos - v.y * Sin;
+ Result.y = v.x * Sin + v.y * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> rotateX
+ (
+ vec<4, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<4, T, Q> Result = v;
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.y = v.y * Cos - v.z * Sin;
+ Result.z = v.y * Sin + v.z * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> rotateY
+ (
+ vec<4, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<4, T, Q> Result = v;
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.x = v.x * Cos + v.z * Sin;
+ Result.z = -v.x * Sin + v.z * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, T, Q> rotateZ
+ (
+ vec<4, T, Q> const& v,
+ T const& angle
+ )
+ {
+ vec<4, T, Q> Result = v;
+ T const Cos(cos(angle));
+ T const Sin(sin(angle));
+
+ Result.x = v.x * Cos - v.y * Sin;
+ Result.y = v.x * Sin + v.y * Cos;
+ return Result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientation
+ (
+ vec<3, T, Q> const& Normal,
+ vec<3, T, Q> const& Up
+ )
+ {
+ if(all(equal(Normal, Up, epsilon<T>())))
+ return mat<4, 4, T, Q>(static_cast<T>(1));
+
+ vec<3, T, Q> RotationAxis = cross(Up, Normal);
+ T Angle = acos(dot(Normal, Up));
+
+ return rotate(Angle, RotationAxis);
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/scalar_multiplication.hpp b/3rdparty/glm/source/glm/gtx/scalar_multiplication.hpp
new file mode 100644
index 0000000..496ba19
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/scalar_multiplication.hpp
@@ -0,0 +1,75 @@
+/// @ref gtx
+/// @file glm/gtx/scalar_multiplication.hpp
+/// @author Joshua Moerman
+///
+/// Include <glm/gtx/scalar_multiplication.hpp> to use the features of this extension.
+///
+/// Enables scalar multiplication for all types
+///
+/// Since GLSL is very strict about types, the following (often used) combinations do not work:
+/// double * vec4
+/// int * vec4
+/// vec4 / int
+/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic)
+
+#pragma once
+
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_scalar_multiplication extension included")
+# endif
+#endif
+
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../mat2x2.hpp"
+#include <type_traits>
+
+namespace glm
+{
+ template<typename T, typename Vec>
+ using return_type_scalar_multiplication = typename std::enable_if<
+ !std::is_same<T, float>::value // T may not be a float
+ && std::is_arithmetic<T>::value, Vec // But it may be an int or double (no vec3 or mat3, ...)
+ >::type;
+
+#define GLM_IMPLEMENT_SCAL_MULT(Vec) \
+ template<typename T> \
+ return_type_scalar_multiplication<T, Vec> \
+ operator*(T const& s, Vec rh){ \
+ return rh *= static_cast<float>(s); \
+ } \
+ \
+ template<typename T> \
+ return_type_scalar_multiplication<T, Vec> \
+ operator*(Vec lh, T const& s){ \
+ return lh *= static_cast<float>(s); \
+ } \
+ \
+ template<typename T> \
+ return_type_scalar_multiplication<T, Vec> \
+ operator/(Vec lh, T const& s){ \
+ return lh *= 1.0f / static_cast<float>(s); \
+ }
+
+GLM_IMPLEMENT_SCAL_MULT(vec2)
+GLM_IMPLEMENT_SCAL_MULT(vec3)
+GLM_IMPLEMENT_SCAL_MULT(vec4)
+
+GLM_IMPLEMENT_SCAL_MULT(mat2)
+GLM_IMPLEMENT_SCAL_MULT(mat2x3)
+GLM_IMPLEMENT_SCAL_MULT(mat2x4)
+GLM_IMPLEMENT_SCAL_MULT(mat3x2)
+GLM_IMPLEMENT_SCAL_MULT(mat3)
+GLM_IMPLEMENT_SCAL_MULT(mat3x4)
+GLM_IMPLEMENT_SCAL_MULT(mat4x2)
+GLM_IMPLEMENT_SCAL_MULT(mat4x3)
+GLM_IMPLEMENT_SCAL_MULT(mat4)
+
+#undef GLM_IMPLEMENT_SCAL_MULT
+} // namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/scalar_relational.hpp b/3rdparty/glm/source/glm/gtx/scalar_relational.hpp
new file mode 100644
index 0000000..8be9c57
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/scalar_relational.hpp
@@ -0,0 +1,36 @@
+/// @ref gtx_scalar_relational
+/// @file glm/gtx/scalar_relational.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_scalar_relational GLM_GTX_scalar_relational
+/// @ingroup gtx
+///
+/// Include <glm/gtx/scalar_relational.hpp> to use the features of this extension.
+///
+/// Extend a position from a source to a position at a defined length.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_extend extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_scalar_relational
+ /// @{
+
+
+
+ /// @}
+}//namespace glm
+
+#include "scalar_relational.inl"
diff --git a/3rdparty/glm/source/glm/gtx/scalar_relational.inl b/3rdparty/glm/source/glm/gtx/scalar_relational.inl
new file mode 100644
index 0000000..c2a121c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/scalar_relational.inl
@@ -0,0 +1,88 @@
+/// @ref gtx_scalar_relational
+
+namespace glm
+{
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool lessThan
+ (
+ T const& x,
+ T const& y
+ )
+ {
+ return x < y;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool lessThanEqual
+ (
+ T const& x,
+ T const& y
+ )
+ {
+ return x <= y;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool greaterThan
+ (
+ T const& x,
+ T const& y
+ )
+ {
+ return x > y;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool greaterThanEqual
+ (
+ T const& x,
+ T const& y
+ )
+ {
+ return x >= y;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool equal
+ (
+ T const& x,
+ T const& y
+ )
+ {
+ return detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(x, y);
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER bool notEqual
+ (
+ T const& x,
+ T const& y
+ )
+ {
+ return !detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(x, y);
+ }
+
+ GLM_FUNC_QUALIFIER bool any
+ (
+ bool const& x
+ )
+ {
+ return x;
+ }
+
+ GLM_FUNC_QUALIFIER bool all
+ (
+ bool const& x
+ )
+ {
+ return x;
+ }
+
+ GLM_FUNC_QUALIFIER bool not_
+ (
+ bool const& x
+ )
+ {
+ return !x;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/spline.hpp b/3rdparty/glm/source/glm/gtx/spline.hpp
new file mode 100644
index 0000000..731c979
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/spline.hpp
@@ -0,0 +1,65 @@
+/// @ref gtx_spline
+/// @file glm/gtx/spline.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_spline GLM_GTX_spline
+/// @ingroup gtx
+///
+/// Include <glm/gtx/spline.hpp> to use the features of this extension.
+///
+/// Spline functions
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/optimum_pow.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_spline is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_spline extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_spline
+ /// @{
+
+ /// Return a point from a catmull rom curve.
+ /// @see gtx_spline extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType catmullRom(
+ genType const& v1,
+ genType const& v2,
+ genType const& v3,
+ genType const& v4,
+ typename genType::value_type const& s);
+
+ /// Return a point from a hermite curve.
+ /// @see gtx_spline extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType hermite(
+ genType const& v1,
+ genType const& t1,
+ genType const& v2,
+ genType const& t2,
+ typename genType::value_type const& s);
+
+ /// Return a point from a cubic curve.
+ /// @see gtx_spline extension.
+ template<typename genType>
+ GLM_FUNC_DECL genType cubic(
+ genType const& v1,
+ genType const& v2,
+ genType const& v3,
+ genType const& v4,
+ typename genType::value_type const& s);
+
+ /// @}
+}//namespace glm
+
+#include "spline.inl"
diff --git a/3rdparty/glm/source/glm/gtx/spline.inl b/3rdparty/glm/source/glm/gtx/spline.inl
new file mode 100644
index 0000000..c3fd056
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/spline.inl
@@ -0,0 +1,60 @@
+/// @ref gtx_spline
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType catmullRom
+ (
+ genType const& v1,
+ genType const& v2,
+ genType const& v3,
+ genType const& v4,
+ typename genType::value_type const& s
+ )
+ {
+ typename genType::value_type s2 = pow2(s);
+ typename genType::value_type s3 = pow3(s);
+
+ typename genType::value_type f1 = -s3 + typename genType::value_type(2) * s2 - s;
+ typename genType::value_type f2 = typename genType::value_type(3) * s3 - typename genType::value_type(5) * s2 + typename genType::value_type(2);
+ typename genType::value_type f3 = typename genType::value_type(-3) * s3 + typename genType::value_type(4) * s2 + s;
+ typename genType::value_type f4 = s3 - s2;
+
+ return (f1 * v1 + f2 * v2 + f3 * v3 + f4 * v4) / typename genType::value_type(2);
+
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType hermite
+ (
+ genType const& v1,
+ genType const& t1,
+ genType const& v2,
+ genType const& t2,
+ typename genType::value_type const& s
+ )
+ {
+ typename genType::value_type s2 = pow2(s);
+ typename genType::value_type s3 = pow3(s);
+
+ typename genType::value_type f1 = typename genType::value_type(2) * s3 - typename genType::value_type(3) * s2 + typename genType::value_type(1);
+ typename genType::value_type f2 = typename genType::value_type(-2) * s3 + typename genType::value_type(3) * s2;
+ typename genType::value_type f3 = s3 - typename genType::value_type(2) * s2 + s;
+ typename genType::value_type f4 = s3 - s2;
+
+ return f1 * v1 + f2 * v2 + f3 * t1 + f4 * t2;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType cubic
+ (
+ genType const& v1,
+ genType const& v2,
+ genType const& v3,
+ genType const& v4,
+ typename genType::value_type const& s
+ )
+ {
+ return ((v1 * s + v2) * s + v3) * s + v4;
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/std_based_type.hpp b/3rdparty/glm/source/glm/gtx/std_based_type.hpp
new file mode 100644
index 0000000..cd3be8c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/std_based_type.hpp
@@ -0,0 +1,68 @@
+/// @ref gtx_std_based_type
+/// @file glm/gtx/std_based_type.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_std_based_type GLM_GTX_std_based_type
+/// @ingroup gtx
+///
+/// Include <glm/gtx/std_based_type.hpp> to use the features of this extension.
+///
+/// Adds vector types based on STL value types.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include <cstdlib>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_std_based_type is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_std_based_type extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_std_based_type
+ /// @{
+
+ /// Vector type based of one std::size_t component.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<1, std::size_t, defaultp> size1;
+
+ /// Vector type based of two std::size_t components.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<2, std::size_t, defaultp> size2;
+
+ /// Vector type based of three std::size_t components.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<3, std::size_t, defaultp> size3;
+
+ /// Vector type based of four std::size_t components.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<4, std::size_t, defaultp> size4;
+
+ /// Vector type based of one std::size_t component.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<1, std::size_t, defaultp> size1_t;
+
+ /// Vector type based of two std::size_t components.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<2, std::size_t, defaultp> size2_t;
+
+ /// Vector type based of three std::size_t components.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<3, std::size_t, defaultp> size3_t;
+
+ /// Vector type based of four std::size_t components.
+ /// @see GLM_GTX_std_based_type
+ typedef vec<4, std::size_t, defaultp> size4_t;
+
+ /// @}
+}//namespace glm
+
+#include "std_based_type.inl"
diff --git a/3rdparty/glm/source/glm/gtx/std_based_type.inl b/3rdparty/glm/source/glm/gtx/std_based_type.inl
new file mode 100644
index 0000000..9c34bdb
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/std_based_type.inl
@@ -0,0 +1,6 @@
+/// @ref gtx_std_based_type
+
+namespace glm
+{
+
+}
diff --git a/3rdparty/glm/source/glm/gtx/string_cast.hpp b/3rdparty/glm/source/glm/gtx/string_cast.hpp
new file mode 100644
index 0000000..71f6ece
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/string_cast.hpp
@@ -0,0 +1,46 @@
+/// @ref gtx_string_cast
+/// @file glm/gtx/string_cast.hpp
+///
+/// @see core (dependence)
+/// @see gtx_integer (dependence)
+/// @see gtx_quaternion (dependence)
+///
+/// @defgroup gtx_string_cast GLM_GTX_string_cast
+/// @ingroup gtx
+///
+/// Include <glm/gtx/string_cast.hpp> to use the features of this extension.
+///
+/// Setup strings for GLM type values
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/type_precision.hpp"
+#include "../gtc/quaternion.hpp"
+#include "../gtx/dual_quaternion.hpp"
+#include <string>
+#include <cmath>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_string_cast is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_string_cast extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_string_cast
+ /// @{
+
+ /// Create a string from a GLM vector or matrix typed variable.
+ /// @see gtx_string_cast extension.
+ template<typename genType>
+ GLM_FUNC_DECL std::string to_string(genType const& x);
+
+ /// @}
+}//namespace glm
+
+#include "string_cast.inl"
diff --git a/3rdparty/glm/source/glm/gtx/string_cast.inl b/3rdparty/glm/source/glm/gtx/string_cast.inl
new file mode 100644
index 0000000..f67751d
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/string_cast.inl
@@ -0,0 +1,492 @@
+/// @ref gtx_string_cast
+
+#include <cstdarg>
+#include <cstdio>
+
+namespace glm{
+namespace detail
+{
+ template <typename T>
+ struct cast
+ {
+ typedef T value_type;
+ };
+
+ template <>
+ struct cast<float>
+ {
+ typedef double value_type;
+ };
+
+ GLM_FUNC_QUALIFIER std::string format(const char* msg, ...)
+ {
+ std::size_t const STRING_BUFFER(4096);
+ char text[STRING_BUFFER];
+ va_list list;
+
+ if(msg == GLM_NULLPTR)
+ return std::string();
+
+ va_start(list, msg);
+# if (GLM_COMPILER & GLM_COMPILER_VC)
+ vsprintf_s(text, STRING_BUFFER, msg, list);
+# else//
+ std::vsprintf(text, msg, list);
+# endif//
+ va_end(list);
+
+ return std::string(text);
+ }
+
+ static const char* LabelTrue = "true";
+ static const char* LabelFalse = "false";
+
+ template<typename T, bool isFloat = false>
+ struct literal
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "%d";}
+ };
+
+ template<typename T>
+ struct literal<T, true>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "%f";}
+ };
+
+# if GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC
+ template<>
+ struct literal<uint64_t, false>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "%lld";}
+ };
+
+ template<>
+ struct literal<int64_t, false>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "%lld";}
+ };
+# endif//GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC
+
+ template<typename T>
+ struct prefix{};
+
+ template<>
+ struct prefix<float>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "";}
+ };
+
+ template<>
+ struct prefix<double>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "d";}
+ };
+
+ template<>
+ struct prefix<bool>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "b";}
+ };
+
+ template<>
+ struct prefix<uint8_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "u8";}
+ };
+
+ template<>
+ struct prefix<int8_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "i8";}
+ };
+
+ template<>
+ struct prefix<uint16_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "u16";}
+ };
+
+ template<>
+ struct prefix<int16_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "i16";}
+ };
+
+ template<>
+ struct prefix<uint32_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "u";}
+ };
+
+ template<>
+ struct prefix<int32_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "i";}
+ };
+
+ template<>
+ struct prefix<uint64_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "u64";}
+ };
+
+ template<>
+ struct prefix<int64_t>
+ {
+ GLM_FUNC_QUALIFIER static char const * value() {return "i64";}
+ };
+
+ template<typename matType>
+ struct compute_to_string
+ {};
+
+ template<qualifier Q>
+ struct compute_to_string<vec<1, bool, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<1, bool, Q> const& x)
+ {
+ return detail::format("bvec1(%s)",
+ x[0] ? detail::LabelTrue : detail::LabelFalse);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_to_string<vec<2, bool, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<2, bool, Q> const& x)
+ {
+ return detail::format("bvec2(%s, %s)",
+ x[0] ? detail::LabelTrue : detail::LabelFalse,
+ x[1] ? detail::LabelTrue : detail::LabelFalse);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_to_string<vec<3, bool, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<3, bool, Q> const& x)
+ {
+ return detail::format("bvec3(%s, %s, %s)",
+ x[0] ? detail::LabelTrue : detail::LabelFalse,
+ x[1] ? detail::LabelTrue : detail::LabelFalse,
+ x[2] ? detail::LabelTrue : detail::LabelFalse);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_to_string<vec<4, bool, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<4, bool, Q> const& x)
+ {
+ return detail::format("bvec4(%s, %s, %s, %s)",
+ x[0] ? detail::LabelTrue : detail::LabelFalse,
+ x[1] ? detail::LabelTrue : detail::LabelFalse,
+ x[2] ? detail::LabelTrue : detail::LabelFalse,
+ x[3] ? detail::LabelTrue : detail::LabelFalse);
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<vec<1, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<1, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%svec1(%s)",
+ PrefixStr,
+ LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<vec<2, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<2, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%svec2(%s, %s)",
+ PrefixStr,
+ LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0]),
+ static_cast<typename cast<T>::value_type>(x[1]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<vec<3, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<3, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%svec3(%s, %s, %s)",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0]),
+ static_cast<typename cast<T>::value_type>(x[1]),
+ static_cast<typename cast<T>::value_type>(x[2]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<vec<4, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(vec<4, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%svec4(%s, %s, %s, %s)",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0]),
+ static_cast<typename cast<T>::value_type>(x[1]),
+ static_cast<typename cast<T>::value_type>(x[2]),
+ static_cast<typename cast<T>::value_type>(x[3]));
+ }
+ };
+
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<2, 2, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<2, 2, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat2x2((%s, %s), (%s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<2, 3, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<2, 3, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat2x3((%s, %s, %s), (%s, %s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<2, 4, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<2, 4, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat2x4((%s, %s, %s, %s), (%s, %s, %s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]), static_cast<typename cast<T>::value_type>(x[0][3]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]), static_cast<typename cast<T>::value_type>(x[1][3]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<3, 2, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<3, 2, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat3x2((%s, %s), (%s, %s), (%s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]),
+ static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<3, 3, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<3, 3, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat3x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]),
+ static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<3, 4, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<3, 4, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat3x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]), static_cast<typename cast<T>::value_type>(x[0][3]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]), static_cast<typename cast<T>::value_type>(x[1][3]),
+ static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]), static_cast<typename cast<T>::value_type>(x[2][3]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<4, 2, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<4, 2, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat4x2((%s, %s), (%s, %s), (%s, %s), (%s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]),
+ static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]),
+ static_cast<typename cast<T>::value_type>(x[3][0]), static_cast<typename cast<T>::value_type>(x[3][1]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<4, 3, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<4, 3, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat4x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s), (%s, %s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]),
+ static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]),
+ static_cast<typename cast<T>::value_type>(x[3][0]), static_cast<typename cast<T>::value_type>(x[3][1]), static_cast<typename cast<T>::value_type>(x[3][2]));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<mat<4, 4, T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(mat<4, 4, T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%smat4x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]), static_cast<typename cast<T>::value_type>(x[0][3]),
+ static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]), static_cast<typename cast<T>::value_type>(x[1][3]),
+ static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]), static_cast<typename cast<T>::value_type>(x[2][3]),
+ static_cast<typename cast<T>::value_type>(x[3][0]), static_cast<typename cast<T>::value_type>(x[3][1]), static_cast<typename cast<T>::value_type>(x[3][2]), static_cast<typename cast<T>::value_type>(x[3][3]));
+ }
+ };
+
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<qua<T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(qua<T, Q> const& q)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%squat(%s, {%s, %s, %s})",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(q.w),
+ static_cast<typename cast<T>::value_type>(q.x),
+ static_cast<typename cast<T>::value_type>(q.y),
+ static_cast<typename cast<T>::value_type>(q.z));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_to_string<tdualquat<T, Q> >
+ {
+ GLM_FUNC_QUALIFIER static std::string call(tdualquat<T, Q> const& x)
+ {
+ char const * PrefixStr = prefix<T>::value();
+ char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+ std::string FormatStr(detail::format("%sdualquat((%s, {%s, %s, %s}), (%s, {%s, %s, %s}))",
+ PrefixStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+ LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+ return detail::format(FormatStr.c_str(),
+ static_cast<typename cast<T>::value_type>(x.real.w),
+ static_cast<typename cast<T>::value_type>(x.real.x),
+ static_cast<typename cast<T>::value_type>(x.real.y),
+ static_cast<typename cast<T>::value_type>(x.real.z),
+ static_cast<typename cast<T>::value_type>(x.dual.w),
+ static_cast<typename cast<T>::value_type>(x.dual.x),
+ static_cast<typename cast<T>::value_type>(x.dual.y),
+ static_cast<typename cast<T>::value_type>(x.dual.z));
+ }
+ };
+
+}//namespace detail
+
+template<class matType>
+GLM_FUNC_QUALIFIER std::string to_string(matType const& x)
+{
+ return detail::compute_to_string<matType>::call(x);
+}
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/texture.hpp b/3rdparty/glm/source/glm/gtx/texture.hpp
new file mode 100644
index 0000000..20585e6
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/texture.hpp
@@ -0,0 +1,46 @@
+/// @ref gtx_texture
+/// @file glm/gtx/texture.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_texture GLM_GTX_texture
+/// @ingroup gtx
+///
+/// Include <glm/gtx/texture.hpp> to use the features of this extension.
+///
+/// Wrapping mode of texture coordinates.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/integer.hpp"
+#include "../gtx/component_wise.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_texture is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_texture extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_texture
+ /// @{
+
+ /// Compute the number of mipmaps levels necessary to create a mipmap complete texture
+ ///
+ /// @param Extent Extent of the texture base level mipmap
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point or signed integer scalar types
+ /// @tparam Q Value from qualifier enum
+ template <length_t L, typename T, qualifier Q>
+ T levels(vec<L, T, Q> const& Extent);
+
+ /// @}
+}// namespace glm
+
+#include "texture.inl"
+
diff --git a/3rdparty/glm/source/glm/gtx/texture.inl b/3rdparty/glm/source/glm/gtx/texture.inl
new file mode 100644
index 0000000..593c826
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/texture.inl
@@ -0,0 +1,17 @@
+/// @ref gtx_texture
+
+namespace glm
+{
+ template <length_t L, typename T, qualifier Q>
+ inline T levels(vec<L, T, Q> const& Extent)
+ {
+ return glm::log2(compMax(Extent)) + static_cast<T>(1);
+ }
+
+ template <typename T>
+ inline T levels(T Extent)
+ {
+ return vec<1, T, defaultp>(Extent).x;
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtx/transform.hpp b/3rdparty/glm/source/glm/gtx/transform.hpp
new file mode 100644
index 0000000..0279fc8
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/transform.hpp
@@ -0,0 +1,60 @@
+/// @ref gtx_transform
+/// @file glm/gtx/transform.hpp
+///
+/// @see core (dependence)
+/// @see gtc_matrix_transform (dependence)
+/// @see gtx_transform
+/// @see gtx_transform2
+///
+/// @defgroup gtx_transform GLM_GTX_transform
+/// @ingroup gtx
+///
+/// Include <glm/gtx/transform.hpp> to use the features of this extension.
+///
+/// Add transformation matrices
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/matrix_transform.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_transform is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_transform extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_transform
+ /// @{
+
+ /// Transforms a matrix with a translation 4 * 4 matrix created from 3 scalars.
+ /// @see gtc_matrix_transform
+ /// @see gtx_transform
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> translate(
+ vec<3, T, Q> const& v);
+
+ /// Builds a rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians.
+ /// @see gtc_matrix_transform
+ /// @see gtx_transform
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> rotate(
+ T angle,
+ vec<3, T, Q> const& v);
+
+ /// Transforms a matrix with a scale 4 * 4 matrix created from a vector of 3 components.
+ /// @see gtc_matrix_transform
+ /// @see gtx_transform
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> scale(
+ vec<3, T, Q> const& v);
+
+ /// @}
+}// namespace glm
+
+#include "transform.inl"
diff --git a/3rdparty/glm/source/glm/gtx/transform.inl b/3rdparty/glm/source/glm/gtx/transform.inl
new file mode 100644
index 0000000..48ee680
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/transform.inl
@@ -0,0 +1,23 @@
+/// @ref gtx_transform
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(vec<3, T, Q> const& v)
+ {
+ return translate(mat<4, 4, T, Q>(static_cast<T>(1)), v);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(T angle, vec<3, T, Q> const& v)
+ {
+ return rotate(mat<4, 4, T, Q>(static_cast<T>(1)), angle, v);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(vec<3, T, Q> const& v)
+ {
+ return scale(mat<4, 4, T, Q>(static_cast<T>(1)), v);
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/transform2.hpp b/3rdparty/glm/source/glm/gtx/transform2.hpp
new file mode 100644
index 0000000..0d8ba9d
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/transform2.hpp
@@ -0,0 +1,89 @@
+/// @ref gtx_transform2
+/// @file glm/gtx/transform2.hpp
+///
+/// @see core (dependence)
+/// @see gtx_transform (dependence)
+///
+/// @defgroup gtx_transform2 GLM_GTX_transform2
+/// @ingroup gtx
+///
+/// Include <glm/gtx/transform2.hpp> to use the features of this extension.
+///
+/// Add extra transformation matrices
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/transform.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_transform2 is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_transform2 extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_transform2
+ /// @{
+
+ //! Transforms a matrix with a shearing on X axis.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T y);
+
+ //! Transforms a matrix with a shearing on Y axis.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T x);
+
+ //! Transforms a matrix with a shearing on X axis
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T y, T z);
+
+ //! Transforms a matrix with a shearing on Y axis.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T x, T z);
+
+ //! Transforms a matrix with a shearing on Z axis.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T x, T y);
+
+ //template<typename T> GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(const mat<4, 4, T, Q> & m, shearPlane, planePoint, angle)
+ // Identity + tan(angle) * cross(Normal, OnPlaneVector) 0
+ // - dot(PointOnPlane, normal) * OnPlaneVector 1
+
+ // Reflect functions seem to don't work
+ //template<typename T> mat<3, 3, T, Q> reflect2D(const mat<3, 3, T, Q> & m, const vec<3, T, Q>& normal){return reflect2DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension)
+ //template<typename T> mat<4, 4, T, Q> reflect3D(const mat<4, 4, T, Q> & m, const vec<3, T, Q>& normal){return reflect3DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension)
+
+ //! Build planar projection matrix along normal axis.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<3, 3, T, Q> proj2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal);
+
+ //! Build planar projection matrix along normal axis.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> proj3D(mat<4, 4, T, Q> const & m, vec<3, T, Q> const& normal);
+
+ //! Build a scale bias matrix.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(T scale, T bias);
+
+ //! Build a scale bias matrix.
+ //! From GLM_GTX_transform2 extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias);
+
+ /// @}
+}// namespace glm
+
+#include "transform2.inl"
diff --git a/3rdparty/glm/source/glm/gtx/transform2.inl b/3rdparty/glm/source/glm/gtx/transform2.inl
new file mode 100644
index 0000000..0118ab0
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/transform2.inl
@@ -0,0 +1,125 @@
+/// @ref gtx_transform2
+
+namespace glm
+{
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T s)
+ {
+ mat<3, 3, T, Q> r(1);
+ r[1][0] = s;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T s)
+ {
+ mat<3, 3, T, Q> r(1);
+ r[0][1] = s;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T s, T t)
+ {
+ mat<4, 4, T, Q> r(1);
+ r[0][1] = s;
+ r[0][2] = t;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T s, T t)
+ {
+ mat<4, 4, T, Q> r(1);
+ r[1][0] = s;
+ r[1][2] = t;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T s, T t)
+ {
+ mat<4, 4, T, Q> r(1);
+ r[2][0] = s;
+ r[2][1] = t;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> reflect2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal)
+ {
+ mat<3, 3, T, Q> r(static_cast<T>(1));
+ r[0][0] = static_cast<T>(1) - static_cast<T>(2) * normal.x * normal.x;
+ r[0][1] = -static_cast<T>(2) * normal.x * normal.y;
+ r[1][0] = -static_cast<T>(2) * normal.x * normal.y;
+ r[1][1] = static_cast<T>(1) - static_cast<T>(2) * normal.y * normal.y;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> reflect3D(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& normal)
+ {
+ mat<4, 4, T, Q> r(static_cast<T>(1));
+ r[0][0] = static_cast<T>(1) - static_cast<T>(2) * normal.x * normal.x;
+ r[0][1] = -static_cast<T>(2) * normal.x * normal.y;
+ r[0][2] = -static_cast<T>(2) * normal.x * normal.z;
+
+ r[1][0] = -static_cast<T>(2) * normal.x * normal.y;
+ r[1][1] = static_cast<T>(1) - static_cast<T>(2) * normal.y * normal.y;
+ r[1][2] = -static_cast<T>(2) * normal.y * normal.z;
+
+ r[2][0] = -static_cast<T>(2) * normal.x * normal.z;
+ r[2][1] = -static_cast<T>(2) * normal.y * normal.z;
+ r[2][2] = static_cast<T>(1) - static_cast<T>(2) * normal.z * normal.z;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<3, 3, T, Q> proj2D(
+ const mat<3, 3, T, Q>& m,
+ const vec<3, T, Q>& normal)
+ {
+ mat<3, 3, T, Q> r(static_cast<T>(1));
+ r[0][0] = static_cast<T>(1) - normal.x * normal.x;
+ r[0][1] = - normal.x * normal.y;
+ r[1][0] = - normal.x * normal.y;
+ r[1][1] = static_cast<T>(1) - normal.y * normal.y;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> proj3D(
+ const mat<4, 4, T, Q>& m,
+ const vec<3, T, Q>& normal)
+ {
+ mat<4, 4, T, Q> r(static_cast<T>(1));
+ r[0][0] = static_cast<T>(1) - normal.x * normal.x;
+ r[0][1] = - normal.x * normal.y;
+ r[0][2] = - normal.x * normal.z;
+ r[1][0] = - normal.x * normal.y;
+ r[1][1] = static_cast<T>(1) - normal.y * normal.y;
+ r[1][2] = - normal.y * normal.z;
+ r[2][0] = - normal.x * normal.z;
+ r[2][1] = - normal.y * normal.z;
+ r[2][2] = static_cast<T>(1) - normal.z * normal.z;
+ return m * r;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(T scale, T bias)
+ {
+ mat<4, 4, T, Q> result;
+ result[3] = vec<4, T, Q>(vec<3, T, Q>(bias), static_cast<T>(1));
+ result[0][0] = scale;
+ result[1][1] = scale;
+ result[2][2] = scale;
+ return result;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias)
+ {
+ return m * scaleBias<T, Q>(scale, bias);
+ }
+}//namespace glm
+
diff --git a/3rdparty/glm/source/glm/gtx/type_aligned.hpp b/3rdparty/glm/source/glm/gtx/type_aligned.hpp
new file mode 100644
index 0000000..2ae522c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/type_aligned.hpp
@@ -0,0 +1,982 @@
+/// @ref gtx_type_aligned
+/// @file glm/gtx/type_aligned.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_type_aligned GLM_GTX_type_aligned
+/// @ingroup gtx
+///
+/// Include <glm/gtx/type_aligned.hpp> to use the features of this extension.
+///
+/// Defines aligned types.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/type_precision.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_type_aligned is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_type_aligned extension included")
+# endif
+#endif
+
+namespace glm
+{
+ ///////////////////////////
+ // Signed int vector types
+
+ /// @addtogroup gtx_type_aligned
+ /// @{
+
+ /// Low qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int8, aligned_lowp_int8, 1);
+
+ /// Low qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int16, aligned_lowp_int16, 2);
+
+ /// Low qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int32, aligned_lowp_int32, 4);
+
+ /// Low qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int64, aligned_lowp_int64, 8);
+
+
+ /// Low qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int8_t, aligned_lowp_int8_t, 1);
+
+ /// Low qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int16_t, aligned_lowp_int16_t, 2);
+
+ /// Low qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int32_t, aligned_lowp_int32_t, 4);
+
+ /// Low qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_int64_t, aligned_lowp_int64_t, 8);
+
+
+ /// Low qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_i8, aligned_lowp_i8, 1);
+
+ /// Low qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_i16, aligned_lowp_i16, 2);
+
+ /// Low qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_i32, aligned_lowp_i32, 4);
+
+ /// Low qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_i64, aligned_lowp_i64, 8);
+
+
+ /// Medium qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int8, aligned_mediump_int8, 1);
+
+ /// Medium qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int16, aligned_mediump_int16, 2);
+
+ /// Medium qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int32, aligned_mediump_int32, 4);
+
+ /// Medium qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int64, aligned_mediump_int64, 8);
+
+
+ /// Medium qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int8_t, aligned_mediump_int8_t, 1);
+
+ /// Medium qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int16_t, aligned_mediump_int16_t, 2);
+
+ /// Medium qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int32_t, aligned_mediump_int32_t, 4);
+
+ /// Medium qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_int64_t, aligned_mediump_int64_t, 8);
+
+
+ /// Medium qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_i8, aligned_mediump_i8, 1);
+
+ /// Medium qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_i16, aligned_mediump_i16, 2);
+
+ /// Medium qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_i32, aligned_mediump_i32, 4);
+
+ /// Medium qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_i64, aligned_mediump_i64, 8);
+
+
+ /// High qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int8, aligned_highp_int8, 1);
+
+ /// High qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int16, aligned_highp_int16, 2);
+
+ /// High qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int32, aligned_highp_int32, 4);
+
+ /// High qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int64, aligned_highp_int64, 8);
+
+
+ /// High qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int8_t, aligned_highp_int8_t, 1);
+
+ /// High qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int16_t, aligned_highp_int16_t, 2);
+
+ /// High qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int32_t, aligned_highp_int32_t, 4);
+
+ /// High qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_int64_t, aligned_highp_int64_t, 8);
+
+
+ /// High qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_i8, aligned_highp_i8, 1);
+
+ /// High qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_i16, aligned_highp_i16, 2);
+
+ /// High qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_i32, aligned_highp_i32, 4);
+
+ /// High qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_i64, aligned_highp_i64, 8);
+
+
+ /// Default qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int8, aligned_int8, 1);
+
+ /// Default qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int16, aligned_int16, 2);
+
+ /// Default qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int32, aligned_int32, 4);
+
+ /// Default qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int64, aligned_int64, 8);
+
+
+ /// Default qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int8_t, aligned_int8_t, 1);
+
+ /// Default qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int16_t, aligned_int16_t, 2);
+
+ /// Default qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int32_t, aligned_int32_t, 4);
+
+ /// Default qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(int64_t, aligned_int64_t, 8);
+
+
+ /// Default qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i8, aligned_i8, 1);
+
+ /// Default qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i16, aligned_i16, 2);
+
+ /// Default qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i32, aligned_i32, 4);
+
+ /// Default qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i64, aligned_i64, 8);
+
+
+ /// Default qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(ivec1, aligned_ivec1, 4);
+
+ /// Default qualifier 32 bit signed integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(ivec2, aligned_ivec2, 8);
+
+ /// Default qualifier 32 bit signed integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(ivec3, aligned_ivec3, 16);
+
+ /// Default qualifier 32 bit signed integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(ivec4, aligned_ivec4, 16);
+
+
+ /// Default qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i8vec1, aligned_i8vec1, 1);
+
+ /// Default qualifier 8 bit signed integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i8vec2, aligned_i8vec2, 2);
+
+ /// Default qualifier 8 bit signed integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i8vec3, aligned_i8vec3, 4);
+
+ /// Default qualifier 8 bit signed integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i8vec4, aligned_i8vec4, 4);
+
+
+ /// Default qualifier 16 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i16vec1, aligned_i16vec1, 2);
+
+ /// Default qualifier 16 bit signed integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i16vec2, aligned_i16vec2, 4);
+
+ /// Default qualifier 16 bit signed integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i16vec3, aligned_i16vec3, 8);
+
+ /// Default qualifier 16 bit signed integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i16vec4, aligned_i16vec4, 8);
+
+
+ /// Default qualifier 32 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i32vec1, aligned_i32vec1, 4);
+
+ /// Default qualifier 32 bit signed integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i32vec2, aligned_i32vec2, 8);
+
+ /// Default qualifier 32 bit signed integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i32vec3, aligned_i32vec3, 16);
+
+ /// Default qualifier 32 bit signed integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i32vec4, aligned_i32vec4, 16);
+
+
+ /// Default qualifier 64 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i64vec1, aligned_i64vec1, 8);
+
+ /// Default qualifier 64 bit signed integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i64vec2, aligned_i64vec2, 16);
+
+ /// Default qualifier 64 bit signed integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i64vec3, aligned_i64vec3, 32);
+
+ /// Default qualifier 64 bit signed integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(i64vec4, aligned_i64vec4, 32);
+
+
+ /////////////////////////////
+ // Unsigned int vector types
+
+ /// Low qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint8, aligned_lowp_uint8, 1);
+
+ /// Low qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint16, aligned_lowp_uint16, 2);
+
+ /// Low qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint32, aligned_lowp_uint32, 4);
+
+ /// Low qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint64, aligned_lowp_uint64, 8);
+
+
+ /// Low qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint8_t, aligned_lowp_uint8_t, 1);
+
+ /// Low qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint16_t, aligned_lowp_uint16_t, 2);
+
+ /// Low qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint32_t, aligned_lowp_uint32_t, 4);
+
+ /// Low qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_uint64_t, aligned_lowp_uint64_t, 8);
+
+
+ /// Low qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_u8, aligned_lowp_u8, 1);
+
+ /// Low qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_u16, aligned_lowp_u16, 2);
+
+ /// Low qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_u32, aligned_lowp_u32, 4);
+
+ /// Low qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(lowp_u64, aligned_lowp_u64, 8);
+
+
+ /// Medium qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint8, aligned_mediump_uint8, 1);
+
+ /// Medium qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint16, aligned_mediump_uint16, 2);
+
+ /// Medium qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint32, aligned_mediump_uint32, 4);
+
+ /// Medium qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint64, aligned_mediump_uint64, 8);
+
+
+ /// Medium qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint8_t, aligned_mediump_uint8_t, 1);
+
+ /// Medium qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint16_t, aligned_mediump_uint16_t, 2);
+
+ /// Medium qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint32_t, aligned_mediump_uint32_t, 4);
+
+ /// Medium qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_uint64_t, aligned_mediump_uint64_t, 8);
+
+
+ /// Medium qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_u8, aligned_mediump_u8, 1);
+
+ /// Medium qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_u16, aligned_mediump_u16, 2);
+
+ /// Medium qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_u32, aligned_mediump_u32, 4);
+
+ /// Medium qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mediump_u64, aligned_mediump_u64, 8);
+
+
+ /// High qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint8, aligned_highp_uint8, 1);
+
+ /// High qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint16, aligned_highp_uint16, 2);
+
+ /// High qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint32, aligned_highp_uint32, 4);
+
+ /// High qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint64, aligned_highp_uint64, 8);
+
+
+ /// High qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint8_t, aligned_highp_uint8_t, 1);
+
+ /// High qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint16_t, aligned_highp_uint16_t, 2);
+
+ /// High qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint32_t, aligned_highp_uint32_t, 4);
+
+ /// High qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_uint64_t, aligned_highp_uint64_t, 8);
+
+
+ /// High qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_u8, aligned_highp_u8, 1);
+
+ /// High qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_u16, aligned_highp_u16, 2);
+
+ /// High qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_u32, aligned_highp_u32, 4);
+
+ /// High qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(highp_u64, aligned_highp_u64, 8);
+
+
+ /// Default qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint8, aligned_uint8, 1);
+
+ /// Default qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint16, aligned_uint16, 2);
+
+ /// Default qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint32, aligned_uint32, 4);
+
+ /// Default qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint64, aligned_uint64, 8);
+
+
+ /// Default qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint8_t, aligned_uint8_t, 1);
+
+ /// Default qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint16_t, aligned_uint16_t, 2);
+
+ /// Default qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint32_t, aligned_uint32_t, 4);
+
+ /// Default qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uint64_t, aligned_uint64_t, 8);
+
+
+ /// Default qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u8, aligned_u8, 1);
+
+ /// Default qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u16, aligned_u16, 2);
+
+ /// Default qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u32, aligned_u32, 4);
+
+ /// Default qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u64, aligned_u64, 8);
+
+
+ /// Default qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uvec1, aligned_uvec1, 4);
+
+ /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uvec2, aligned_uvec2, 8);
+
+ /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uvec3, aligned_uvec3, 16);
+
+ /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(uvec4, aligned_uvec4, 16);
+
+
+ /// Default qualifier 8 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u8vec1, aligned_u8vec1, 1);
+
+ /// Default qualifier 8 bit unsigned integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u8vec2, aligned_u8vec2, 2);
+
+ /// Default qualifier 8 bit unsigned integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u8vec3, aligned_u8vec3, 4);
+
+ /// Default qualifier 8 bit unsigned integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u8vec4, aligned_u8vec4, 4);
+
+
+ /// Default qualifier 16 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u16vec1, aligned_u16vec1, 2);
+
+ /// Default qualifier 16 bit unsigned integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u16vec2, aligned_u16vec2, 4);
+
+ /// Default qualifier 16 bit unsigned integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u16vec3, aligned_u16vec3, 8);
+
+ /// Default qualifier 16 bit unsigned integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u16vec4, aligned_u16vec4, 8);
+
+
+ /// Default qualifier 32 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u32vec1, aligned_u32vec1, 4);
+
+ /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u32vec2, aligned_u32vec2, 8);
+
+ /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u32vec3, aligned_u32vec3, 16);
+
+ /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u32vec4, aligned_u32vec4, 16);
+
+
+ /// Default qualifier 64 bit unsigned integer aligned scalar type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u64vec1, aligned_u64vec1, 8);
+
+ /// Default qualifier 64 bit unsigned integer aligned vector of 2 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u64vec2, aligned_u64vec2, 16);
+
+ /// Default qualifier 64 bit unsigned integer aligned vector of 3 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u64vec3, aligned_u64vec3, 32);
+
+ /// Default qualifier 64 bit unsigned integer aligned vector of 4 components type.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(u64vec4, aligned_u64vec4, 32);
+
+
+ //////////////////////
+ // Float vector types
+
+ /// 32 bit single-qualifier floating-point aligned scalar.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(float32, aligned_float32, 4);
+
+ /// 32 bit single-qualifier floating-point aligned scalar.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(float32_t, aligned_float32_t, 4);
+
+ /// 32 bit single-qualifier floating-point aligned scalar.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(float32, aligned_f32, 4);
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// 64 bit double-qualifier floating-point aligned scalar.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(float64, aligned_float64, 8);
+
+ /// 64 bit double-qualifier floating-point aligned scalar.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(float64_t, aligned_float64_t, 8);
+
+ /// 64 bit double-qualifier floating-point aligned scalar.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(float64, aligned_f64, 8);
+
+# endif//GLM_FORCE_SINGLE_ONLY
+
+
+ /// Single-qualifier floating-point aligned vector of 1 component.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(vec1, aligned_vec1, 4);
+
+ /// Single-qualifier floating-point aligned vector of 2 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(vec2, aligned_vec2, 8);
+
+ /// Single-qualifier floating-point aligned vector of 3 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(vec3, aligned_vec3, 16);
+
+ /// Single-qualifier floating-point aligned vector of 4 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(vec4, aligned_vec4, 16);
+
+
+ /// Single-qualifier floating-point aligned vector of 1 component.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fvec1, aligned_fvec1, 4);
+
+ /// Single-qualifier floating-point aligned vector of 2 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fvec2, aligned_fvec2, 8);
+
+ /// Single-qualifier floating-point aligned vector of 3 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fvec3, aligned_fvec3, 16);
+
+ /// Single-qualifier floating-point aligned vector of 4 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fvec4, aligned_fvec4, 16);
+
+
+ /// Single-qualifier floating-point aligned vector of 1 component.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32vec1, aligned_f32vec1, 4);
+
+ /// Single-qualifier floating-point aligned vector of 2 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32vec2, aligned_f32vec2, 8);
+
+ /// Single-qualifier floating-point aligned vector of 3 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32vec3, aligned_f32vec3, 16);
+
+ /// Single-qualifier floating-point aligned vector of 4 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32vec4, aligned_f32vec4, 16);
+
+
+ /// Double-qualifier floating-point aligned vector of 1 component.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(dvec1, aligned_dvec1, 8);
+
+ /// Double-qualifier floating-point aligned vector of 2 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(dvec2, aligned_dvec2, 16);
+
+ /// Double-qualifier floating-point aligned vector of 3 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(dvec3, aligned_dvec3, 32);
+
+ /// Double-qualifier floating-point aligned vector of 4 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(dvec4, aligned_dvec4, 32);
+
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// Double-qualifier floating-point aligned vector of 1 component.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64vec1, aligned_f64vec1, 8);
+
+ /// Double-qualifier floating-point aligned vector of 2 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64vec2, aligned_f64vec2, 16);
+
+ /// Double-qualifier floating-point aligned vector of 3 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64vec3, aligned_f64vec3, 32);
+
+ /// Double-qualifier floating-point aligned vector of 4 components.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64vec4, aligned_f64vec4, 32);
+
+# endif//GLM_FORCE_SINGLE_ONLY
+
+ //////////////////////
+ // Float matrix types
+
+ /// Single-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef detail::tmat1<f32> mat1;
+
+ /// Single-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mat2, aligned_mat2, 16);
+
+ /// Single-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mat3, aligned_mat3, 16);
+
+ /// Single-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mat4, aligned_mat4, 16);
+
+
+ /// Single-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef detail::tmat1x1<f32> mat1;
+
+ /// Single-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mat2x2, aligned_mat2x2, 16);
+
+ /// Single-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mat3x3, aligned_mat3x3, 16);
+
+ /// Single-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(mat4x4, aligned_mat4x4, 16);
+
+
+ /// Single-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef detail::tmat1x1<f32> fmat1;
+
+ /// Single-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2, 16);
+
+ /// Single-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3, 16);
+
+ /// Single-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4, 16);
+
+
+ /// Single-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef f32 fmat1x1;
+
+ /// Single-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2x2, 16);
+
+ /// Single-qualifier floating-point aligned 2x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat2x3, aligned_fmat2x3, 16);
+
+ /// Single-qualifier floating-point aligned 2x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat2x4, aligned_fmat2x4, 16);
+
+ /// Single-qualifier floating-point aligned 3x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat3x2, aligned_fmat3x2, 16);
+
+ /// Single-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3x3, 16);
+
+ /// Single-qualifier floating-point aligned 3x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat3x4, aligned_fmat3x4, 16);
+
+ /// Single-qualifier floating-point aligned 4x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat4x2, aligned_fmat4x2, 16);
+
+ /// Single-qualifier floating-point aligned 4x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat4x3, aligned_fmat4x3, 16);
+
+ /// Single-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4x4, 16);
+
+
+ /// Single-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef detail::tmat1x1<f32, defaultp> f32mat1;
+
+ /// Single-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2, 16);
+
+ /// Single-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3, 16);
+
+ /// Single-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4, 16);
+
+
+ /// Single-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef f32 f32mat1x1;
+
+ /// Single-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2x2, 16);
+
+ /// Single-qualifier floating-point aligned 2x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat2x3, aligned_f32mat2x3, 16);
+
+ /// Single-qualifier floating-point aligned 2x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat2x4, aligned_f32mat2x4, 16);
+
+ /// Single-qualifier floating-point aligned 3x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat3x2, aligned_f32mat3x2, 16);
+
+ /// Single-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3x3, 16);
+
+ /// Single-qualifier floating-point aligned 3x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat3x4, aligned_f32mat3x4, 16);
+
+ /// Single-qualifier floating-point aligned 4x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat4x2, aligned_f32mat4x2, 16);
+
+ /// Single-qualifier floating-point aligned 4x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat4x3, aligned_f32mat4x3, 16);
+
+ /// Single-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4x4, 16);
+
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// Double-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef detail::tmat1x1<f64, defaultp> f64mat1;
+
+ /// Double-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2, 32);
+
+ /// Double-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3, 32);
+
+ /// Double-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4, 32);
+
+
+ /// Double-qualifier floating-point aligned 1x1 matrix.
+ /// @see gtx_type_aligned
+ //typedef f64 f64mat1x1;
+
+ /// Double-qualifier floating-point aligned 2x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2x2, 32);
+
+ /// Double-qualifier floating-point aligned 2x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat2x3, aligned_f64mat2x3, 32);
+
+ /// Double-qualifier floating-point aligned 2x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat2x4, aligned_f64mat2x4, 32);
+
+ /// Double-qualifier floating-point aligned 3x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat3x2, aligned_f64mat3x2, 32);
+
+ /// Double-qualifier floating-point aligned 3x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3x3, 32);
+
+ /// Double-qualifier floating-point aligned 3x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat3x4, aligned_f64mat3x4, 32);
+
+ /// Double-qualifier floating-point aligned 4x2 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat4x2, aligned_f64mat4x2, 32);
+
+ /// Double-qualifier floating-point aligned 4x3 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat4x3, aligned_f64mat4x3, 32);
+
+ /// Double-qualifier floating-point aligned 4x4 matrix.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4x4, 32);
+
+# endif//GLM_FORCE_SINGLE_ONLY
+
+
+ //////////////////////////
+ // Quaternion types
+
+ /// Single-qualifier floating-point aligned quaternion.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(quat, aligned_quat, 16);
+
+ /// Single-qualifier floating-point aligned quaternion.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(quat, aligned_fquat, 16);
+
+ /// Double-qualifier floating-point aligned quaternion.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(dquat, aligned_dquat, 32);
+
+ /// Single-qualifier floating-point aligned quaternion.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f32quat, aligned_f32quat, 16);
+
+# ifndef GLM_FORCE_SINGLE_ONLY
+
+ /// Double-qualifier floating-point aligned quaternion.
+ /// @see gtx_type_aligned
+ GLM_ALIGNED_TYPEDEF(f64quat, aligned_f64quat, 32);
+
+# endif//GLM_FORCE_SINGLE_ONLY
+
+ /// @}
+}//namespace glm
+
+#include "type_aligned.inl"
diff --git a/3rdparty/glm/source/glm/gtx/type_aligned.inl b/3rdparty/glm/source/glm/gtx/type_aligned.inl
new file mode 100644
index 0000000..54c1b81
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/type_aligned.inl
@@ -0,0 +1,6 @@
+/// @ref gtx_type_aligned
+
+namespace glm
+{
+
+}
diff --git a/3rdparty/glm/source/glm/gtx/type_trait.hpp b/3rdparty/glm/source/glm/gtx/type_trait.hpp
new file mode 100644
index 0000000..56685c8
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/type_trait.hpp
@@ -0,0 +1,85 @@
+/// @ref gtx_type_trait
+/// @file glm/gtx/type_trait.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_type_trait GLM_GTX_type_trait
+/// @ingroup gtx
+///
+/// Include <glm/gtx/type_trait.hpp> to use the features of this extension.
+///
+/// Defines traits for each type.
+
+#pragma once
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_type_trait is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_type_trait extension included")
+# endif
+#endif
+
+// Dependency:
+#include "../detail/qualifier.hpp"
+#include "../gtc/quaternion.hpp"
+#include "../gtx/dual_quaternion.hpp"
+
+namespace glm
+{
+ /// @addtogroup gtx_type_trait
+ /// @{
+
+ template<typename T>
+ struct type
+ {
+ static bool const is_vec = false;
+ static bool const is_mat = false;
+ static bool const is_quat = false;
+ static length_t const components = 0;
+ static length_t const cols = 0;
+ static length_t const rows = 0;
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct type<vec<L, T, Q> >
+ {
+ static bool const is_vec = true;
+ static bool const is_mat = false;
+ static bool const is_quat = false;
+ static length_t const components = L;
+ };
+
+ template<length_t C, length_t R, typename T, qualifier Q>
+ struct type<mat<C, R, T, Q> >
+ {
+ static bool const is_vec = false;
+ static bool const is_mat = true;
+ static bool const is_quat = false;
+ static length_t const components = C;
+ static length_t const cols = C;
+ static length_t const rows = R;
+ };
+
+ template<typename T, qualifier Q>
+ struct type<qua<T, Q> >
+ {
+ static bool const is_vec = false;
+ static bool const is_mat = false;
+ static bool const is_quat = true;
+ static length_t const components = 4;
+ };
+
+ template<typename T, qualifier Q>
+ struct type<tdualquat<T, Q> >
+ {
+ static bool const is_vec = false;
+ static bool const is_mat = false;
+ static bool const is_quat = true;
+ static length_t const components = 8;
+ };
+
+ /// @}
+}//namespace glm
+
+#include "type_trait.inl"
diff --git a/3rdparty/glm/source/glm/gtx/type_trait.inl b/3rdparty/glm/source/glm/gtx/type_trait.inl
new file mode 100644
index 0000000..045de95
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/type_trait.inl
@@ -0,0 +1,61 @@
+/// @ref gtx_type_trait
+
+namespace glm
+{
+ template<typename T>
+ bool const type<T>::is_vec;
+ template<typename T>
+ bool const type<T>::is_mat;
+ template<typename T>
+ bool const type<T>::is_quat;
+ template<typename T>
+ length_t const type<T>::components;
+ template<typename T>
+ length_t const type<T>::cols;
+ template<typename T>
+ length_t const type<T>::rows;
+
+ // vec
+ template<length_t L, typename T, qualifier Q>
+ bool const type<vec<L, T, Q> >::is_vec;
+ template<length_t L, typename T, qualifier Q>
+ bool const type<vec<L, T, Q> >::is_mat;
+ template<length_t L, typename T, qualifier Q>
+ bool const type<vec<L, T, Q> >::is_quat;
+ template<length_t L, typename T, qualifier Q>
+ length_t const type<vec<L, T, Q> >::components;
+
+ // mat
+ template<length_t C, length_t R, typename T, qualifier Q>
+ bool const type<mat<C, R, T, Q> >::is_vec;
+ template<length_t C, length_t R, typename T, qualifier Q>
+ bool const type<mat<C, R, T, Q> >::is_mat;
+ template<length_t C, length_t R, typename T, qualifier Q>
+ bool const type<mat<C, R, T, Q> >::is_quat;
+ template<length_t C, length_t R, typename T, qualifier Q>
+ length_t const type<mat<C, R, T, Q> >::components;
+ template<length_t C, length_t R, typename T, qualifier Q>
+ length_t const type<mat<C, R, T, Q> >::cols;
+ template<length_t C, length_t R, typename T, qualifier Q>
+ length_t const type<mat<C, R, T, Q> >::rows;
+
+ // tquat
+ template<typename T, qualifier Q>
+ bool const type<qua<T, Q> >::is_vec;
+ template<typename T, qualifier Q>
+ bool const type<qua<T, Q> >::is_mat;
+ template<typename T, qualifier Q>
+ bool const type<qua<T, Q> >::is_quat;
+ template<typename T, qualifier Q>
+ length_t const type<qua<T, Q> >::components;
+
+ // tdualquat
+ template<typename T, qualifier Q>
+ bool const type<tdualquat<T, Q> >::is_vec;
+ template<typename T, qualifier Q>
+ bool const type<tdualquat<T, Q> >::is_mat;
+ template<typename T, qualifier Q>
+ bool const type<tdualquat<T, Q> >::is_quat;
+ template<typename T, qualifier Q>
+ length_t const type<tdualquat<T, Q> >::components;
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/vec_swizzle.hpp b/3rdparty/glm/source/glm/gtx/vec_swizzle.hpp
new file mode 100644
index 0000000..1c49abc
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/vec_swizzle.hpp
@@ -0,0 +1,2782 @@
+/// @ref gtx_vec_swizzle
+/// @file glm/gtx/vec_swizzle.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_vec_swizzle GLM_GTX_vec_swizzle
+/// @ingroup gtx
+///
+/// Include <glm/gtx/vec_swizzle.hpp> to use the features of this extension.
+///
+/// Functions to perform swizzle operation.
+
+#pragma once
+
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_vec_swizzle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_vec_swizzle extension included")
+# endif
+#endif
+
+namespace glm {
+ // xx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<1, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.x);
+ }
+
+ // xy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.y);
+ }
+
+ // xz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.z);
+ }
+
+ // xw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> xw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.x, v.w);
+ }
+
+ // yx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.x);
+ }
+
+ // yy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.y);
+ }
+
+ // yz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.z);
+ }
+
+ // yw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> yw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.y, v.w);
+ }
+
+ // zx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.x);
+ }
+
+ // zy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.y);
+ }
+
+ // zz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.z);
+ }
+
+ // zw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> zw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.z, v.w);
+ }
+
+ // wx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> wx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.w, v.x);
+ }
+
+ // wy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> wy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.w, v.y);
+ }
+
+ // wz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> wz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.w, v.z);
+ }
+
+ // ww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<2, T, Q> ww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<2, T, Q>(v.w, v.w);
+ }
+
+ // xxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<1, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.x);
+ }
+
+ // xxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.y);
+ }
+
+ // xxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.z);
+ }
+
+ // xxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.x, v.w);
+ }
+
+ // xyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.x);
+ }
+
+ // xyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.y);
+ }
+
+ // xyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.z);
+ }
+
+ // xyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.y, v.w);
+ }
+
+ // xzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.x);
+ }
+
+ // xzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.y);
+ }
+
+ // xzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.z);
+ }
+
+ // xzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.z, v.w);
+ }
+
+ // xwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.w, v.x);
+ }
+
+ // xwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.w, v.y);
+ }
+
+ // xwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.w, v.z);
+ }
+
+ // xww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> xww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.x, v.w, v.w);
+ }
+
+ // yxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.x);
+ }
+
+ // yxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.y);
+ }
+
+ // yxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.z);
+ }
+
+ // yxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.x, v.w);
+ }
+
+ // yyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.x);
+ }
+
+ // yyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.y);
+ }
+
+ // yyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.z);
+ }
+
+ // yyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.y, v.w);
+ }
+
+ // yzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.x);
+ }
+
+ // yzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.y);
+ }
+
+ // yzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.z);
+ }
+
+ // yzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.z, v.w);
+ }
+
+ // ywx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> ywx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.w, v.x);
+ }
+
+ // ywy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> ywy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.w, v.y);
+ }
+
+ // ywz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> ywz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.w, v.z);
+ }
+
+ // yww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> yww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.y, v.w, v.w);
+ }
+
+ // zxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.x);
+ }
+
+ // zxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.y);
+ }
+
+ // zxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.z);
+ }
+
+ // zxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.x, v.w);
+ }
+
+ // zyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.x);
+ }
+
+ // zyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.y);
+ }
+
+ // zyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.z);
+ }
+
+ // zyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.y, v.w);
+ }
+
+ // zzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.x);
+ }
+
+ // zzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.y);
+ }
+
+ // zzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.z);
+ }
+
+ // zzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.z, v.w);
+ }
+
+ // zwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.w, v.x);
+ }
+
+ // zwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.w, v.y);
+ }
+
+ // zwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.w, v.z);
+ }
+
+ // zww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> zww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.z, v.w, v.w);
+ }
+
+ // wxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.x, v.x);
+ }
+
+ // wxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.x, v.y);
+ }
+
+ // wxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.x, v.z);
+ }
+
+ // wxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.x, v.w);
+ }
+
+ // wyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.y, v.x);
+ }
+
+ // wyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.y, v.y);
+ }
+
+ // wyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.y, v.z);
+ }
+
+ // wyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.y, v.w);
+ }
+
+ // wzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.z, v.x);
+ }
+
+ // wzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.z, v.y);
+ }
+
+ // wzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.z, v.z);
+ }
+
+ // wzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.z, v.w);
+ }
+
+ // wwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.w, v.x);
+ }
+
+ // wwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.w, v.y);
+ }
+
+ // wwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> wwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.w, v.z);
+ }
+
+ // www
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<3, T, Q> www(const glm::vec<4, T, Q> &v) {
+ return glm::vec<3, T, Q>(v.w, v.w, v.w);
+ }
+
+ // xxxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<1, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x);
+ }
+
+ // xxxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y);
+ }
+
+ // xxxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z);
+ }
+
+ // xxxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.x, v.w);
+ }
+
+ // xxyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x);
+ }
+
+ // xxyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y);
+ }
+
+ // xxyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z);
+ }
+
+ // xxyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.y, v.w);
+ }
+
+ // xxzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x);
+ }
+
+ // xxzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y);
+ }
+
+ // xxzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z);
+ }
+
+ // xxzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.z, v.w);
+ }
+
+ // xxwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.w, v.x);
+ }
+
+ // xxwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.w, v.y);
+ }
+
+ // xxwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.w, v.z);
+ }
+
+ // xxww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xxww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.x, v.w, v.w);
+ }
+
+ // xyxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x);
+ }
+
+ // xyxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y);
+ }
+
+ // xyxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z);
+ }
+
+ // xyxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.x, v.w);
+ }
+
+ // xyyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x);
+ }
+
+ // xyyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y);
+ }
+
+ // xyyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z);
+ }
+
+ // xyyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.y, v.w);
+ }
+
+ // xyzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x);
+ }
+
+ // xyzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y);
+ }
+
+ // xyzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z);
+ }
+
+ // xyzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.z, v.w);
+ }
+
+ // xywx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xywx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.w, v.x);
+ }
+
+ // xywy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xywy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.w, v.y);
+ }
+
+ // xywz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xywz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.w, v.z);
+ }
+
+ // xyww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xyww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.y, v.w, v.w);
+ }
+
+ // xzxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x);
+ }
+
+ // xzxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y);
+ }
+
+ // xzxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z);
+ }
+
+ // xzxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.x, v.w);
+ }
+
+ // xzyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x);
+ }
+
+ // xzyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y);
+ }
+
+ // xzyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z);
+ }
+
+ // xzyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.y, v.w);
+ }
+
+ // xzzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x);
+ }
+
+ // xzzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y);
+ }
+
+ // xzzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z);
+ }
+
+ // xzzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.z, v.w);
+ }
+
+ // xzwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.w, v.x);
+ }
+
+ // xzwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.w, v.y);
+ }
+
+ // xzwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.w, v.z);
+ }
+
+ // xzww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xzww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.z, v.w, v.w);
+ }
+
+ // xwxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.x, v.x);
+ }
+
+ // xwxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.x, v.y);
+ }
+
+ // xwxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.x, v.z);
+ }
+
+ // xwxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.x, v.w);
+ }
+
+ // xwyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.y, v.x);
+ }
+
+ // xwyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.y, v.y);
+ }
+
+ // xwyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.y, v.z);
+ }
+
+ // xwyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.y, v.w);
+ }
+
+ // xwzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.z, v.x);
+ }
+
+ // xwzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.z, v.y);
+ }
+
+ // xwzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.z, v.z);
+ }
+
+ // xwzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.z, v.w);
+ }
+
+ // xwwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.w, v.x);
+ }
+
+ // xwwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.w, v.y);
+ }
+
+ // xwwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.w, v.z);
+ }
+
+ // xwww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> xwww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.x, v.w, v.w, v.w);
+ }
+
+ // yxxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x);
+ }
+
+ // yxxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y);
+ }
+
+ // yxxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z);
+ }
+
+ // yxxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.x, v.w);
+ }
+
+ // yxyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x);
+ }
+
+ // yxyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y);
+ }
+
+ // yxyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z);
+ }
+
+ // yxyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.y, v.w);
+ }
+
+ // yxzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x);
+ }
+
+ // yxzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y);
+ }
+
+ // yxzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z);
+ }
+
+ // yxzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.z, v.w);
+ }
+
+ // yxwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.w, v.x);
+ }
+
+ // yxwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.w, v.y);
+ }
+
+ // yxwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.w, v.z);
+ }
+
+ // yxww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yxww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.x, v.w, v.w);
+ }
+
+ // yyxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x);
+ }
+
+ // yyxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y);
+ }
+
+ // yyxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z);
+ }
+
+ // yyxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.x, v.w);
+ }
+
+ // yyyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x);
+ }
+
+ // yyyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<2, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y);
+ }
+
+ // yyyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z);
+ }
+
+ // yyyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.y, v.w);
+ }
+
+ // yyzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x);
+ }
+
+ // yyzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y);
+ }
+
+ // yyzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z);
+ }
+
+ // yyzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.z, v.w);
+ }
+
+ // yywx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yywx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.w, v.x);
+ }
+
+ // yywy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yywy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.w, v.y);
+ }
+
+ // yywz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yywz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.w, v.z);
+ }
+
+ // yyww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yyww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.y, v.w, v.w);
+ }
+
+ // yzxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x);
+ }
+
+ // yzxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y);
+ }
+
+ // yzxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z);
+ }
+
+ // yzxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.x, v.w);
+ }
+
+ // yzyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x);
+ }
+
+ // yzyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y);
+ }
+
+ // yzyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z);
+ }
+
+ // yzyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.y, v.w);
+ }
+
+ // yzzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x);
+ }
+
+ // yzzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y);
+ }
+
+ // yzzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z);
+ }
+
+ // yzzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.z, v.w);
+ }
+
+ // yzwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.w, v.x);
+ }
+
+ // yzwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.w, v.y);
+ }
+
+ // yzwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.w, v.z);
+ }
+
+ // yzww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> yzww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.z, v.w, v.w);
+ }
+
+ // ywxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.x, v.x);
+ }
+
+ // ywxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.x, v.y);
+ }
+
+ // ywxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.x, v.z);
+ }
+
+ // ywxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.x, v.w);
+ }
+
+ // ywyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.y, v.x);
+ }
+
+ // ywyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.y, v.y);
+ }
+
+ // ywyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.y, v.z);
+ }
+
+ // ywyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.y, v.w);
+ }
+
+ // ywzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.z, v.x);
+ }
+
+ // ywzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.z, v.y);
+ }
+
+ // ywzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.z, v.z);
+ }
+
+ // ywzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.z, v.w);
+ }
+
+ // ywwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.w, v.x);
+ }
+
+ // ywwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.w, v.y);
+ }
+
+ // ywwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.w, v.z);
+ }
+
+ // ywww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> ywww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.y, v.w, v.w, v.w);
+ }
+
+ // zxxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x);
+ }
+
+ // zxxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y);
+ }
+
+ // zxxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z);
+ }
+
+ // zxxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.x, v.w);
+ }
+
+ // zxyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x);
+ }
+
+ // zxyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y);
+ }
+
+ // zxyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z);
+ }
+
+ // zxyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.y, v.w);
+ }
+
+ // zxzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x);
+ }
+
+ // zxzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y);
+ }
+
+ // zxzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z);
+ }
+
+ // zxzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.z, v.w);
+ }
+
+ // zxwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.w, v.x);
+ }
+
+ // zxwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.w, v.y);
+ }
+
+ // zxwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.w, v.z);
+ }
+
+ // zxww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zxww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.x, v.w, v.w);
+ }
+
+ // zyxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x);
+ }
+
+ // zyxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y);
+ }
+
+ // zyxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z);
+ }
+
+ // zyxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.x, v.w);
+ }
+
+ // zyyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x);
+ }
+
+ // zyyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y);
+ }
+
+ // zyyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z);
+ }
+
+ // zyyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.y, v.w);
+ }
+
+ // zyzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x);
+ }
+
+ // zyzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y);
+ }
+
+ // zyzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z);
+ }
+
+ // zyzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.z, v.w);
+ }
+
+ // zywx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zywx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.w, v.x);
+ }
+
+ // zywy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zywy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.w, v.y);
+ }
+
+ // zywz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zywz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.w, v.z);
+ }
+
+ // zyww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zyww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.y, v.w, v.w);
+ }
+
+ // zzxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x);
+ }
+
+ // zzxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y);
+ }
+
+ // zzxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z);
+ }
+
+ // zzxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.x, v.w);
+ }
+
+ // zzyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x);
+ }
+
+ // zzyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y);
+ }
+
+ // zzyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z);
+ }
+
+ // zzyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.y, v.w);
+ }
+
+ // zzzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzx(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x);
+ }
+
+ // zzzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzy(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y);
+ }
+
+ // zzzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzz(const glm::vec<3, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z);
+ }
+
+ // zzzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.z, v.w);
+ }
+
+ // zzwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.w, v.x);
+ }
+
+ // zzwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.w, v.y);
+ }
+
+ // zzwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.w, v.z);
+ }
+
+ // zzww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zzww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.z, v.w, v.w);
+ }
+
+ // zwxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.x, v.x);
+ }
+
+ // zwxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.x, v.y);
+ }
+
+ // zwxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.x, v.z);
+ }
+
+ // zwxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.x, v.w);
+ }
+
+ // zwyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.y, v.x);
+ }
+
+ // zwyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.y, v.y);
+ }
+
+ // zwyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.y, v.z);
+ }
+
+ // zwyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.y, v.w);
+ }
+
+ // zwzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.z, v.x);
+ }
+
+ // zwzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.z, v.y);
+ }
+
+ // zwzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.z, v.z);
+ }
+
+ // zwzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.z, v.w);
+ }
+
+ // zwwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.w, v.x);
+ }
+
+ // zwwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.w, v.y);
+ }
+
+ // zwwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.w, v.z);
+ }
+
+ // zwww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> zwww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.z, v.w, v.w, v.w);
+ }
+
+ // wxxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.x, v.x);
+ }
+
+ // wxxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.x, v.y);
+ }
+
+ // wxxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.x, v.z);
+ }
+
+ // wxxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.x, v.w);
+ }
+
+ // wxyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.y, v.x);
+ }
+
+ // wxyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.y, v.y);
+ }
+
+ // wxyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.y, v.z);
+ }
+
+ // wxyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.y, v.w);
+ }
+
+ // wxzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.z, v.x);
+ }
+
+ // wxzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.z, v.y);
+ }
+
+ // wxzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.z, v.z);
+ }
+
+ // wxzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.z, v.w);
+ }
+
+ // wxwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.w, v.x);
+ }
+
+ // wxwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.w, v.y);
+ }
+
+ // wxwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.w, v.z);
+ }
+
+ // wxww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wxww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.x, v.w, v.w);
+ }
+
+ // wyxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.x, v.x);
+ }
+
+ // wyxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.x, v.y);
+ }
+
+ // wyxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.x, v.z);
+ }
+
+ // wyxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.x, v.w);
+ }
+
+ // wyyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.y, v.x);
+ }
+
+ // wyyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.y, v.y);
+ }
+
+ // wyyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.y, v.z);
+ }
+
+ // wyyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.y, v.w);
+ }
+
+ // wyzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.z, v.x);
+ }
+
+ // wyzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.z, v.y);
+ }
+
+ // wyzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.z, v.z);
+ }
+
+ // wyzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.z, v.w);
+ }
+
+ // wywx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wywx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.w, v.x);
+ }
+
+ // wywy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wywy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.w, v.y);
+ }
+
+ // wywz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wywz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.w, v.z);
+ }
+
+ // wyww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wyww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.y, v.w, v.w);
+ }
+
+ // wzxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.x, v.x);
+ }
+
+ // wzxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.x, v.y);
+ }
+
+ // wzxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.x, v.z);
+ }
+
+ // wzxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.x, v.w);
+ }
+
+ // wzyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.y, v.x);
+ }
+
+ // wzyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.y, v.y);
+ }
+
+ // wzyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.y, v.z);
+ }
+
+ // wzyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.y, v.w);
+ }
+
+ // wzzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.z, v.x);
+ }
+
+ // wzzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.z, v.y);
+ }
+
+ // wzzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.z, v.z);
+ }
+
+ // wzzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.z, v.w);
+ }
+
+ // wzwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.w, v.x);
+ }
+
+ // wzwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.w, v.y);
+ }
+
+ // wzwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.w, v.z);
+ }
+
+ // wzww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wzww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.z, v.w, v.w);
+ }
+
+ // wwxx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwxx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.x, v.x);
+ }
+
+ // wwxy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwxy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.x, v.y);
+ }
+
+ // wwxz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwxz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.x, v.z);
+ }
+
+ // wwxw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwxw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.x, v.w);
+ }
+
+ // wwyx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwyx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.y, v.x);
+ }
+
+ // wwyy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwyy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.y, v.y);
+ }
+
+ // wwyz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwyz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.y, v.z);
+ }
+
+ // wwyw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwyw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.y, v.w);
+ }
+
+ // wwzx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwzx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.z, v.x);
+ }
+
+ // wwzy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwzy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.z, v.y);
+ }
+
+ // wwzz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwzz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.z, v.z);
+ }
+
+ // wwzw
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwzw(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.z, v.w);
+ }
+
+ // wwwx
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwwx(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.w, v.x);
+ }
+
+ // wwwy
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwwy(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.w, v.y);
+ }
+
+ // wwwz
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwwz(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.w, v.z);
+ }
+
+ // wwww
+ template<typename T, qualifier Q>
+ GLM_INLINE glm::vec<4, T, Q> wwww(const glm::vec<4, T, Q> &v) {
+ return glm::vec<4, T, Q>(v.w, v.w, v.w, v.w);
+ }
+
+}
diff --git a/3rdparty/glm/source/glm/gtx/vector_angle.hpp b/3rdparty/glm/source/glm/gtx/vector_angle.hpp
new file mode 100644
index 0000000..9ae4371
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/vector_angle.hpp
@@ -0,0 +1,57 @@
+/// @ref gtx_vector_angle
+/// @file glm/gtx/vector_angle.hpp
+///
+/// @see core (dependence)
+/// @see gtx_quaternion (dependence)
+/// @see gtx_epsilon (dependence)
+///
+/// @defgroup gtx_vector_angle GLM_GTX_vector_angle
+/// @ingroup gtx
+///
+/// Include <glm/gtx/vector_angle.hpp> to use the features of this extension.
+///
+/// Compute angle between vectors
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/epsilon.hpp"
+#include "../gtx/quaternion.hpp"
+#include "../gtx/rotate_vector.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_vector_angle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_vector_angle extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_vector_angle
+ /// @{
+
+ //! Returns the absolute angle between two vectors.
+ //! Parameters need to be normalized.
+ /// @see gtx_vector_angle extension.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL T angle(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ //! Returns the oriented angle between two 2d vectors.
+ //! Parameters need to be normalized.
+ /// @see gtx_vector_angle extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y);
+
+	//! Returns the oriented angle between two 3d vectors around a reference axis.
+ //! Parameters need to be normalized.
+ /// @see gtx_vector_angle extension.
+ template<typename T, qualifier Q>
+ GLM_FUNC_DECL T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref);
+
+ /// @}
+}// namespace glm
+
+#include "vector_angle.inl"
diff --git a/3rdparty/glm/source/glm/gtx/vector_angle.inl b/3rdparty/glm/source/glm/gtx/vector_angle.inl
new file mode 100644
index 0000000..878160d
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/vector_angle.inl
@@ -0,0 +1,45 @@
+/// @ref gtx_vector_angle
+
+namespace glm
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType angle
+ (
+ genType const& x,
+ genType const& y
+ )
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'angle' only accept floating-point inputs");
+ return acos(clamp(dot(x, y), genType(-1), genType(1)));
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T angle(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'angle' only accept floating-point inputs");
+ return acos(clamp(dot(x, y), T(-1), T(1)));
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'orientedAngle' only accept floating-point inputs");
+ T const Angle(acos(clamp(dot(x, y), T(-1), T(1))));
+
+ T const partialCross = x.x * y.y - y.x * x.y;
+
+ if (partialCross > T(0))
+ return Angle;
+ else
+ return -Angle;
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'orientedAngle' only accept floating-point inputs");
+
+ T const Angle(acos(clamp(dot(x, y), T(-1), T(1))));
+ return mix(Angle, -Angle, dot(ref, cross(x, y)) < T(0));
+ }
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/vector_query.hpp b/3rdparty/glm/source/glm/gtx/vector_query.hpp
new file mode 100644
index 0000000..77c7b97
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/vector_query.hpp
@@ -0,0 +1,66 @@
+/// @ref gtx_vector_query
+/// @file glm/gtx/vector_query.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_vector_query GLM_GTX_vector_query
+/// @ingroup gtx
+///
+/// Include <glm/gtx/vector_query.hpp> to use the features of this extension.
+///
+/// Query information about vector types
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include <cfloat>
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_vector_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_vector_query extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_vector_query
+ /// @{
+
+	//! Check whether two vectors are collinear.
+ /// @see gtx_vector_query extensions.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL bool areCollinear(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
+
+	//! Check whether two vectors are orthogonal.
+ /// @see gtx_vector_query extensions.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL bool areOrthogonal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
+
+ //! Check whether a vector is normalized.
+ /// @see gtx_vector_query extensions.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNormalized(vec<L, T, Q> const& v, T const& epsilon);
+
+ //! Check whether a vector is null.
+ /// @see gtx_vector_query extensions.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL bool isNull(vec<L, T, Q> const& v, T const& epsilon);
+
+	//! Check whether each component of a vector is null.
+ /// @see gtx_vector_query extensions.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, bool, Q> isCompNull(vec<L, T, Q> const& v, T const& epsilon);
+
+ //! Check whether two vectors are orthonormal.
+ /// @see gtx_vector_query extensions.
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL bool areOrthonormal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
+
+ /// @}
+}// namespace glm
+
+#include "vector_query.inl"
diff --git a/3rdparty/glm/source/glm/gtx/vector_query.inl b/3rdparty/glm/source/glm/gtx/vector_query.inl
new file mode 100644
index 0000000..d1a5c9b
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/vector_query.inl
@@ -0,0 +1,154 @@
+/// @ref gtx_vector_query
+
+#include <cassert>
+
+namespace glm{
+namespace detail
+{
+ template<length_t L, typename T, qualifier Q>
+ struct compute_areCollinear{};
+
+ template<typename T, qualifier Q>
+ struct compute_areCollinear<2, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static bool call(vec<2, T, Q> const& v0, vec<2, T, Q> const& v1, T const& epsilon)
+ {
+ return length(cross(vec<3, T, Q>(v0, static_cast<T>(0)), vec<3, T, Q>(v1, static_cast<T>(0)))) < epsilon;
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_areCollinear<3, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static bool call(vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, T const& epsilon)
+ {
+ return length(cross(v0, v1)) < epsilon;
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_areCollinear<4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static bool call(vec<4, T, Q> const& v0, vec<4, T, Q> const& v1, T const& epsilon)
+ {
+ return length(cross(vec<3, T, Q>(v0), vec<3, T, Q>(v1))) < epsilon;
+ }
+ };
+
+ template<length_t L, typename T, qualifier Q>
+ struct compute_isCompNull{};
+
+ template<typename T, qualifier Q>
+ struct compute_isCompNull<2, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<2, bool, Q> call(vec<2, T, Q> const& v, T const& epsilon)
+ {
+ return vec<2, bool, Q>(
+ (abs(v.x) < epsilon),
+ (abs(v.y) < epsilon));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_isCompNull<3, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<3, bool, Q> call(vec<3, T, Q> const& v, T const& epsilon)
+ {
+ return vec<3, bool, Q>(
+ (abs(v.x) < epsilon),
+ (abs(v.y) < epsilon),
+ (abs(v.z) < epsilon));
+ }
+ };
+
+ template<typename T, qualifier Q>
+ struct compute_isCompNull<4, T, Q>
+ {
+ GLM_FUNC_QUALIFIER static vec<4, bool, Q> call(vec<4, T, Q> const& v, T const& epsilon)
+ {
+ return vec<4, bool, Q>(
+ (abs(v.x) < epsilon),
+ (abs(v.y) < epsilon),
+ (abs(v.z) < epsilon),
+ (abs(v.w) < epsilon));
+ }
+ };
+
+}//namespace detail
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool areCollinear(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'areCollinear' only accept floating-point inputs");
+
+ return detail::compute_areCollinear<L, T, Q>::call(v0, v1, epsilon);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool areOrthogonal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'areOrthogonal' only accept floating-point inputs");
+
+ return abs(dot(v0, v1)) <= max(
+ static_cast<T>(1),
+ length(v0)) * max(static_cast<T>(1), length(v1)) * epsilon;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNormalized(vec<L, T, Q> const& v, T const& epsilon)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isNormalized' only accept floating-point inputs");
+
+ return abs(length(v) - static_cast<T>(1)) <= static_cast<T>(2) * epsilon;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool isNull(vec<L, T, Q> const& v, T const& epsilon)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isNull' only accept floating-point inputs");
+
+ return length(v) <= epsilon;
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<L, bool, Q> isCompNull(vec<L, T, Q> const& v, T const& epsilon)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isCompNull' only accept floating-point inputs");
+
+ return detail::compute_isCompNull<L, T, Q>::call(v, epsilon);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<2, bool, Q> isCompNull(vec<2, T, Q> const& v, T const& epsilon)
+ {
+ return vec<2, bool, Q>(
+ abs(v.x) < epsilon,
+ abs(v.y) < epsilon);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<3, bool, Q> isCompNull(vec<3, T, Q> const& v, T const& epsilon)
+ {
+ return vec<3, bool, Q>(
+ abs(v.x) < epsilon,
+ abs(v.y) < epsilon,
+ abs(v.z) < epsilon);
+ }
+
+ template<typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER vec<4, bool, Q> isCompNull(vec<4, T, Q> const& v, T const& epsilon)
+ {
+ return vec<4, bool, Q>(
+ abs(v.x) < epsilon,
+ abs(v.y) < epsilon,
+ abs(v.z) < epsilon,
+ abs(v.w) < epsilon);
+ }
+
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_QUALIFIER bool areOrthonormal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
+ {
+ return isNormalized(v0, epsilon) && isNormalized(v1, epsilon) && (abs(dot(v0, v1)) <= epsilon);
+ }
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/gtx/wrap.hpp b/3rdparty/glm/source/glm/gtx/wrap.hpp
new file mode 100644
index 0000000..ad4eb3f
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/wrap.hpp
@@ -0,0 +1,37 @@
+/// @ref gtx_wrap
+/// @file glm/gtx/wrap.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_wrap GLM_GTX_wrap
+/// @ingroup gtx
+///
+/// Include <glm/gtx/wrap.hpp> to use the features of this extension.
+///
+/// Wrapping mode of texture coordinates.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../ext/scalar_common.hpp"
+#include "../ext/vector_common.hpp"
+#include "../gtc/vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_wrap is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_wrap extension included")
+# endif
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtx_wrap
+ /// @{
+
+ /// @}
+}// namespace glm
+
+#include "wrap.inl"
diff --git a/3rdparty/glm/source/glm/gtx/wrap.inl b/3rdparty/glm/source/glm/gtx/wrap.inl
new file mode 100644
index 0000000..4be3b4c
--- /dev/null
+++ b/3rdparty/glm/source/glm/gtx/wrap.inl
@@ -0,0 +1,6 @@
+/// @ref gtx_wrap
+
+namespace glm
+{
+
+}//namespace glm
diff --git a/3rdparty/glm/source/glm/integer.hpp b/3rdparty/glm/source/glm/integer.hpp
new file mode 100644
index 0000000..8817db3
--- /dev/null
+++ b/3rdparty/glm/source/glm/integer.hpp
@@ -0,0 +1,212 @@
+/// @ref core
+/// @file glm/integer.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+///
+/// @defgroup core_func_integer Integer functions
+/// @ingroup core
+///
+/// Provides GLSL functions on integer types
+///
+/// These all operate component-wise. The description is per component.
+/// The notation [a, b] means the set of bits from bit-number a through bit-number
+/// b, inclusive. The lowest-order bit is bit 0.
+///
+/// Include <glm/integer.hpp> to use these core features.
+
+#pragma once
+
+#include "detail/qualifier.hpp"
+#include "common.hpp"
+#include "vector_relational.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_func_integer
+ /// @{
+
+ /// Adds 32-bit unsigned integer x and y, returning the sum
+ /// modulo pow(2, 32). The value carry is set to 0 if the sum was
+ /// less than pow(2, 32), or to 1 otherwise.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/uaddCarry.xml">GLSL uaddCarry man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, uint, Q> uaddCarry(
+ vec<L, uint, Q> const& x,
+ vec<L, uint, Q> const& y,
+ vec<L, uint, Q> & carry);
+
+ /// Subtracts the 32-bit unsigned integer y from x, returning
+ /// the difference if non-negative, or pow(2, 32) plus the difference
+ /// otherwise. The value borrow is set to 0 if x >= y, or to 1 otherwise.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/usubBorrow.xml">GLSL usubBorrow man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL vec<L, uint, Q> usubBorrow(
+ vec<L, uint, Q> const& x,
+ vec<L, uint, Q> const& y,
+ vec<L, uint, Q> & borrow);
+
+ /// Multiplies 32-bit integers x and y, producing a 64-bit
+ /// result. The 32 least-significant bits are returned in lsb.
+ /// The 32 most-significant bits are returned in msb.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/umulExtended.xml">GLSL umulExtended man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL void umulExtended(
+ vec<L, uint, Q> const& x,
+ vec<L, uint, Q> const& y,
+ vec<L, uint, Q> & msb,
+ vec<L, uint, Q> & lsb);
+
+ /// Multiplies 32-bit integers x and y, producing a 64-bit
+ /// result. The 32 least-significant bits are returned in lsb.
+ /// The 32 most-significant bits are returned in msb.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/imulExtended.xml">GLSL imulExtended man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL void imulExtended(
+ vec<L, int, Q> const& x,
+ vec<L, int, Q> const& y,
+ vec<L, int, Q> & msb,
+ vec<L, int, Q> & lsb);
+
+ /// Extracts bits [offset, offset + bits - 1] from value,
+ /// returning them in the least significant bits of the result.
+ /// For unsigned data types, the most significant bits of the
+ /// result will be set to zero. For signed data types, the
+	/// most significant bits will be set to the value of bit offset + bits - 1.
+ ///
+ /// If bits is zero, the result will be zero. The result will be
+ /// undefined if offset or bits is negative, or if the sum of
+ /// offset and bits is greater than the number of bits used
+ /// to store the operand.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/bitfieldExtract.xml">GLSL bitfieldExtract man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldExtract(
+ vec<L, T, Q> const& Value,
+ int Offset,
+ int Bits);
+
+	/// Returns the result of inserting the 'bits' least-significant bits of insert into base.
+ ///
+ /// The result will have bits [offset, offset + bits - 1] taken
+ /// from bits [0, bits - 1] of insert, and all other bits taken
+ /// directly from the corresponding bits of base. If bits is
+ /// zero, the result will simply be base. The result will be
+ /// undefined if offset or bits is negative, or if the sum of
+ /// offset and bits is greater than the number of bits used to
+ /// store the operand.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/bitfieldInsert.xml">GLSL bitfieldInsert man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldInsert(
+ vec<L, T, Q> const& Base,
+ vec<L, T, Q> const& Insert,
+ int Offset,
+ int Bits);
+
+ /// Returns the reversal of the bits of value.
+ /// The bit numbered n of the result will be taken from bit (bits - 1) - n of value,
+ /// where bits is the total number of bits used to represent value.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/bitfieldReverse.xml">GLSL bitfieldReverse man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v);
+
+ /// Returns the number of bits set to 1 in the binary representation of value.
+ ///
+ /// @tparam genType Signed or unsigned integer scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/bitCount.xml">GLSL bitCount man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<typename genType>
+ GLM_FUNC_DECL int bitCount(genType v);
+
+ /// Returns the number of bits set to 1 in the binary representation of value.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar or vector types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/bitCount.xml">GLSL bitCount man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> bitCount(vec<L, T, Q> const& v);
+
+ /// Returns the bit number of the least significant bit set to
+ /// 1 in the binary representation of value.
+ /// If value is zero, -1 will be returned.
+ ///
+ /// @tparam genIUType Signed or unsigned integer scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/findLSB.xml">GLSL findLSB man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<typename genIUType>
+ GLM_FUNC_DECL int findLSB(genIUType x);
+
+ /// Returns the bit number of the least significant bit set to
+ /// 1 in the binary representation of value.
+ /// If value is zero, -1 will be returned.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/findLSB.xml">GLSL findLSB man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> findLSB(vec<L, T, Q> const& v);
+
+ /// Returns the bit number of the most significant bit in the binary representation of value.
+ /// For positive integers, the result will be the bit number of the most significant bit set to 1.
+ /// For negative integers, the result will be the bit number of the most significant
+ /// bit set to 0. For a value of zero or negative one, -1 will be returned.
+ ///
+ /// @tparam genIUType Signed or unsigned integer scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/findMSB.xml">GLSL findMSB man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<typename genIUType>
+ GLM_FUNC_DECL int findMSB(genIUType x);
+
+ /// Returns the bit number of the most significant bit in the binary representation of value.
+ /// For positive integers, the result will be the bit number of the most significant bit set to 1.
+ /// For negative integers, the result will be the bit number of the most significant
+ /// bit set to 0. For a value of zero or negative one, -1 will be returned.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T Signed or unsigned integer scalar types.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/findMSB.xml">GLSL findMSB man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.8 Integer Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, int, Q> findMSB(vec<L, T, Q> const& v);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_integer.inl"
diff --git a/3rdparty/glm/source/glm/mat2x2.hpp b/3rdparty/glm/source/glm/mat2x2.hpp
new file mode 100644
index 0000000..96bec96
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat2x2.hpp
@@ -0,0 +1,9 @@
+/// @ref core
+/// @file glm/mat2x2.hpp
+
+#pragma once
+#include "./ext/matrix_double2x2.hpp"
+#include "./ext/matrix_double2x2_precision.hpp"
+#include "./ext/matrix_float2x2.hpp"
+#include "./ext/matrix_float2x2_precision.hpp"
+
diff --git a/3rdparty/glm/source/glm/mat2x3.hpp b/3rdparty/glm/source/glm/mat2x3.hpp
new file mode 100644
index 0000000..d68dc25
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat2x3.hpp
@@ -0,0 +1,9 @@
+/// @ref core
+/// @file glm/mat2x3.hpp
+
+#pragma once
+#include "./ext/matrix_double2x3.hpp"
+#include "./ext/matrix_double2x3_precision.hpp"
+#include "./ext/matrix_float2x3.hpp"
+#include "./ext/matrix_float2x3_precision.hpp"
+
diff --git a/3rdparty/glm/source/glm/mat2x4.hpp b/3rdparty/glm/source/glm/mat2x4.hpp
new file mode 100644
index 0000000..b04b738
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat2x4.hpp
@@ -0,0 +1,9 @@
+/// @ref core
+/// @file glm/mat2x4.hpp
+
+#pragma once
+#include "./ext/matrix_double2x4.hpp"
+#include "./ext/matrix_double2x4_precision.hpp"
+#include "./ext/matrix_float2x4.hpp"
+#include "./ext/matrix_float2x4_precision.hpp"
+
diff --git a/3rdparty/glm/source/glm/mat3x2.hpp b/3rdparty/glm/source/glm/mat3x2.hpp
new file mode 100644
index 0000000..c853153
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat3x2.hpp
@@ -0,0 +1,9 @@
+/// @ref core
+/// @file glm/mat3x2.hpp
+
+#pragma once
+#include "./ext/matrix_double3x2.hpp"
+#include "./ext/matrix_double3x2_precision.hpp"
+#include "./ext/matrix_float3x2.hpp"
+#include "./ext/matrix_float3x2_precision.hpp"
+
diff --git a/3rdparty/glm/source/glm/mat3x3.hpp b/3rdparty/glm/source/glm/mat3x3.hpp
new file mode 100644
index 0000000..fd4fa31
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat3x3.hpp
@@ -0,0 +1,8 @@
+/// @ref core
+/// @file glm/mat3x3.hpp
+
+#pragma once
+#include "./ext/matrix_double3x3.hpp"
+#include "./ext/matrix_double3x3_precision.hpp"
+#include "./ext/matrix_float3x3.hpp"
+#include "./ext/matrix_float3x3_precision.hpp"
diff --git a/3rdparty/glm/source/glm/mat3x4.hpp b/3rdparty/glm/source/glm/mat3x4.hpp
new file mode 100644
index 0000000..6342bf5
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat3x4.hpp
@@ -0,0 +1,8 @@
+/// @ref core
+/// @file glm/mat3x4.hpp
+
+#pragma once
+#include "./ext/matrix_double3x4.hpp"
+#include "./ext/matrix_double3x4_precision.hpp"
+#include "./ext/matrix_float3x4.hpp"
+#include "./ext/matrix_float3x4_precision.hpp"
diff --git a/3rdparty/glm/source/glm/mat4x2.hpp b/3rdparty/glm/source/glm/mat4x2.hpp
new file mode 100644
index 0000000..e013e46
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat4x2.hpp
@@ -0,0 +1,9 @@
+/// @ref core
+/// @file glm/mat4x2.hpp
+
+#pragma once
+#include "./ext/matrix_double4x2.hpp"
+#include "./ext/matrix_double4x2_precision.hpp"
+#include "./ext/matrix_float4x2.hpp"
+#include "./ext/matrix_float4x2_precision.hpp"
+
diff --git a/3rdparty/glm/source/glm/mat4x3.hpp b/3rdparty/glm/source/glm/mat4x3.hpp
new file mode 100644
index 0000000..205725a
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat4x3.hpp
@@ -0,0 +1,8 @@
+/// @ref core
+/// @file glm/mat4x3.hpp
+
+#pragma once
+#include "./ext/matrix_double4x3.hpp"
+#include "./ext/matrix_double4x3_precision.hpp"
+#include "./ext/matrix_float4x3.hpp"
+#include "./ext/matrix_float4x3_precision.hpp"
diff --git a/3rdparty/glm/source/glm/mat4x4.hpp b/3rdparty/glm/source/glm/mat4x4.hpp
new file mode 100644
index 0000000..3515f7f
--- /dev/null
+++ b/3rdparty/glm/source/glm/mat4x4.hpp
@@ -0,0 +1,9 @@
+/// @ref core
+/// @file glm/mat4x4.hpp
+
+#pragma once
+#include "./ext/matrix_double4x4.hpp"
+#include "./ext/matrix_double4x4_precision.hpp"
+#include "./ext/matrix_float4x4.hpp"
+#include "./ext/matrix_float4x4_precision.hpp"
+
diff --git a/3rdparty/glm/source/glm/matrix.hpp b/3rdparty/glm/source/glm/matrix.hpp
new file mode 100644
index 0000000..4584c92
--- /dev/null
+++ b/3rdparty/glm/source/glm/matrix.hpp
@@ -0,0 +1,161 @@
+/// @ref core
+/// @file glm/matrix.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+///
+/// @defgroup core_func_matrix Matrix functions
+/// @ingroup core
+///
+/// Provides GLSL matrix functions.
+///
+/// Include <glm/matrix.hpp> to use these core features.
+
+#pragma once
+
+// Dependencies
+#include "detail/qualifier.hpp"
+#include "detail/setup.hpp"
+#include "vec2.hpp"
+#include "vec3.hpp"
+#include "vec4.hpp"
+#include "mat2x2.hpp"
+#include "mat2x3.hpp"
+#include "mat2x4.hpp"
+#include "mat3x2.hpp"
+#include "mat3x3.hpp"
+#include "mat3x4.hpp"
+#include "mat4x2.hpp"
+#include "mat4x3.hpp"
+#include "mat4x4.hpp"
+
+namespace glm {
+namespace detail
+{
+ template<length_t C, length_t R, typename T, qualifier Q>
+ struct outerProduct_trait{};
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<2, 2, T, Q>
+ {
+ typedef mat<2, 2, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<2, 3, T, Q>
+ {
+ typedef mat<3, 2, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<2, 4, T, Q>
+ {
+ typedef mat<4, 2, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<3, 2, T, Q>
+ {
+ typedef mat<2, 3, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<3, 3, T, Q>
+ {
+ typedef mat<3, 3, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<3, 4, T, Q>
+ {
+ typedef mat<4, 3, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<4, 2, T, Q>
+ {
+ typedef mat<2, 4, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<4, 3, T, Q>
+ {
+ typedef mat<3, 4, T, Q> type;
+ };
+
+ template<typename T, qualifier Q>
+ struct outerProduct_trait<4, 4, T, Q>
+ {
+ typedef mat<4, 4, T, Q> type;
+ };
+}//namespace detail
+
+ /// @addtogroup core_func_matrix
+ /// @{
+
+ /// Multiply matrix x by matrix y component-wise, i.e.,
+ /// result[i][j] is the scalar product of x[i][j] and y[i][j].
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualifies the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualifies the number of rows
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/matrixCompMult.xml">GLSL matrixCompMult man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> matrixCompMult(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
+
+ /// Treats the first parameter c as a column vector
+ /// and the second parameter r as a row vector
+ /// and does a linear algebraic matrix multiply c * r.
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualifies the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualifies the number of rows
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/outerProduct.xml">GLSL outerProduct man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL typename detail::outerProduct_trait<C, R, T, Q>::type outerProduct(vec<C, T, Q> const& c, vec<R, T, Q> const& r);
+
+ /// Returns the transposed matrix of x
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualifies the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualifies the number of rows
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/transpose.xml">GLSL transpose man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL typename mat<C, R, T, Q>::transpose_type transpose(mat<C, R, T, Q> const& x);
+
+ /// Return the determinant of a squared matrix.
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualifies the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualifies the number of rows
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/determinant.xml">GLSL determinant man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL T determinant(mat<C, R, T, Q> const& m);
+
+ /// Return the inverse of a squared matrix.
+ ///
+	/// @tparam C Integer between 1 and 4 included that qualifies the number of columns
+	/// @tparam R Integer between 1 and 4 included that qualifies the number of rows
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/inverse.xml">GLSL inverse man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.6 Matrix Functions</a>
+ template<length_t C, length_t R, typename T, qualifier Q>
+ GLM_FUNC_DECL mat<C, R, T, Q> inverse(mat<C, R, T, Q> const& m);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_matrix.inl"
diff --git a/3rdparty/glm/source/glm/packing.hpp b/3rdparty/glm/source/glm/packing.hpp
new file mode 100644
index 0000000..ca83ac1
--- /dev/null
+++ b/3rdparty/glm/source/glm/packing.hpp
@@ -0,0 +1,173 @@
+/// @ref core
+/// @file glm/packing.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+/// @see gtc_packing
+///
+/// @defgroup core_func_packing Floating-Point Pack and Unpack Functions
+/// @ingroup core
+///
+/// Provides GLSL functions to pack and unpack half, single and double-precision floating point values into more compact integer types.
+///
+/// These functions do not operate component-wise, rather as described in each case.
+///
+/// Include <glm/packing.hpp> to use these core features.
+
+#pragma once
+
+#include "./ext/vector_uint2.hpp"
+#include "./ext/vector_float2.hpp"
+#include "./ext/vector_float4.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_func_packing
+ /// @{
+
+ /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm2x16: round(clamp(c, 0, +1) * 65535.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packUnorm2x16.xml">GLSL packUnorm2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint packUnorm2x16(vec2 const& v);
+
+ /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packSnorm2x16: round(clamp(v, -1, +1) * 32767.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packSnorm2x16.xml">GLSL packSnorm2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint packSnorm2x16(vec2 const& v);
+
+ /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm4x8: round(clamp(c, 0, +1) * 255.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packUnorm4x8.xml">GLSL packUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint packUnorm4x8(vec4 const& v);
+
+ /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
+ /// Then, the results are packed into the returned 32-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packSnorm4x8: round(clamp(c, -1, +1) * 127.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packSnorm4x8.xml">GLSL packSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint packSnorm4x8(vec4 const& v);
+
+ /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm2x16: f / 65535.0
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackUnorm2x16.xml">GLSL unpackUnorm2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec2 unpackUnorm2x16(uint p);
+
+ /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm2x16: clamp(f / 32767.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackSnorm2x16.xml">GLSL unpackSnorm2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec2 unpackSnorm2x16(uint p);
+
+ /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm4x8: f / 255.0
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackUnorm4x8.xml">GLSL unpackUnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec4 unpackUnorm4x8(uint p);
+
+ /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm4x8: clamp(f / 127.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackSnorm4x8.xml">GLSL unpackSnorm4x8 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec4 unpackSnorm4x8(uint p);
+
+ /// Returns a double-qualifier value obtained by packing the components of v into a 64-bit value.
+ /// If an IEEE 754 Inf or NaN is created, it will not signal, and the resulting floating point value is unspecified.
+	/// Otherwise, the bit-level representation of v is preserved.
+ /// The first vector component specifies the 32 least significant bits;
+ /// the second component specifies the 32 most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packDouble2x32.xml">GLSL packDouble2x32 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL double packDouble2x32(uvec2 const& v);
+
+ /// Returns a two-component unsigned integer vector representation of v.
+ /// The bit-level representation of v is preserved.
+ /// The first component of the vector contains the 32 least significant bits of the double;
+	/// the second component contains the 32 most significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackDouble2x32.xml">GLSL unpackDouble2x32 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uvec2 unpackDouble2x32(double v);
+
+ /// Returns an unsigned integer obtained by converting the components of a two-component floating-point vector
+ /// to the 16-bit floating-point representation found in the OpenGL Specification,
+	/// and then packing these two 16-bit integers into a 32-bit unsigned integer.
+ /// The first vector component specifies the 16 least-significant bits of the result;
+ /// the second component specifies the 16 most-significant bits.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/packHalf2x16.xml">GLSL packHalf2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL uint packHalf2x16(vec2 const& v);
+
+ /// Returns a two-component floating-point vector with components obtained by unpacking a 32-bit unsigned integer into a pair of 16-bit values,
+ /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification,
+ /// and converting them to 32-bit floating-point values.
+ /// The first component of the vector is obtained from the 16 least-significant bits of v;
+ /// the second component is obtained from the 16 most-significant bits of v.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/unpackHalf2x16.xml">GLSL unpackHalf2x16 man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions</a>
+ GLM_FUNC_DECL vec2 unpackHalf2x16(uint v);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_packing.inl"
diff --git a/3rdparty/glm/source/glm/simd/common.h b/3rdparty/glm/source/glm/simd/common.h
new file mode 100644
index 0000000..9b017cb
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/common.h
@@ -0,0 +1,240 @@
+/// @ref simd
+/// @file glm/simd/common.h
+
+#pragma once
+
+#include "platform.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+// Component-wise arithmetic on 4 x float32 SSE registers. The glm_vec4_*
+// variants operate on all four lanes; the glm_vec1_* variants use the
+// scalar (*_ss) forms, which combine only the lowest lanes and copy the
+// upper three lanes of the first operand into the result.
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_add_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_add_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_sub_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_sub_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_mul_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_mul_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_div_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return _mm_div_ss(a, b);
+}
+
+// Low-precision division: multiplies by the _mm_rcp_ps reciprocal
+// approximation (roughly 12 bits of precision) instead of a full divide.
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b)
+{
+	return glm_vec4_mul(a, _mm_rcp_ps(b));
+}
+
+// Identity swizzle (x, y, z, w): _MM_SHUFFLE(3, 2, 1, 0) keeps every lane
+// in place. AVX2 builds use the single-source permute form.
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a)
+{
+#	if GLM_ARCH & GLM_ARCH_AVX2_BIT
+		return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0));
+#	else
+		return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0));
+#	endif
+}
+
+// Fused multiply-add: a * b + c on the lowest lane (vec1) or on all four
+// lanes (vec4). The FMA instruction is used on AVX2 builds except under
+// Clang; the fallback issues a separate multiply and add, which matches
+// the fused result up to one extra rounding step.
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
+{
+#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
+		return _mm_fmadd_ss(a, b, c);
+#	else
+		return _mm_add_ss(_mm_mul_ss(a, b), c);
+#	endif
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
+{
+#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
+		return _mm_fmadd_ps(a, b, c);
+#	else
+		return glm_vec4_add(glm_vec4_mul(a, b), c);
+#	endif
+}
+
+// Absolute value of four floats: masks off the IEEE-754 sign bit.
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x)
+{
+	return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
+}
+
+// Absolute value of four 32-bit ints. SSSE3 uses _mm_sign_epi32; the SSE2
+// fallback uses the two's-complement identity abs(x) = (x ^ (x >> 31)) - (x >> 31).
+// NOTE(review): abs(INT_MIN) stays INT_MIN in both paths, as with scalar abs().
+GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x)
+{
+#	if GLM_ARCH & GLM_ARCH_SSSE3_BIT
+		return _mm_sign_epi32(x, x);
+#	else
+		glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31);
+		glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0);
+		glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0);
+		return sub0;
+#	endif
+}
+
+// Component-wise sign(): -1.0f for negative lanes, +1.0f for positive
+// lanes, and 0.0f where x == 0 (NaN lanes fail both comparisons and also
+// yield 0.0f).
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x)
+{
+	glm_vec4 const Zero = _mm_setzero_ps();
+	glm_vec4 const Neg = _mm_and_ps(_mm_cmplt_ps(x, Zero), _mm_set1_ps(-1.0f));
+	glm_vec4 const Pos = _mm_and_ps(_mm_cmpgt_ps(x, Zero), _mm_set1_ps(1.0f));
+	return _mm_or_ps(Neg, Pos);
+}
+
+// Round to nearest integer. SSE4.1 uses _mm_round_ps; the SSE2 fallback
+// adds and then subtracts 2^23 (8388608, the float precision limit) with
+// x's sign bit or-ed in, forcing the FPU to round in its current mode
+// (round-to-nearest-even by default). Statement order matters here.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x)
+{
+#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
+		return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT);
+#	else
+		glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));	// sign-bit mask
+		glm_vec4 const and0 = _mm_and_ps(sgn0, x);									// sign of x
+		glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));				// +/- 2^23
+		glm_vec4 const add0 = glm_vec4_add(x, or0);
+		glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
+		return sub0;
+#	endif
+}
+
+// Round toward negative infinity. SSE4.1 uses _mm_floor_ps; the fallback
+// rounds to nearest, then subtracts 1 from lanes that rounded upward.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x)
+{
+#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
+		return _mm_floor_ps(x);
+#	else
+		glm_vec4 const rnd0 = glm_vec4_round(x);
+		glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0);
+		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
+		glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0);
+		return sub0;
+#	endif
+}
+
+/* trunc TODO
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x)
+{
+ return glm_vec4();
+}
+*/
+
+// Round half to even. Uses the same 2^23 bias trick as glm_vec4_round,
+// which gives round-half-to-even behavior as long as the FPU is in its
+// default round-to-nearest-even mode.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x)
+{
+	glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
+	glm_vec4 const and0 = _mm_and_ps(sgn0, x);
+	glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
+	glm_vec4 const add0 = glm_vec4_add(x, or0);
+	glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
+	return sub0;
+}
+
+// Round toward positive infinity. SSE4.1 uses _mm_ceil_ps; the fallback
+// rounds to nearest, then adds 1 to lanes that rounded downward.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x)
+{
+#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
+		return _mm_ceil_ps(x);
+#	else
+		glm_vec4 const rnd0 = glm_vec4_round(x);
+		glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0);
+		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
+		glm_vec4 const add0 = glm_vec4_add(rnd0, and0);
+		return add0;
+#	endif
+}
+
+// Fractional part as in GLSL: x - floor(x), so each lane lies in [0, 1).
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x)
+{
+	return glm_vec4_sub(x, glm_vec4_floor(x));
+}
+
+// Floored modulo as in GLSL: x - y * floor(x / y).
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y)
+{
+	glm_vec4 const quot0 = glm_vec4_floor(glm_vec4_div(x, y));
+	return glm_vec4_sub(x, glm_vec4_mul(y, quot0));
+}
+
+// Clamps v to [minVal, maxVal] per lane: min with the upper bound first,
+// then max with the lower bound (same operation order as before).
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal)
+{
+	return _mm_max_ps(_mm_min_ps(v, maxVal), minVal);
+}
+
+// Linear interpolation: v1 * (1 - a) + v2 * a, with the second product
+// folded into a fused multiply-add.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a)
+{
+	glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a);
+	glm_vec4 const mul0 = glm_vec4_mul(v1, sub0);
+	glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0);
+	return mad0;
+}
+
+// GLSL step(): component-wise 0.0 where x < edge, 1.0 otherwise.
+// Fix: the original collapsed the comparison through _mm_movemask_ps and
+// returned an all-ones or all-zeros vector, which is wrong whenever the
+// lanes straddle the edge. Build a per-lane result instead.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x)
+{
+	glm_vec4 const cmp0 = _mm_cmplt_ps(x, edge);			// all-ones where x < edge
+	return _mm_andnot_ps(cmp0, _mm_set1_ps(1.0f));			// 1.0 where x >= edge, else 0.0
+}
+
+// GLSL smoothstep(): t = clamp((x - edge0) / (edge1 - edge0), 0, 1),
+// result = t * t * (3 - 2 * t).
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x)
+{
+	glm_vec4 const sub0 = glm_vec4_sub(x, edge0);
+	glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0);
+	// Fix: the interpolation factor is a quotient; the original computed
+	// glm_vec4_sub(sub0, sub1) instead of dividing.
+	glm_vec4 const div0 = glm_vec4_div(sub0, sub1);
+	glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
+	glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0);
+	glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0);
+	glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0);
+	glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2);
+	return mul2;
+}
+
+// Agner Fog method: a float is NaN iff its exponent bits are all ones AND
+// its fraction is non-zero. Returns a per-lane all-ones/all-zeros mask.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x)
+{
+	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
+	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
+	glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000));			// exponent mask
+	glm_ivec4 const t4 = _mm_and_si128(t2, t3);						// exponent
+	glm_ivec4 const t5 = _mm_andnot_si128(t3, t2);					// fraction
+	glm_ivec4 const ExpOnes = _mm_cmpeq_epi32(t3, t4);				// exponent == all 1s
+	glm_ivec4 const FracZero = _mm_cmpeq_epi32(t5, _mm_setzero_si128());	// fraction == 0
+	// Fix: NaN requires fraction != 0. The original AND-ed in FracZero
+	// directly, which detects infinity, not NaN. Clear zero-fraction lanes.
+	glm_ivec4 const IsNan = _mm_andnot_si128(FracZero, ExpOnes);
+	return _mm_castsi128_ps(IsNan);									// exponent = all 1s and fraction != 0
+}
+
+// Agner Fog method: a float is +/-Inf iff, after shifting out the sign
+// bit, the remaining bits equal the exponent mask exactly (all-ones
+// exponent, zero fraction). Returns a per-lane all-ones/all-zeros mask.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x)
+{
+	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
+	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
+	return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000)))); // exponent is all 1s, fraction is 0
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/simd/exponential.h b/3rdparty/glm/source/glm/simd/exponential.h
new file mode 100644
index 0000000..bc351d0
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/exponential.h
@@ -0,0 +1,20 @@
+/// @ref simd
+/// @file glm/simd/exponential.h
+
+#pragma once
+
+#include "platform.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+// Low-precision square root: sqrt(x) approximated as x * rsqrt(x) using
+// the hardware reciprocal square-root estimate (~12 bits of precision).
+// NOTE(review): for x == 0 this computes 0 * inf = NaN rather than 0 —
+// presumably acceptable for the lowp variants, but worth confirming.
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sqrt_lowp(glm_f32vec4 x)
+{
+	return _mm_mul_ss(_mm_rsqrt_ss(x), x);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sqrt_lowp(glm_f32vec4 x)
+{
+	return _mm_mul_ps(_mm_rsqrt_ps(x), x);
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/simd/geometric.h b/3rdparty/glm/source/glm/simd/geometric.h
new file mode 100644
index 0000000..07d7cbc
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/geometric.h
@@ -0,0 +1,124 @@
+/// @ref simd
+/// @file glm/simd/geometric.h
+
+#pragma once
+
+#include "common.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2);
+GLM_FUNC_DECL glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2);
+
+// Euclidean length of x, replicated across all four lanes: sqrt(dot(x, x)).
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x)
+{
+	return _mm_sqrt_ps(glm_vec4_dot(x, x));
+}
+
+// Distance between p0 and p1: length(p0 - p1), replicated in all lanes.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1)
+{
+	return glm_vec4_length(_mm_sub_ps(p0, p1));
+}
+
+// 4-component dot product, broadcast to all four lanes. Three code paths:
+// dpps on AVX-enabled builds, horizontal adds on SSE3, or a shuffle-and-add
+// reduction on plain SSE2.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2)
+{
+#	if GLM_ARCH & GLM_ARCH_AVX_BIT
+		return _mm_dp_ps(v1, v2, 0xff);
+#	elif GLM_ARCH & GLM_ARCH_SSE3_BIT
+		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+		glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0);
+		glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0);
+		return hadd1;
+#	else
+		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+		glm_vec4 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));	// pairwise swap
+		glm_vec4 const add0 = _mm_add_ps(mul0, swp0);
+		glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));	// reverse lanes
+		glm_vec4 const add1 = _mm_add_ps(add0, swp1);
+		return add1;
+#	endif
+}
+
+// Dot product with the result guaranteed only in the lowest lane; the
+// SSE2 path finishes with a scalar add, so the upper lanes may differ
+// between code paths.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2)
+{
+#	if GLM_ARCH & GLM_ARCH_AVX_BIT
+		return _mm_dp_ps(v1, v2, 0xff);
+#	elif GLM_ARCH & GLM_ARCH_SSE3_BIT
+		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+		glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0);
+		glm_vec4 const had1 = _mm_hadd_ps(had0, had0);
+		return had1;
+#	else
+		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+		glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0);	// bring zw down to xy
+		glm_vec4 const add0 = _mm_add_ps(mov0, mul0);
+		glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1);
+		glm_vec4 const add1 = _mm_add_ss(add0, swp1);
+		return add1;
+#	endif
+}
+
+// Cross product of the xyz components, computed via shuffles as
+// v1.yzx * v2.zxy - v1.zxy * v2.yzx; the w lane works out to zero since
+// both products carry v1.w * v2.w there.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2)
+{
+	glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1));	// v1.yzxw
+	glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2));	// v1.zxyw
+	glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1));	// v2.yzxw
+	glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2));	// v2.zxyw
+	glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3);
+	glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2);
+	glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1);
+	return sub0;
+}
+
+// Normalizes v using the _mm_rsqrt_ps reciprocal square-root estimate
+// (~12 bits of precision) instead of a full sqrt and divide.
+// NOTE(review): a zero vector yields non-finite lanes (rsqrt(0) = inf) —
+// presumably callers never pass one; confirm.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v)
+{
+	glm_vec4 const dot0 = glm_vec4_dot(v, v);
+	glm_vec4 const isr0 = _mm_rsqrt_ps(dot0);
+	glm_vec4 const mul0 = _mm_mul_ps(v, isr0);
+	return mul0;
+}
+
+// GLSL faceforward(): returns N when dot(Nref, I) < 0, otherwise -N.
+// Fix: the original multiplied N by -sign(dot), which returns a zero
+// vector when dot(Nref, I) == 0; the GLSL spec requires -N in that case.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref)
+{
+	glm_vec4 const dot0 = glm_vec4_dot(Nref, I);
+	glm_vec4 const cmp0 = _mm_cmplt_ps(dot0, _mm_setzero_ps());	// all-ones where dot < 0
+	glm_vec4 const neg0 = _mm_sub_ps(_mm_setzero_ps(), N);		// -N
+	glm_vec4 const res0 = _mm_or_ps(_mm_and_ps(cmp0, N), _mm_andnot_ps(cmp0, neg0));
+	return res0;
+}
+
+// GLSL reflect(): I - 2 * dot(N, I) * N. Per the GLSL spec, N should
+// already be normalized.
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N)
+{
+	glm_vec4 const dot0 = glm_vec4_dot(N, I);
+	glm_vec4 const mul0 = _mm_mul_ps(N, dot0);
+	glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f));
+	glm_vec4 const sub0 = _mm_sub_ps(I, mul1);
+	return sub0;
+}
+
+// GLSL refract(): with d = dot(N, I) and k = 1 - eta^2 * (1 - d^2),
+// returns vec4(0) when k < 0 (total internal reflection), otherwise
+// eta * I - (eta * d + sqrt(k)) * N.
+GLM_FUNC_QUALIFIER __m128 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta)
+{
+	glm_vec4 const dot0 = glm_vec4_dot(N, I);
+	glm_vec4 const eta2 = _mm_mul_ps(eta, eta);
+	glm_vec4 const dot2 = _mm_mul_ps(dot0, dot0);
+	glm_vec4 const sub1 = _mm_sub_ps(_mm_set1_ps(1.0f), dot2);
+	// Fix: k = 1 - eta^2 * (1 - d^2); the original computed the product
+	// (1 - eta^2) * (1 - d^2), which is a different quantity.
+	glm_vec4 const k = _mm_sub_ps(_mm_set1_ps(1.0f), _mm_mul_ps(eta2, sub1));
+
+	// Fix: total internal reflection is the k < 0 case; the original test
+	// was inverted (it returned zero whenever k was NOT negative).
+	if(_mm_movemask_ps(_mm_cmplt_ss(k, _mm_set1_ps(0.0f))) != 0)
+		return _mm_set1_ps(0.0f);
+
+	glm_vec4 const sqt0 = _mm_sqrt_ps(k);
+	glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0);	// eta * d + sqrt(k)
+	glm_vec4 const mul4 = _mm_mul_ps(mad0, N);
+	glm_vec4 const mul5 = _mm_mul_ps(eta, I);
+	glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4);
+
+	return sub2;
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/simd/integer.h b/3rdparty/glm/source/glm/simd/integer.h
new file mode 100644
index 0000000..9381418
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/integer.h
@@ -0,0 +1,115 @@
+/// @ref simd
+/// @file glm/simd/integer.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+// Bit interleaving via "binary magic numbers": each stage doubles the
+// spacing between the input's bits (shift, OR, mask). The commented-out
+// lines preserve the 64-bit scalar reference this was ported from.
+// NOTE(review): the first two stages use byte-granular _mm_slli_si128
+// (16- and 8-bit shifts across the whole 128-bit register); verify the
+// lane behavior against the scalar bitfieldInterleave reference.
+GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x)
+{
+	glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF);
+	glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF);
+	glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+	glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333);
+	glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555);
+
+	glm_uvec4 Reg1;
+	glm_uvec4 Reg2;
+
+	// REG1 = x;
+	// REG2 = y;
+	//Reg1 = _mm_unpacklo_epi64(x, y);
+	Reg1 = x;
+
+	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+	Reg2 = _mm_slli_si128(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask4);
+
+	//REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	//REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+	Reg2 = _mm_slli_si128(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask3);
+
+	//REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	//REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	Reg2 = _mm_slli_epi32(Reg1, 4);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask2);
+
+	//REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+	//REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+	Reg2 = _mm_slli_epi32(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask1);
+
+	//REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+	//REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask0);
+
+	//return REG1 | (REG2 << 1);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg2 = _mm_srli_si128(Reg2, 8);
+	Reg1 = _mm_or_si128(Reg1, Reg2);
+
+	return Reg1;
+}
+
+// Two-input variant of glm_i128_interleave: packs x and y into the low
+// 64-bit lanes with _mm_unpacklo_epi64 and runs the same magic-number
+// spreading stages. NOTE(review): stage structure mirrors the scalar
+// 64-bit reference in the inline comments; verify against it.
+GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y)
+{
+	glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF);
+	glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF);
+	glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+	glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333);
+	glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555);
+
+	glm_uvec4 Reg1;
+	glm_uvec4 Reg2;
+
+	// REG1 = x;
+	// REG2 = y;
+	Reg1 = _mm_unpacklo_epi64(x, y);
+
+	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+	Reg2 = _mm_slli_si128(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask4);
+
+	//REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	//REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+	Reg2 = _mm_slli_si128(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask3);
+
+	//REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	//REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	Reg2 = _mm_slli_epi32(Reg1, 4);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask2);
+
+	//REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+	//REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+	Reg2 = _mm_slli_epi32(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask1);
+
+	//REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+	//REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask0);
+
+	//return REG1 | (REG2 << 1);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg2 = _mm_srli_si128(Reg2, 8);
+	Reg1 = _mm_or_si128(Reg1, Reg2);
+
+	return Reg1;
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/simd/matrix.h b/3rdparty/glm/source/glm/simd/matrix.h
new file mode 100644
index 0000000..b6c42ea
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/matrix.h
@@ -0,0 +1,1028 @@
+/// @ref simd
+/// @file glm/simd/matrix.h
+
+#pragma once
+
+#include "geometric.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+	// Component-wise (Hadamard) product: each output column is the
+	// element-wise product of the matching columns of in1 and in2.
+	for(int col = 0; col < 4; ++col)
+		out[col] = _mm_mul_ps(in1[col], in2[col]);
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+	// Matrix addition: out = in1 + in2, column by column.
+	for(int col = 0; col < 4; ++col)
+		out[col] = _mm_add_ps(in1[col], in2[col]);
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+	// Matrix subtraction: out = in1 - in2, column by column.
+	for(int col = 0; col < 4; ++col)
+		out[col] = _mm_sub_ps(in1[col], in2[col]);
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v)
+{
+	// Column-major matrix * column vector: broadcast each component of v
+	// across a register and accumulate the scaled matrix columns.
+	// The add association ((c0+c1)+(c2+c3)) is kept fixed for bit-exact
+	// floating-point results.
+	__m128 const Splat0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 const Splat1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
+	__m128 const Splat2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2));
+	__m128 const Splat3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3));
+
+	return _mm_add_ps(
+		_mm_add_ps(_mm_mul_ps(m[0], Splat0), _mm_mul_ps(m[1], Splat1)),
+		_mm_add_ps(_mm_mul_ps(m[2], Splat2), _mm_mul_ps(m[3], Splat3)));
+}
+
+GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4])
+{
+	// Row vector * column-major matrix: component c of the result is
+	// dot(v, m[c]). The four per-column products are reduced with a
+	// horizontal add built from unpack/movelh/movehl shuffles.
+	__m128 const Prod0 = _mm_mul_ps(v, m[0]);
+	__m128 const Prod1 = _mm_mul_ps(v, m[1]);
+	__m128 const Prod2 = _mm_mul_ps(v, m[2]);
+	__m128 const Prod3 = _mm_mul_ps(v, m[3]);
+
+	// Pairwise transpose-and-add of the first two products...
+	__m128 const Lo01  = _mm_unpacklo_ps(Prod0, Prod1);
+	__m128 const Hi01  = _mm_unpackhi_ps(Prod0, Prod1);
+	__m128 const Sum01 = _mm_add_ps(Lo01, Hi01);
+
+	// ...and of the last two.
+	__m128 const Lo23  = _mm_unpacklo_ps(Prod2, Prod3);
+	__m128 const Hi23  = _mm_unpackhi_ps(Prod2, Prod3);
+	__m128 const Sum23 = _mm_add_ps(Lo23, Hi23);
+
+	// Final reduction: low halves + high halves of the partial sums.
+	return _mm_add_ps(_mm_movelh_ps(Sum01, Sum23), _mm_movehl_ps(Sum23, Sum01));
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+	// Column-major 4x4 matrix product: out = in1 * in2.
+	// Column c of the result is in1 * in2[c]: broadcast each component of
+	// in2[c] across a register and accumulate the scaled columns of in1.
+	// The read/write order (read in2[c] and all of in1, then store out[c])
+	// matches the unrolled original, so in-place use behaves identically.
+	for(int c = 0; c < 4; ++c)
+	{
+		__m128 const e0 = _mm_shuffle_ps(in2[c], in2[c], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 const e1 = _mm_shuffle_ps(in2[c], in2[c], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 const e2 = _mm_shuffle_ps(in2[c], in2[c], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 const e3 = _mm_shuffle_ps(in2[c], in2[c], _MM_SHUFFLE(3, 3, 3, 3));
+
+		out[c] = _mm_add_ps(
+			_mm_add_ps(_mm_mul_ps(in1[0], e0), _mm_mul_ps(in1[1], e1)),
+			_mm_add_ps(_mm_mul_ps(in1[2], e2), _mm_mul_ps(in1[3], e3)));
+	}
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4])
+{
+	// 4x4 transpose via the standard two-pass shuffle network (same dataflow
+	// as _MM_TRANSPOSE4_PS), with the selectors spelled out via _MM_SHUFFLE
+	// instead of raw hex immediates. All inputs are read before any store,
+	// so an in-place transpose (out == in) works.
+	__m128 const LoXY = _mm_shuffle_ps(in[0], in[1], _MM_SHUFFLE(1, 0, 1, 0)); // x0 y0 x1 y1
+	__m128 const HiXY = _mm_shuffle_ps(in[0], in[1], _MM_SHUFFLE(3, 2, 3, 2)); // z0 w0 z1 w1
+	__m128 const LoZW = _mm_shuffle_ps(in[2], in[3], _MM_SHUFFLE(1, 0, 1, 0)); // x2 y2 x3 y3
+	__m128 const HiZW = _mm_shuffle_ps(in[2], in[3], _MM_SHUFFLE(3, 2, 3, 2)); // z2 w2 z3 w3
+
+	out[0] = _mm_shuffle_ps(LoXY, LoZW, _MM_SHUFFLE(2, 0, 2, 0)); // x0 x1 x2 x3
+	out[1] = _mm_shuffle_ps(LoXY, LoZW, _MM_SHUFFLE(3, 1, 3, 1)); // y0 y1 y2 y3
+	out[2] = _mm_shuffle_ps(HiXY, HiZW, _MM_SHUFFLE(2, 0, 2, 0)); // z0 z1 z2 z3
+	out[3] = _mm_shuffle_ps(HiXY, HiZW, _MM_SHUFFLE(3, 1, 3, 1)); // w0 w1 w2 w3
+}
+
+// Cofactor-expansion determinant of a column-major 4x4 matrix. Builds the
+// same six shared 2x2 sub-determinants (Fac0..Fac5) and signed cofactor
+// columns as glm_mat4_inverse, then returns glm_vec4_dot(column 0, first
+// cofactor row). The result's lane layout follows glm_vec4_dot's convention
+// (defined in geometric.h).
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4])
+{
+	__m128 Fac0;
+	{
+		// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+		// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+		// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+		// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac0 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac1;
+	{
+		// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+		// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+		// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+		// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac1 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+
+	__m128 Fac2;
+	{
+		// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+		// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+		// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+		// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac2 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac3;
+	{
+		// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+		// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+		// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+		// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac3 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac4;
+	{
+		// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+		// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+		// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+		// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac4 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac5;
+	{
+		// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+		// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+		// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+		// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac5 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	// Alternating cofactor signs for even/odd columns.
+	__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+	__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+	// m[1][0]
+	// m[0][0]
+	// m[0][0]
+	// m[0][0]
+	__m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// m[1][1]
+	// m[0][1]
+	// m[0][1]
+	// m[0][1]
+	__m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
+	__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// m[1][2]
+	// m[0][2]
+	// m[0][2]
+	// m[0][2]
+	__m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
+	__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// m[1][3]
+	// m[0][3]
+	// m[0][3]
+	// m[0][3]
+	__m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
+	__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// col0
+	// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+	// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+	// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+	// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+	__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+	__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+	__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+	__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+	__m128 Add00 = _mm_add_ps(Sub00, Mul02);
+	__m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+	// col1 (comment indices fixed: Vec0 is indexed per lane, like col0)
+	// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+	// + (Vec0[1] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+	// - (Vec0[2] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+	// + (Vec0[3] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+	__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+	__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+	__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+	__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+	__m128 Add01 = _mm_add_ps(Sub01, Mul05);
+	__m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+	// col2 (comment indices fixed: Vec0 is indexed per lane, like col0)
+	// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+	// - (Vec0[1] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+	// + (Vec0[2] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+	// - (Vec0[3] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+	__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+	__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+	__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+	__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+	__m128 Add02 = _mm_add_ps(Sub02, Mul08);
+	__m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+	// col3 (comment fixed to match the code: Vec0 * Fac2, not Vec1 * Fac2)
+	// - (Vec0[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+	// + (Vec0[1] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+	// - (Vec0[2] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+	// + (Vec0[3] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+	__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+	__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+	__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+	__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+	__m128 Add03 = _mm_add_ps(Sub03, Mul11);
+	__m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+	// Gather lane 0 of each cofactor column into one register (the first row
+	// of the cofactor matrix).
+	__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+	// valType Determinant = m[0][0] * Inverse[0][0]
+	// + m[0][1] * Inverse[1][0]
+	// + m[0][2] * Inverse[2][0]
+	// + m[0][3] * Inverse[3][0];
+	__m128 Det0 = glm_vec4_dot(in[0], Row2);
+	return Det0;
+}
+
+// Cofactor-expansion determinant; same dataflow as glm_mat4_determinant
+// below, but every lane permutation is performed with _mm_shuffle_epi32 on
+// bit-cast integer registers instead of _mm_shuffle_ps. The arithmetic and
+// results are identical; only the shuffle domain differs.
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4])
+{
+	// Shuffle idiom used throughout:
+	// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(
+
+	//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+	//T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+	//T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+	//T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+	//T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+	//T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+
+	// First 2 columns
+	__m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2)));
+	__m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3)));
+	__m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
+
+	// Second 2 columns
+	__m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3)));
+	__m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2)));
+	__m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
+
+	// Columns subtraction
+	__m128 SubE = _mm_sub_ps(MulA, MulB);
+
+	// Last 2 rows
+	__m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2)));
+	__m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0)));
+	__m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
+	__m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
+
+	//vec<4, T, Q> DetCof(
+	//	+ (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
+	//	- (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
+	//	+ (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
+	//	- (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
+
+	__m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0)));
+	__m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1)));
+	__m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
+
+	__m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
+	__m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1];
+	__m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2)));
+	__m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
+
+	__m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
+
+	__m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
+	__m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0)));
+	__m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3)));
+	__m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
+
+	__m128 AddRes = _mm_add_ps(SubRes, MulFacC);
+	// Apply the alternating cofactor signs (+ - + -).
+	__m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
+
+	//return m[0][0] * DetCof[0]
+	//	 + m[0][1] * DetCof[1]
+	//	 + m[0][2] * DetCof[2]
+	//	 + m[0][3] * DetCof[3];
+
+	return glm_vec4_dot(m[0], DetCof);
+}
+
+// Default-precision determinant of a column-major 4x4 matrix: identical
+// dataflow to glm_mat4_determinant_lowp but keeps every permutation in the
+// float domain via _mm_shuffle_ps, avoiding the integer bit-casts.
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4])
+{
+	// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add)
+
+	//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+	//T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+	//T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+	//T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+	//T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+	//T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+
+	// First 2 columns
+	__m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2));
+	__m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3));
+	__m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
+
+	// Second 2 columns
+	__m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3));
+	__m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2));
+	__m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
+
+	// Columns subtraction
+	__m128 SubE = _mm_sub_ps(MulA, MulB);
+
+	// Last 2 rows
+	__m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2));
+	__m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0));
+	__m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
+	__m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
+
+	//vec<4, T, Q> DetCof(
+	//	+ (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
+	//	- (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
+	//	+ (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
+	//	- (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
+
+	__m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0));
+	__m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1));
+	__m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
+
+	__m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
+	__m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1];
+	__m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2));
+	__m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
+
+	__m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
+
+	__m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
+	__m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0));
+	__m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3));
+	__m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
+
+	__m128 AddRes = _mm_add_ps(SubRes, MulFacC);
+	// Apply the alternating cofactor signs (+ - + -).
+	__m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
+
+	//return m[0][0] * DetCof[0]
+	//	 + m[0][1] * DetCof[1]
+	//	 + m[0][2] * DetCof[2]
+	//	 + m[0][3] * DetCof[3];
+
+	return glm_vec4_dot(m[0], DetCof);
+}
+
+// Inverse of a column-major 4x4 matrix by the adjugate method: six shared
+// 2x2 sub-determinants (Fac0..Fac5), four signed cofactor columns
+// (Inv0..Inv3), then division by the determinant (dot of column 0 with the
+// first cofactor row). No singularity check: a zero determinant produces a
+// division by zero.
+GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4])
+{
+	__m128 Fac0;
+	{
+		// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+		// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+		// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+		// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac0 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac1;
+	{
+		// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+		// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+		// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+		// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac1 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+
+	__m128 Fac2;
+	{
+		// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+		// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+		// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+		// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac2 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac3;
+	{
+		// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+		// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+		// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+		// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac3 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac4;
+	{
+		// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+		// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+		// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+		// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac4 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	__m128 Fac5;
+	{
+		// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+		// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+		// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+		// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+
+		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+		Fac5 = _mm_sub_ps(Mul00, Mul01);
+	}
+
+	// Alternating cofactor signs for even/odd columns.
+	__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+	__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+	// m[1][0]
+	// m[0][0]
+	// m[0][0]
+	// m[0][0]
+	__m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// m[1][1]
+	// m[0][1]
+	// m[0][1]
+	// m[0][1]
+	__m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
+	__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// m[1][2]
+	// m[0][2]
+	// m[0][2]
+	// m[0][2]
+	__m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
+	__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// m[1][3]
+	// m[0][3]
+	// m[0][3]
+	// m[0][3]
+	__m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
+	__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+	// col0
+	// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+	// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+	// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+	// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+	__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+	__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+	__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+	__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+	__m128 Add00 = _mm_add_ps(Sub00, Mul02);
+	__m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+	// col1 (comment indices fixed: Vec0 is indexed per lane, like col0)
+	// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+	// + (Vec0[1] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+	// - (Vec0[2] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+	// + (Vec0[3] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+	__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+	__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+	__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+	__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+	__m128 Add01 = _mm_add_ps(Sub01, Mul05);
+	__m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+	// col2 (comment indices fixed: Vec0 is indexed per lane, like col0)
+	// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+	// - (Vec0[1] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+	// + (Vec0[2] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+	// - (Vec0[3] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+	__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+	__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+	__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+	__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+	__m128 Add02 = _mm_add_ps(Sub02, Mul08);
+	__m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+	// col3 (comment fixed to match the code: Vec0 * Fac2, not Vec1 * Fac2)
+	// - (Vec0[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+	// + (Vec0[1] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+	// - (Vec0[2] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+	// + (Vec0[3] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+	__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+	__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+	__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+	__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+	__m128 Add03 = _mm_add_ps(Sub03, Mul11);
+	__m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+	// Gather lane 0 of each cofactor column: the first row of the cofactor
+	// matrix, used only to compute the determinant.
+	__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+	// valType Determinant = m[0][0] * Inverse[0][0]
+	// + m[0][1] * Inverse[1][0]
+	// + m[0][2] * Inverse[2][0]
+	// + m[0][3] * Inverse[3][0];
+	__m128 Det0 = glm_vec4_dot(in[0], Row2);
+	// 1/det via exact division; the commented _mm_rcp_ps below is the fast,
+	// low-precision alternative.
+	__m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0);
+	//__m128 Rcp0 = _mm_rcp_ps(Det0);
+
+	// Inverse /= Determinant;
+	out[0] = _mm_mul_ps(Inv0, Rcp0);
+	out[1] = _mm_mul_ps(Inv1, Rcp0);
+	out[2] = _mm_mul_ps(Inv2, Rcp0);
+	out[3] = _mm_mul_ps(Inv3, Rcp0);
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4])
+{
+ __m128 Fac0;
+ {
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac0 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac1;
+ {
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac1 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+
+ __m128 Fac2;
+ {
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac2 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac3;
+ {
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac3 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac4;
+ {
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac4 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac5;
+ {
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac5 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+ __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+ // m[1][0]
+ // m[0][0]
+ // m[0][0]
+ // m[0][0]
+ __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][1]
+ // m[0][1]
+ // m[0][1]
+ // m[0][1]
+ __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][2]
+ // m[0][2]
+ // m[0][2]
+ // m[0][2]
+ __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][3]
+ // m[0][3]
+ // m[0][3]
+ // m[0][3]
+ __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // col0
+ // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+ // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+ // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+ // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+ __m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+ __m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+ __m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+ __m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+ __m128 Add00 = _mm_add_ps(Sub00, Mul02);
+ __m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+ // col1
+ // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+ // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+ // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+ // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+ __m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+ __m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+ __m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+ __m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+ __m128 Add01 = _mm_add_ps(Sub01, Mul05);
+ __m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+ // col2
+ // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+ // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+ // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+ // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+ __m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+ __m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+ __m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+ __m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+ __m128 Add02 = _mm_add_ps(Sub02, Mul08);
+ __m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+ // col3
+ // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+ // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+ // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+ // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+ __m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+ __m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+ __m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+ __m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+ __m128 Add03 = _mm_add_ps(Sub03, Mul11);
+ __m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+ __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+ // valType Determinant = m[0][0] * Inverse[0][0]
+ // + m[0][1] * Inverse[1][0]
+ // + m[0][2] * Inverse[2][0]
+ // + m[0][3] * Inverse[3][0];
+ __m128 Det0 = glm_vec4_dot(in[0], Row2);
+ __m128 Rcp0 = _mm_rcp_ps(Det0);
+ //__m128 Rcp0 = _mm_div_ps(one, Det0);
+ // Inverse /= Determinant;
+ out[0] = _mm_mul_ps(Inv0, Rcp0);
+ out[1] = _mm_mul_ps(Inv1, Rcp0);
+ out[2] = _mm_mul_ps(Inv2, Rcp0);
+ out[3] = _mm_mul_ps(Inv3, Rcp0);
+}
+/*
+GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4])
+{
+ float a = glm::radians(Angle);
+ float c = cos(a);
+ float s = sin(a);
+
+ glm::vec4 AxisA(v[0], v[1], v[2], float(0));
+ __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x);
+ __m128 AxisC = detail::sse_nrm_ps(AxisB);
+
+ __m128 Cos0 = _mm_set_ss(c);
+ __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Sin0 = _mm_set_ss(s);
+ __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ // vec<3, T, Q> temp = (valType(1) - c) * axis;
+ __m128 Temp0 = _mm_sub_ps(one, CosA);
+ __m128 Temp1 = _mm_mul_ps(Temp0, AxisC);
+
+ //Rotate[0][0] = c + temp[0] * axis[0];
+ //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2];
+ //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1];
+ __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC);
+ __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0));
+ __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0);
+ __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f);
+ __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3));
+ __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2);
+ __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3);
+
+ //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2];
+ //Rotate[1][1] = c + temp[1] * axis[1];
+ //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0];
+ __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC);
+ __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1));
+ __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0);
+ __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f);
+ __m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2));
+ __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2);
+ __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3);
+
+ //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1];
+ //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0];
+ //Rotate[2][2] = c + temp[2] * axis[2];
+ __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC);
+ __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1));
+ __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0);
+ __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f);
+ __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1));
+ __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2);
+ __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3);
+
+ __m128 Result[4];
+ Result[0] = TmpA4;
+ Result[1] = TmpB4;
+ Result[2] = TmpC4;
+ Result[3] = _mm_set_ps(1, 0, 0, 0);
+
+ //mat<4, 4, valType> Result;
+ //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
+ //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
+ //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
+ //Result[3] = m[3];
+ //return Result;
+ sse_mul_ps(in, Result, out);
+}
+*/
+// Outer product of column vector c and row vector r: out[j] = c * r[j].
+// Each result column is c scaled by one lane of r, broadcast across the
+// whole register with a self-shuffle.
+GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const& c, __m128 const& r, __m128 out[4])
+{
+	__m128 const Rx = _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0));
+	__m128 const Ry = _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1));
+	__m128 const Rz = _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2));
+	__m128 const Rw = _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3));
+
+	out[0] = _mm_mul_ps(c, Rx);
+	out[1] = _mm_mul_ps(c, Ry);
+	out[2] = _mm_mul_ps(c, Rz);
+	out[3] = _mm_mul_ps(c, Rw);
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/simd/neon.h b/3rdparty/glm/source/glm/simd/neon.h
new file mode 100644
index 0000000..f85947f
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/neon.h
@@ -0,0 +1,155 @@
+/// @ref simd_neon
+/// @file glm/simd/neon.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_NEON_BIT
+#include <arm_neon.h>
+#include <assert.h> // assert() is used throughout this header
+
+namespace glm {
+ namespace neon {
+	// Broadcast one runtime-selected lane of vsrc to all four lanes of the
+	// result. NEON's vdupq_laneq_f32 requires a compile-time lane index, so
+	// the runtime index is dispatched through a switch; pre-ARMv8 targets
+	// fall back to a scalar extract (vgetq_lane_f32) plus vdupq_n_f32.
+	// lane must be 0..3; anything else trips the assert (debug builds only)
+	// and yields a zero vector.
+	static inline float32x4_t dupq_lane(float32x4_t vsrc, int lane) {
+		switch(lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+			case 0: return vdupq_laneq_f32(vsrc, 0);
+			case 1: return vdupq_laneq_f32(vsrc, 1);
+			case 2: return vdupq_laneq_f32(vsrc, 2);
+			case 3: return vdupq_laneq_f32(vsrc, 3);
+#else
+			case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0));
+			case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1));
+			case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2));
+			case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3));
+#endif
+		}
+		assert(!"Unreachable code executed!");
+		return vdupq_n_f32(0.0f);
+	}
+
+	// Same as dupq_lane but produces a 64-bit (two-lane) vector: both lanes
+	// of the result hold the selected lane of vsrc. ARMv8 uses
+	// vdup_laneq_f32 directly; older targets extract the scalar and
+	// re-broadcast it with vdup_n_f32.
+	// lane must be 0..3; anything else trips the assert (debug builds only)
+	// and yields a zero vector.
+	static inline float32x2_t dup_lane(float32x4_t vsrc, int lane) {
+		switch(lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+			case 0: return vdup_laneq_f32(vsrc, 0);
+			case 1: return vdup_laneq_f32(vsrc, 1);
+			case 2: return vdup_laneq_f32(vsrc, 2);
+			case 3: return vdup_laneq_f32(vsrc, 3);
+#else
+			case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0));
+			case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1));
+			case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2));
+			case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3));
+#endif
+		}
+		assert(!"Unreachable code executed!");
+		return vdup_n_f32(0.0f);
+	}
+
+	// Return vdst with lane dlane replaced by lane slane of vsrc; all other
+	// lanes of vdst are preserved. Runtime lane indices are dispatched onto
+	// intrinsics that require compile-time immediates (vcopyq_laneq_f32 on
+	// ARMv8, vgetq_lane_f32/vsetq_lane_f32 otherwise).
+	// Both indices must be 0..3. Invalid indices trip the assert in debug
+	// builds; in release builds they now return a zero vector instead of
+	// falling through into the next case (the previous NDEBUG behavior).
+	static inline float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+		switch(dlane) {
+		case 0:
+			switch(slane) {
+			case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0);
+			case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1);
+			case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2);
+			case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3);
+			}
+			// An invalid slane must not fall through into the dlane==1 case.
+			assert(!"Unreachable code executed!");
+			break;
+		case 1:
+			switch(slane) {
+			case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0);
+			case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1);
+			case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2);
+			case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3);
+			}
+			assert(!"Unreachable code executed!");
+			break;
+		case 2:
+			switch(slane) {
+			case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0);
+			case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1);
+			case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2);
+			case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3);
+			}
+			assert(!"Unreachable code executed!");
+			break;
+		case 3:
+			switch(slane) {
+			case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0);
+			case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1);
+			case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2);
+			case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3);
+			}
+			assert(!"Unreachable code executed!");
+			break;
+		}
+#else
+		// Initialized so an invalid slane under NDEBUG cannot read garbage
+		// (the original left l uninitialized on that path, which is UB).
+		float l = 0.0f;
+		switch(slane) {
+		case 0: l = vgetq_lane_f32(vsrc, 0); break;
+		case 1: l = vgetq_lane_f32(vsrc, 1); break;
+		case 2: l = vgetq_lane_f32(vsrc, 2); break;
+		case 3: l = vgetq_lane_f32(vsrc, 3); break;
+		default:
+			assert(!"Unreachable code executed!");
+			break;
+		}
+		switch(dlane) {
+		case 0: return vsetq_lane_f32(l, vdst, 0);
+		case 1: return vsetq_lane_f32(l, vdst, 1);
+		case 2: return vsetq_lane_f32(l, vdst, 2);
+		case 3: return vsetq_lane_f32(l, vdst, 3);
+		}
+#endif
+		assert(!"Unreachable code executed!");
+		return vdupq_n_f32(0.0f);
+	}
+
+	// v * vlane[lane], with the selected lane applied to all four elements.
+	// On ARMv8 this maps directly to vmulq_laneq_f32 with a compile-time
+	// lane index; otherwise the lane is broadcast with dupq_lane first.
+	// lane must be 0..3; anything else trips the assert (debug builds only)
+	// and yields a zero vector.
+	// (Cleanup: dropped the unreachable break-after-return statements and
+	// the duplicated assert of the original.)
+	static inline float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+		switch(lane) {
+		case 0: return vmulq_laneq_f32(v, vlane, 0);
+		case 1: return vmulq_laneq_f32(v, vlane, 1);
+		case 2: return vmulq_laneq_f32(v, vlane, 2);
+		case 3: return vmulq_laneq_f32(v, vlane, 3);
+		}
+		assert(!"Unreachable code executed!");
+		return vdupq_n_f32(0.0f);
+#else
+		return vmulq_f32(v, dupq_lane(vlane, lane));
+#endif
+	}
+
+	// acc + v * vlane[lane]: multiply-accumulate by one broadcast lane.
+	// On ARMv8 the work is done by vmlaq_laneq_f32, or by an explicit fmla
+	// asm statement when GLM_CONFIG_FORCE_FMA is defined; other targets fall
+	// back to a separate multiply and add built on dupq_lane.
+	// lane must be 0..3; anything else trips the assert (debug builds only)
+	// and yields a zero vector.
+	static inline float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+#ifdef GLM_CONFIG_FORCE_FMA
+// NOTE(review): dup_lane(y, L) yields a 64-bit float32x2_t, yet the asm
+// consumes operand %2 with a .4s arrangement — confirm the upper two lanes
+// are well-defined on the targeted toolchains.
+# define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dup_lane(y, L))); } while(0)
+#else
+# define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0)
+#endif
+
+		switch(lane) {
+		case 0:
+			FMADD_LANE(acc, v, vlane, 0);
+			return acc;
+		case 1:
+			FMADD_LANE(acc, v, vlane, 1);
+			return acc;
+		case 2:
+			FMADD_LANE(acc, v, vlane, 2);
+			return acc;
+		case 3:
+			FMADD_LANE(acc, v, vlane, 3);
+			return acc;
+		default:
+			assert(!"Unreachable code executed!");
+		}
+		assert(!"Unreachable code executed!");
+		return vdupq_n_f32(0.0f);
+		// The #undef below still takes effect even though it sits after the
+		// returns: preprocessing is independent of run-time control flow.
+# undef FMADD_LANE
+#else
+		return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane)));
+#endif
+	}
+ } //namespace neon
+} // namespace glm
+#endif // GLM_ARCH & GLM_ARCH_NEON_BIT
diff --git a/3rdparty/glm/source/glm/simd/packing.h b/3rdparty/glm/source/glm/simd/packing.h
new file mode 100644
index 0000000..609163e
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/packing.h
@@ -0,0 +1,8 @@
+/// @ref simd
+/// @file glm/simd/packing.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/simd/platform.h b/3rdparty/glm/source/glm/simd/platform.h
new file mode 100644
index 0000000..12a7b72
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/platform.h
@@ -0,0 +1,408 @@
+#pragma once
+
+///////////////////////////////////////////////////////////////////////////////////
+// Platform
+
+#define GLM_PLATFORM_UNKNOWN 0x00000000
+#define GLM_PLATFORM_WINDOWS 0x00010000
+#define GLM_PLATFORM_LINUX 0x00020000
+#define GLM_PLATFORM_APPLE 0x00040000
+//#define GLM_PLATFORM_IOS 0x00080000
+#define GLM_PLATFORM_ANDROID 0x00100000
+#define GLM_PLATFORM_CHROME_NACL 0x00200000
+#define GLM_PLATFORM_UNIX 0x00400000
+#define GLM_PLATFORM_QNXNTO 0x00800000
+#define GLM_PLATFORM_WINCE 0x01000000
+#define GLM_PLATFORM_CYGWIN 0x02000000
+
+#ifdef GLM_FORCE_PLATFORM_UNKNOWN
+# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN
+#elif defined(__CYGWIN__)
+# define GLM_PLATFORM GLM_PLATFORM_CYGWIN
+#elif defined(__QNXNTO__)
+# define GLM_PLATFORM GLM_PLATFORM_QNXNTO
+#elif defined(__APPLE__)
+# define GLM_PLATFORM GLM_PLATFORM_APPLE
+#elif defined(WINCE)
+# define GLM_PLATFORM GLM_PLATFORM_WINCE
+#elif defined(_WIN32)
+# define GLM_PLATFORM GLM_PLATFORM_WINDOWS
+#elif defined(__native_client__)
+# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL
+#elif defined(__ANDROID__)
+# define GLM_PLATFORM GLM_PLATFORM_ANDROID
+#elif defined(__linux)
+# define GLM_PLATFORM GLM_PLATFORM_LINUX
+#elif defined(__unix)
+# define GLM_PLATFORM GLM_PLATFORM_UNIX
+#else
+# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN
+#endif//
+
+///////////////////////////////////////////////////////////////////////////////////
+// Compiler
+
+#define GLM_COMPILER_UNKNOWN 0x00000000
+
+// Intel
+#define GLM_COMPILER_INTEL 0x00100000
+#define GLM_COMPILER_INTEL14 0x00100040
+#define GLM_COMPILER_INTEL15 0x00100050
+#define GLM_COMPILER_INTEL16 0x00100060
+#define GLM_COMPILER_INTEL17 0x00100070
+
+// Visual C++ defines
+#define GLM_COMPILER_VC 0x01000000
+#define GLM_COMPILER_VC12 0x01000001
+#define GLM_COMPILER_VC14 0x01000002
+#define GLM_COMPILER_VC15 0x01000003
+#define GLM_COMPILER_VC15_3 0x01000004
+#define GLM_COMPILER_VC15_5 0x01000005
+#define GLM_COMPILER_VC15_6 0x01000006
+#define GLM_COMPILER_VC15_7 0x01000007
+#define GLM_COMPILER_VC15_8 0x01000008
+#define GLM_COMPILER_VC15_9 0x01000009
+#define GLM_COMPILER_VC16 0x0100000A
+
+// GCC defines
+#define GLM_COMPILER_GCC 0x02000000
+#define GLM_COMPILER_GCC46 0x020000D0
+#define GLM_COMPILER_GCC47 0x020000E0
+#define GLM_COMPILER_GCC48 0x020000F0
+#define GLM_COMPILER_GCC49 0x02000100
+#define GLM_COMPILER_GCC5 0x02000200
+#define GLM_COMPILER_GCC6 0x02000300
+#define GLM_COMPILER_GCC7 0x02000400
+#define GLM_COMPILER_GCC8 0x02000500
+
+// CUDA
+#define GLM_COMPILER_CUDA 0x10000000
+#define GLM_COMPILER_CUDA75 0x10000001
+#define GLM_COMPILER_CUDA80 0x10000002
+#define GLM_COMPILER_CUDA90 0x10000004
+#define GLM_COMPILER_CUDA_RTC 0x10000100
+
+// SYCL
+#define GLM_COMPILER_SYCL 0x00300000
+
+// Clang
+#define GLM_COMPILER_CLANG 0x20000000
+#define GLM_COMPILER_CLANG34 0x20000050
+#define GLM_COMPILER_CLANG35 0x20000060
+#define GLM_COMPILER_CLANG36 0x20000070
+#define GLM_COMPILER_CLANG37 0x20000080
+#define GLM_COMPILER_CLANG38 0x20000090
+#define GLM_COMPILER_CLANG39 0x200000A0
+#define GLM_COMPILER_CLANG40 0x200000B0
+#define GLM_COMPILER_CLANG41 0x200000C0
+#define GLM_COMPILER_CLANG42 0x200000D0
+
+// HIP
+#define GLM_COMPILER_HIP 0x40000000
+
+// Build model
+#define GLM_MODEL_32 0x00000010
+#define GLM_MODEL_64 0x00000020
+
+// Force generic C++ compiler
+#ifdef GLM_FORCE_COMPILER_UNKNOWN
+# define GLM_COMPILER GLM_COMPILER_UNKNOWN
+
+#elif defined(__INTEL_COMPILER)
+# if __INTEL_COMPILER >= 1700
+# define GLM_COMPILER GLM_COMPILER_INTEL17
+# elif __INTEL_COMPILER >= 1600
+# define GLM_COMPILER GLM_COMPILER_INTEL16
+# elif __INTEL_COMPILER >= 1500
+# define GLM_COMPILER GLM_COMPILER_INTEL15
+# elif __INTEL_COMPILER >= 1400
+# define GLM_COMPILER GLM_COMPILER_INTEL14
+# elif __INTEL_COMPILER < 1400
+# error "GLM requires ICC 2013 SP1 or newer"
+# endif
+
+// CUDA: classify the detected nvcc / NVRTC version into a GLM_COMPILER value.
+#elif defined(__CUDACC__)
+#	if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA)
+#		include <cuda.h> // make sure version is defined since nvcc does not define it itself!
+#	endif
+#	if defined(__CUDACC_RTC__)
+#		define GLM_COMPILER GLM_COMPILER_CUDA_RTC
+#	elif CUDA_VERSION >= 8000
+#		define GLM_COMPILER GLM_COMPILER_CUDA80
+#	elif CUDA_VERSION >= 7500
+#		define GLM_COMPILER GLM_COMPILER_CUDA75
+#	elif CUDA_VERSION >= 7000
+	// NOTE(review): GLM_COMPILER_CUDA70 is never #defined in the compiler
+	// constants above (only CUDA75/80/90/RTC exist), so for CUDA 7.0-7.4
+	// GLM_COMPILER expands to an undefined identifier that evaluates to 0
+	// inside #if expressions, silently disabling compiler detection. Either
+	// define the constant or raise the minimum supported CUDA version.
+#		define GLM_COMPILER GLM_COMPILER_CUDA70
+#	elif CUDA_VERSION < 7000
+#		error "GLM requires CUDA 7.0 or higher"
+#	endif
+
+// HIP
+#elif defined(__HIP__)
+# define GLM_COMPILER GLM_COMPILER_HIP
+
+// SYCL
+#elif defined(__SYCL_DEVICE_ONLY__)
+# define GLM_COMPILER GLM_COMPILER_SYCL
+
+// Clang
+#elif defined(__clang__)
+# if defined(__apple_build_version__)
+# if (__clang_major__ < 6)
+# error "GLM requires Clang 3.4 / Apple Clang 6.0 or higher"
+# elif __clang_major__ == 6 && __clang_minor__ == 0
+# define GLM_COMPILER GLM_COMPILER_CLANG35
+# elif __clang_major__ == 6 && __clang_minor__ >= 1
+# define GLM_COMPILER GLM_COMPILER_CLANG36
+# elif __clang_major__ >= 7
+# define GLM_COMPILER GLM_COMPILER_CLANG37
+# endif
+# else
+# if ((__clang_major__ == 3) && (__clang_minor__ < 4)) || (__clang_major__ < 3)
+# error "GLM requires Clang 3.4 or higher"
+# elif __clang_major__ == 3 && __clang_minor__ == 4
+# define GLM_COMPILER GLM_COMPILER_CLANG34
+# elif __clang_major__ == 3 && __clang_minor__ == 5
+# define GLM_COMPILER GLM_COMPILER_CLANG35
+# elif __clang_major__ == 3 && __clang_minor__ == 6
+# define GLM_COMPILER GLM_COMPILER_CLANG36
+# elif __clang_major__ == 3 && __clang_minor__ == 7
+# define GLM_COMPILER GLM_COMPILER_CLANG37
+# elif __clang_major__ == 3 && __clang_minor__ == 8
+# define GLM_COMPILER GLM_COMPILER_CLANG38
+# elif __clang_major__ == 3 && __clang_minor__ >= 9
+# define GLM_COMPILER GLM_COMPILER_CLANG39
+# elif __clang_major__ == 4 && __clang_minor__ == 0
+# define GLM_COMPILER GLM_COMPILER_CLANG40
+# elif __clang_major__ == 4 && __clang_minor__ == 1
+# define GLM_COMPILER GLM_COMPILER_CLANG41
+# elif __clang_major__ == 4 && __clang_minor__ >= 2
+# define GLM_COMPILER GLM_COMPILER_CLANG42
+# elif __clang_major__ >= 4
+# define GLM_COMPILER GLM_COMPILER_CLANG42
+# endif
+# endif
+
+// Visual C++
+#elif defined(_MSC_VER)
+# if _MSC_VER >= 1920
+# define GLM_COMPILER GLM_COMPILER_VC16
+# elif _MSC_VER >= 1916
+# define GLM_COMPILER GLM_COMPILER_VC15_9
+# elif _MSC_VER >= 1915
+# define GLM_COMPILER GLM_COMPILER_VC15_8
+# elif _MSC_VER >= 1914
+# define GLM_COMPILER GLM_COMPILER_VC15_7
+# elif _MSC_VER >= 1913
+# define GLM_COMPILER GLM_COMPILER_VC15_6
+# elif _MSC_VER >= 1912
+# define GLM_COMPILER GLM_COMPILER_VC15_5
+# elif _MSC_VER >= 1911
+# define GLM_COMPILER GLM_COMPILER_VC15_3
+# elif _MSC_VER >= 1910
+# define GLM_COMPILER GLM_COMPILER_VC15
+# elif _MSC_VER >= 1900
+# define GLM_COMPILER GLM_COMPILER_VC14
+# elif _MSC_VER >= 1800
+# define GLM_COMPILER GLM_COMPILER_VC12
+# elif _MSC_VER < 1800
+# error "GLM requires Visual C++ 12 - 2013 or higher"
+# endif//_MSC_VER
+
+// G++
+#elif defined(__GNUC__) || defined(__MINGW32__)
+# if __GNUC__ >= 8
+# define GLM_COMPILER GLM_COMPILER_GCC8
+# elif __GNUC__ >= 7
+# define GLM_COMPILER GLM_COMPILER_GCC7
+# elif __GNUC__ >= 6
+# define GLM_COMPILER GLM_COMPILER_GCC6
+# elif __GNUC__ >= 5
+# define GLM_COMPILER GLM_COMPILER_GCC5
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
+# define GLM_COMPILER GLM_COMPILER_GCC49
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 8
+# define GLM_COMPILER GLM_COMPILER_GCC48
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7
+# define GLM_COMPILER GLM_COMPILER_GCC47
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6
+# define GLM_COMPILER GLM_COMPILER_GCC46
+# elif ((__GNUC__ == 4) && (__GNUC_MINOR__ < 6)) || (__GNUC__ < 4)
+# error "GLM requires GCC 4.6 or higher"
+# endif
+
+#else
+# define GLM_COMPILER GLM_COMPILER_UNKNOWN
+#endif
+
+#ifndef GLM_COMPILER
+# error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message."
+#endif//GLM_COMPILER
+
+///////////////////////////////////////////////////////////////////////////////////
+// Instruction sets
+
+// User defines: GLM_FORCE_PURE GLM_FORCE_INTRINSICS GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_AVX GLM_FORCE_AVX2
+
+#define GLM_ARCH_MIPS_BIT (0x10000000)
+#define GLM_ARCH_PPC_BIT (0x20000000)
+#define GLM_ARCH_ARM_BIT (0x40000000)
+#define GLM_ARCH_ARMV8_BIT (0x01000000)
+#define GLM_ARCH_X86_BIT (0x80000000)
+
+#define GLM_ARCH_SIMD_BIT (0x00001000)
+
+#define GLM_ARCH_NEON_BIT (0x00000001)
+#define GLM_ARCH_SSE_BIT (0x00000002)
+#define GLM_ARCH_SSE2_BIT (0x00000004)
+#define GLM_ARCH_SSE3_BIT (0x00000008)
+#define GLM_ARCH_SSSE3_BIT (0x00000010)
+#define GLM_ARCH_SSE41_BIT (0x00000020)
+#define GLM_ARCH_SSE42_BIT (0x00000040)
+#define GLM_ARCH_AVX_BIT (0x00000080)
+#define GLM_ARCH_AVX2_BIT (0x00000100)
+
+#define GLM_ARCH_UNKNOWN (0)
+#define GLM_ARCH_X86 (GLM_ARCH_X86_BIT)
+#define GLM_ARCH_SSE (GLM_ARCH_SSE_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_X86)
+#define GLM_ARCH_SSE2 (GLM_ARCH_SSE2_BIT | GLM_ARCH_SSE)
+#define GLM_ARCH_SSE3 (GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2)
+#define GLM_ARCH_SSSE3 (GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3)
+#define GLM_ARCH_SSE41 (GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3)
+#define GLM_ARCH_SSE42 (GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41)
+#define GLM_ARCH_AVX (GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42)
+#define GLM_ARCH_AVX2 (GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX)
+#define GLM_ARCH_ARM (GLM_ARCH_ARM_BIT)
+#define GLM_ARCH_ARMV8 (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM | GLM_ARCH_ARMV8_BIT)
+#define GLM_ARCH_NEON (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM)
+#define GLM_ARCH_MIPS (GLM_ARCH_MIPS_BIT)
+#define GLM_ARCH_PPC (GLM_ARCH_PPC_BIT)
+
+#if defined(GLM_FORCE_ARCH_UNKNOWN) || defined(GLM_FORCE_PURE)
+# define GLM_ARCH GLM_ARCH_UNKNOWN
+#elif defined(GLM_FORCE_NEON)
+# if __ARM_ARCH >= 8
+# define GLM_ARCH (GLM_ARCH_ARMV8)
+# else
+# define GLM_ARCH (GLM_ARCH_NEON)
+# endif
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_AVX2)
+# define GLM_ARCH (GLM_ARCH_AVX2)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_AVX)
+# define GLM_ARCH (GLM_ARCH_AVX)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE42)
+# define GLM_ARCH (GLM_ARCH_SSE42)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE41)
+# define GLM_ARCH (GLM_ARCH_SSE41)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSSE3)
+# define GLM_ARCH (GLM_ARCH_SSSE3)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE3)
+# define GLM_ARCH (GLM_ARCH_SSE3)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE2)
+# define GLM_ARCH (GLM_ARCH_SSE2)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE)
+# define GLM_ARCH (GLM_ARCH_SSE)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY)
+# if defined(__AVX2__)
+# define GLM_ARCH (GLM_ARCH_AVX2)
+# elif defined(__AVX__)
+# define GLM_ARCH (GLM_ARCH_AVX)
+# elif defined(__SSE4_2__)
+# define GLM_ARCH (GLM_ARCH_SSE42)
+# elif defined(__SSE4_1__)
+# define GLM_ARCH (GLM_ARCH_SSE41)
+# elif defined(__SSSE3__)
+# define GLM_ARCH (GLM_ARCH_SSSE3)
+# elif defined(__SSE3__)
+# define GLM_ARCH (GLM_ARCH_SSE3)
+# elif defined(__SSE2__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86_FP)
+# define GLM_ARCH (GLM_ARCH_SSE2)
+# elif defined(__i386__)
+# define GLM_ARCH (GLM_ARCH_X86)
+# elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
+# define GLM_ARCH (GLM_ARCH_ARMV8)
+# elif defined(__ARM_NEON)
+# define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON)
+# elif defined(__arm__ ) || defined(_M_ARM)
+# define GLM_ARCH (GLM_ARCH_ARM)
+# elif defined(__mips__ )
+# define GLM_ARCH (GLM_ARCH_MIPS)
+# elif defined(__powerpc__ ) || defined(_M_PPC)
+# define GLM_ARCH (GLM_ARCH_PPC)
+# else
+# define GLM_ARCH (GLM_ARCH_UNKNOWN)
+# endif
+#else
+# if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(__i386__)
+# define GLM_ARCH (GLM_ARCH_X86)
+# elif defined(__arm__) || defined(_M_ARM)
+# define GLM_ARCH (GLM_ARCH_ARM)
+# elif defined(__powerpc__) || defined(_M_PPC)
+# define GLM_ARCH (GLM_ARCH_PPC)
+# elif defined(__mips__)
+# define GLM_ARCH (GLM_ARCH_MIPS)
+# else
+# define GLM_ARCH (GLM_ARCH_UNKNOWN)
+# endif
+#endif
+
+#if GLM_ARCH & GLM_ARCH_AVX2_BIT
+# include <immintrin.h>
+#elif GLM_ARCH & GLM_ARCH_AVX_BIT
+# include <immintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE42_BIT
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# include <popcntintrin.h>
+# endif
+# include <nmmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE41_BIT
+# include <smmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT
+# include <tmmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE3_BIT
+# include <pmmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE2_BIT
+# include <emmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_NEON_BIT
+# include "neon.h"
+#endif//GLM_ARCH
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+ typedef __m128 glm_f32vec4;
+ typedef __m128i glm_i32vec4;
+ typedef __m128i glm_u32vec4;
+ typedef __m128d glm_f64vec2;
+ typedef __m128i glm_i64vec2;
+ typedef __m128i glm_u64vec2;
+
+ typedef glm_f32vec4 glm_vec4;
+ typedef glm_i32vec4 glm_ivec4;
+ typedef glm_u32vec4 glm_uvec4;
+ typedef glm_f64vec2 glm_dvec2;
+#endif
+
+#if GLM_ARCH & GLM_ARCH_AVX_BIT
+ typedef __m256d glm_f64vec4;
+ typedef glm_f64vec4 glm_dvec4;
+#endif
+
+#if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ typedef __m256i glm_i64vec4;
+ typedef __m256i glm_u64vec4;
+#endif
+
+#if GLM_ARCH & GLM_ARCH_NEON_BIT
+ typedef float32x4_t glm_f32vec4;
+ typedef int32x4_t glm_i32vec4;
+ typedef uint32x4_t glm_u32vec4;
+#endif
diff --git a/3rdparty/glm/source/glm/simd/trigonometric.h b/3rdparty/glm/source/glm/simd/trigonometric.h
new file mode 100644
index 0000000..739b796
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/trigonometric.h
@@ -0,0 +1,9 @@
+/// @ref simd
+/// @file glm/simd/trigonometric.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
diff --git a/3rdparty/glm/source/glm/simd/vector_relational.h b/3rdparty/glm/source/glm/simd/vector_relational.h
new file mode 100644
index 0000000..f7385e9
--- /dev/null
+++ b/3rdparty/glm/source/glm/simd/vector_relational.h
@@ -0,0 +1,8 @@
+/// @ref simd
+/// @file glm/simd/vector_relational.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/3rdparty/glm/source/glm/trigonometric.hpp b/3rdparty/glm/source/glm/trigonometric.hpp
new file mode 100644
index 0000000..51d49c1
--- /dev/null
+++ b/3rdparty/glm/source/glm/trigonometric.hpp
@@ -0,0 +1,210 @@
+/// @ref core
+/// @file glm/trigonometric.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+///
+/// @defgroup core_func_trigonometric Angle and Trigonometry Functions
+/// @ingroup core
+///
+/// Function parameters specified as angle are assumed to be in units of radians.
+/// In no case will any of these functions result in a divide by zero error. If
+/// the divisor of a ratio is 0, then results will be undefined.
+///
+/// These all operate component-wise. The description is per component.
+///
+/// Include <glm/trigonometric.hpp> to use these core features.
+///
+/// @see ext_vector_trigonometric
+
+#pragma once
+
+#include "detail/setup.hpp"
+#include "detail/qualifier.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_func_trigonometric
+ /// @{
+
+ /// Converts degrees to radians and returns the result.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/radians.xml">GLSL radians man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> radians(vec<L, T, Q> const& degrees);
+
+ /// Converts radians to degrees and returns the result.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/degrees.xml">GLSL degrees man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> degrees(vec<L, T, Q> const& radians);
+
+ /// The standard trigonometric sine function.
+ /// The values returned by this function will range from [-1, 1].
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/sin.xml">GLSL sin man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> sin(vec<L, T, Q> const& angle);
+
+ /// The standard trigonometric cosine function.
+ /// The values returned by this function will range from [-1, 1].
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/cos.xml">GLSL cos man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> cos(vec<L, T, Q> const& angle);
+
+ /// The standard trigonometric tangent function.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/tan.xml">GLSL tan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> tan(vec<L, T, Q> const& angle);
+
+ /// Arc sine. Returns an angle whose sine is x.
+ /// The range of values returned by this function is [-PI/2, PI/2].
+ /// Results are undefined if |x| > 1.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/asin.xml">GLSL asin man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> asin(vec<L, T, Q> const& x);
+
+ /// Arc cosine. Returns an angle whose cosine is x.
+ /// The range of values returned by this function is [0, PI].
+ /// Results are undefined if |x| > 1.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/acos.xml">GLSL acos man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> acos(vec<L, T, Q> const& x);
+
+ /// Arc tangent. Returns an angle whose tangent is y/x.
+ /// The signs of x and y are used to determine what
+ /// quadrant the angle is in. The range of values returned
+ /// by this function is [-PI, PI]. Results are undefined
+ /// if x and y are both 0.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/atan.xml">GLSL atan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> atan(vec<L, T, Q> const& y, vec<L, T, Q> const& x);
+
+ /// Arc tangent. Returns an angle whose tangent is y_over_x.
+ /// The range of values returned by this function is [-PI/2, PI/2].
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/atan.xml">GLSL atan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> atan(vec<L, T, Q> const& y_over_x);
+
+ /// Returns the hyperbolic sine function, (exp(x) - exp(-x)) / 2
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/sinh.xml">GLSL sinh man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> sinh(vec<L, T, Q> const& angle);
+
+ /// Returns the hyperbolic cosine function, (exp(x) + exp(-x)) / 2
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/cosh.xml">GLSL cosh man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> cosh(vec<L, T, Q> const& angle);
+
+ /// Returns the hyperbolic tangent function, sinh(angle) / cosh(angle)
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/tanh.xml">GLSL tanh man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> tanh(vec<L, T, Q> const& angle);
+
+ /// Arc hyperbolic sine; returns the inverse of sinh.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/asinh.xml">GLSL asinh man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> asinh(vec<L, T, Q> const& x);
+
+ /// Arc hyperbolic cosine; returns the non-negative inverse
+ /// of cosh. Results are undefined if x < 1.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/acosh.xml">GLSL acosh man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> acosh(vec<L, T, Q> const& x);
+
+ /// Arc hyperbolic tangent; returns the inverse of tanh.
+ /// Results are undefined if abs(x) >= 1.
+ ///
+ /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+ /// @tparam T Floating-point scalar types
+ /// @tparam Q Value from qualifier enum
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/atanh.xml">GLSL atanh man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL vec<L, T, Q> atanh(vec<L, T, Q> const& x);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_trigonometric.inl"
diff --git a/3rdparty/glm/source/glm/vec2.hpp b/3rdparty/glm/source/glm/vec2.hpp
new file mode 100644
index 0000000..cd4e070
--- /dev/null
+++ b/3rdparty/glm/source/glm/vec2.hpp
@@ -0,0 +1,14 @@
+/// @ref core
+/// @file glm/vec2.hpp
+
+#pragma once
+#include "./ext/vector_bool2.hpp"
+#include "./ext/vector_bool2_precision.hpp"
+#include "./ext/vector_float2.hpp"
+#include "./ext/vector_float2_precision.hpp"
+#include "./ext/vector_double2.hpp"
+#include "./ext/vector_double2_precision.hpp"
+#include "./ext/vector_int2.hpp"
+#include "./ext/vector_int2_sized.hpp"
+#include "./ext/vector_uint2.hpp"
+#include "./ext/vector_uint2_sized.hpp"
diff --git a/3rdparty/glm/source/glm/vec3.hpp b/3rdparty/glm/source/glm/vec3.hpp
new file mode 100644
index 0000000..f5a927d
--- /dev/null
+++ b/3rdparty/glm/source/glm/vec3.hpp
@@ -0,0 +1,14 @@
+/// @ref core
+/// @file glm/vec3.hpp
+
+#pragma once
+#include "./ext/vector_bool3.hpp"
+#include "./ext/vector_bool3_precision.hpp"
+#include "./ext/vector_float3.hpp"
+#include "./ext/vector_float3_precision.hpp"
+#include "./ext/vector_double3.hpp"
+#include "./ext/vector_double3_precision.hpp"
+#include "./ext/vector_int3.hpp"
+#include "./ext/vector_int3_sized.hpp"
+#include "./ext/vector_uint3.hpp"
+#include "./ext/vector_uint3_sized.hpp"
diff --git a/3rdparty/glm/source/glm/vec4.hpp b/3rdparty/glm/source/glm/vec4.hpp
new file mode 100644
index 0000000..c6ea9f1
--- /dev/null
+++ b/3rdparty/glm/source/glm/vec4.hpp
@@ -0,0 +1,15 @@
+/// @ref core
+/// @file glm/vec4.hpp
+
+#pragma once
+#include "./ext/vector_bool4.hpp"
+#include "./ext/vector_bool4_precision.hpp"
+#include "./ext/vector_float4.hpp"
+#include "./ext/vector_float4_precision.hpp"
+#include "./ext/vector_double4.hpp"
+#include "./ext/vector_double4_precision.hpp"
+#include "./ext/vector_int4.hpp"
+#include "./ext/vector_int4_sized.hpp"
+#include "./ext/vector_uint4.hpp"
+#include "./ext/vector_uint4_sized.hpp"
+
diff --git a/3rdparty/glm/source/glm/vector_relational.hpp b/3rdparty/glm/source/glm/vector_relational.hpp
new file mode 100644
index 0000000..a0fe17e
--- /dev/null
+++ b/3rdparty/glm/source/glm/vector_relational.hpp
@@ -0,0 +1,121 @@
+/// @ref core
+/// @file glm/vector_relational.hpp
+///
+/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+///
+/// @defgroup core_func_vector_relational Vector Relational Functions
+/// @ingroup core
+///
+/// Relational and equality operators (<, <=, >, >=, ==, !=) are defined to
+/// operate on scalars and produce scalar Boolean results. For vector results,
+/// use the following built-in functions.
+///
+/// In all cases, the sizes of all the input and return vectors for any particular
+/// call must match.
+///
+/// Include <glm/vector_relational.hpp> to use these core features.
+///
+/// @see ext_vector_relational
+
+#pragma once
+
+#include "detail/qualifier.hpp"
+#include "detail/setup.hpp"
+
+namespace glm
+{
+ /// @addtogroup core_func_vector_relational
+ /// @{
+
+ /// Returns the component-wise comparison result of x < y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T A floating-point or integer scalar type.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/lessThan.xml">GLSL lessThan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> lessThan(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the component-wise comparison of result x <= y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T A floating-point or integer scalar type.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/lessThanEqual.xml">GLSL lessThanEqual man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> lessThanEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the component-wise comparison of result x > y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T A floating-point or integer scalar type.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/greaterThan.xml">GLSL greaterThan man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> greaterThan(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the component-wise comparison of result x >= y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T A floating-point or integer scalar type.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/greaterThanEqual.xml">GLSL greaterThanEqual man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> greaterThanEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the component-wise comparison of result x == y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T A floating-point, integer or bool scalar type.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/equal.xml">GLSL equal man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns the component-wise comparison of result x != y.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ /// @tparam T A floating-point, integer or bool scalar type.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/notEqual.xml">GLSL notEqual man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, typename T, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+ /// Returns true if any component of x is true.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/any.xml">GLSL any man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool any(vec<L, bool, Q> const& v);
+
+ /// Returns true if all components of x are true.
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/all.xml">GLSL all man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR bool all(vec<L, bool, Q> const& v);
+
+ /// Returns the component-wise logical complement of x.
+	/// /!\ Because of language incompatibilities between C++ and GLSL, GLM defines this function as not_ instead of not (a reserved C++ keyword).
+ ///
+ /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+ ///
+ /// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/not.xml">GLSL not man page</a>
+ /// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.7 Vector Relational Functions</a>
+ template<length_t L, qualifier Q>
+ GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> not_(vec<L, bool, Q> const& v);
+
+ /// @}
+}//namespace glm
+
+#include "detail/func_vector_relational.inl"
diff --git a/3rdparty/glm/source/manual.md b/3rdparty/glm/source/manual.md
new file mode 100644
index 0000000..f98c24b
--- /dev/null
+++ b/3rdparty/glm/source/manual.md
@@ -0,0 +1,2430 @@
+![Alt](./doc/manual/logo-mini.png "GLM Logo")
+
+# GLM 0.9.9 Manual
+
+![Alt](./doc/manual/g-truc.png "G-Truc Logo")
+
+---
+<div style="page-break-after: always;"> </div>
+
+## Table of Contents
++ [0. Licenses](#section0)
++ [1. Getting started](#section1)
++ [1.1. Using global headers](#section1_1)
++ [1.2. Using separated headers](#section1_2)
++ [1.3. Using extension headers](#section1_3)
++ [1.4. Dependencies](#section1_4)
++ [1.5. Finding GLM with CMake](#section1_5)
++ [2. Preprocessor configurations](#section2)
++ [2.1. GLM\_FORCE\_MESSAGES: Platform auto detection and default configuration](#section2_1)
++ [2.2. GLM\_FORCE\_PLATFORM\_UNKNOWN: Force GLM to not detect the build platform](#section2_2)
++ [2.3. GLM\_FORCE\_COMPILER\_UNKNOWN: Force GLM to not detect the C++ compiler](#section2_3)
++ [2.4. GLM\_FORCE\_ARCH\_UNKNOWN: Force GLM to not detect the build architecture](#section2_4)
++ [2.5. GLM\_FORCE\_CXX\_UNKNOWN: Force GLM to not detect the C++ standard](#section2_5)
++ [2.6. GLM\_FORCE\_CXX**: C++ language detection](#section2_6)
++ [2.7. GLM\_FORCE\_EXPLICIT\_CTOR: Requiring explicit conversions](#section2_7)
++ [2.8. GLM\_FORCE\_INLINE: Force inline](#section2_8)
++ [2.9. GLM\_FORCE\_ALIGNED\_GENTYPES: Force GLM to enable aligned types](#section2_9)
++ [2.10. GLM\_FORCE\_DEFAULT\_ALIGNED\_GENTYPES: Force GLM to use aligned types by default](#section2_10)
++ [2.11. GLM\_FORCE\_INTRINSICS: Using SIMD optimizations](#section2_11)
++ [2.12. GLM\_FORCE\_PRECISION\_**: Default precision](#section2_12)
++ [2.13. GLM\_FORCE\_SINGLE\_ONLY: Removed explicit 64-bits floating point types](#section2_13)
++ [2.14. GLM\_FORCE\_SWIZZLE: Enable swizzle operators](#section2_14)
++ [2.15. GLM\_FORCE\_XYZW\_ONLY: Only exposes x, y, z and w components](#section2_15)
++ [2.16. GLM\_FORCE\_LEFT\_HANDED: Force left handed coordinate system](#section2_16)
++ [2.17. GLM\_FORCE\_DEPTH\_ZERO\_TO\_ONE: Force the use of a clip space between 0 to 1](#section2_17)
++ [2.18. GLM\_FORCE\_SIZE\_T\_LENGTH: Vector and matrix static size type](#section2_18)
++ [2.19. GLM\_FORCE\_UNRESTRICTED\_GENTYPE: Removing genType restriction](#section2_19)
++ [2.20. GLM\_FORCE\_SILENT\_WARNINGS: Silent C++ warnings from language extensions](#section2_20)
++ [2.21. GLM\_FORCE\_QUAT\_DATA\_WXYZ: Force GLM to store quat data as w,x,y,z instead of x,y,z,w](#section2_21)
++ [3. Stable extensions](#section3)
++ [3.1. Scalar types](#section3_1)
++ [3.2. Scalar functions](#section3_2)
++ [3.3. Vector types](#section3_3)
++ [3.4. Vector types with precision qualifiers](#section3_4)
++ [3.5. Vector functions](#section3_5)
++ [3.6. Matrix types](#section3_6)
++ [3.7. Matrix types with precision qualifiers](#section3_7)
++ [3.8. Matrix functions](#section3_8)
++ [3.9. Quaternion types](#section3_9)
++ [3.10. Quaternion types with precision qualifiers](#section3_10)
++ [3.11. Quaternion functions](#section3_11)
++ [4. Recommended extensions](#section4)
++ [4.1. GLM_GTC_bitfield](#section4_1)
++ [4.2. GLM_GTC_color_space](#section4_2)
++ [4.3. GLM_GTC_constants](#section4_3)
++ [4.4. GLM_GTC_epsilon](#section4_4)
++ [4.5. GLM_GTC_integer](#section4_5)
++ [4.6. GLM_GTC_matrix_access](#section4_6)
++ [4.7. GLM_GTC_matrix_integer](#section4_7)
++ [4.8. GLM_GTC_matrix_inverse](#section4_8)
++ [4.9. GLM_GTC_matrix_transform](#section4_9)
++ [4.10. GLM_GTC_noise](#section4_10)
++ [4.11. GLM_GTC_packing](#section4_11)
++ [4.12. GLM_GTC_quaternion](#section4_12)
++ [4.13. GLM_GTC_random](#section4_13)
++ [4.14. GLM_GTC_reciprocal](#section4_14)
++ [4.15. GLM_GTC_round](#section4_15)
++ [4.16. GLM_GTC_type_alignment](#section4_16)
++ [4.17. GLM_GTC_type_precision](#section4_17)
++ [4.18. GLM_GTC_type_ptr](#section4_18)
++ [4.19. GLM_GTC_ulp](#section4_19)
++ [4.20. GLM_GTC_vec1](#section4_20)
++ [5. OpenGL interoperability](#section5)
++ [5.1. GLM Replacements for deprecated OpenGL functions](#section5_1)
++ [5.2. GLM Replacements for GLU functions](#section5_2)
++ [6. Known issues](#section6)
++ [6.1. Not function](#section6_1)
++ [6.2. Precision qualifiers support](#section6_2)
++ [7. FAQ](#section7)
++ [7.1 Why GLM follows GLSL specification and conventions?](#section7_1)
++ [7.2. Does GLM run GLSL programs?](#section7_2)
++ [7.3. Does a GLSL compiler build GLM codes?](#section7_3)
++ [7.4. Should I use ‘GTX’ extensions?](#section7_4)
++ [7.5. Where can I ask my questions?](#section7_5)
++ [7.6. Where can I find the documentation of extensions?](#section7_6)
++ [7.7. Should I use 'using namespace glm;'?](#section7_7)
++ [7.8. Is GLM fast?](#section7_8)
++ [7.9. When I build with Visual C++ with /w4 warning level, I have warnings...](#section7_9)
++ [7.10. Why some GLM functions can crash because of division by zero?](#section7_10)
++ [7.11. What unit for angles is used in GLM?](#section7_11)
++ [7.12. Windows headers cause build errors...](#section7_12)
++ [7.13. Constant expressions support](#section7_13)
++ [8. Code samples](#section8)
++ [8.1. Compute a triangle normal](#section8_1)
++ [8.2. Matrix transform](#section8_2)
++ [8.3. Vector types](#section8_3)
++ [8.4. Lighting](#section8_4)
++ [9. Contributing to GLM](#section9)
++ [9.1. Submitting bug reports](#section9_1)
++ [9.2. Contributing to GLM with pull request](#section9_2)
++ [9.3. Coding style](#section9_3)
++ [10. References](#section10)
++ [10.1. OpenGL specifications](#section10_1)
++ [10.2. External links](#section10_2)
++ [10.3. Projects using GLM](#section10_3)
++ [10.4. Tutorials using GLM](#section10_4)
++ [10.5. Equivalent for other languages](#section10_5)
++ [10.6. Alternatives to GLM](#section10_6)
++ [10.7. Acknowledgements](#section10_7)
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section0"></a> Licenses
+
+### The Happy Bunny License (Modified MIT License)
+
+Copyright (c) 2005 - G-Truc Creation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+Restrictions: By making use of the Software for military purposes, you
+choose to make a Bunny unhappy.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+![](./doc/manual/frontpage1.png)
+
+### The MIT License
+
+Copyright (c) 2005 - G-Truc Creation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+![](./doc/manual/frontpage2.png)
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section1"></a> 1. Getting started
+### <a name="section1_1"></a> 1.1. Using global headers
+
+GLM is a header-only library, and thus does not need to be compiled. We can use GLM's implementation of GLSL's mathematics functionality by including the `<glm/glm.hpp>` header:
+
+```cpp
+#include <glm/glm.hpp>
+```
+
+To extend the feature set supported by GLM while keeping the library as close to GLSL as possible, new features are implemented as extensions that can be included through a separate header:
+```cpp
+// Include all GLM core / GLSL features
+#include <glm/glm.hpp> // vec2, vec3, mat4, radians
+
+// Include all GLM extensions
+#include <glm/ext.hpp> // perspective, translate, rotate
+
+glm::mat4 transform(glm::vec2 const& Orientation, glm::vec3 const& Translate, glm::vec3 const& Up)
+{
+ glm::mat4 Proj = glm::perspective(glm::radians(45.f), 1.33f, 0.1f, 10.f);
+ glm::mat4 ViewTranslate = glm::translate(glm::mat4(1.f), Translate);
+ glm::mat4 ViewRotateX = glm::rotate(ViewTranslate, Orientation.y, Up);
+ glm::mat4 View = glm::rotate(ViewRotateX, Orientation.x, Up);
+ glm::mat4 Model = glm::mat4(1.0f);
+ return Proj * View * Model;
+}
+```
+
+*Note: Including `<glm/glm.hpp>` and `<glm/ext.hpp>` is convenient but pulls in a lot of code, which will significantly increase build time, particularly if these files are included in all source files. We may prefer to use the approaches described in the two following sections to keep the project build fast.*
+
+### <a name="section1_2"></a> 1.2. Using separated headers
+
+GLM relies on C++ templates heavily, and may significantly increase compilation times for projects that use it. Hence, user projects could only include the features they actually use. Following is the list of all the core features, based on GLSL specification, headers:
+```cpp
+#include <glm/vec2.hpp> // vec2, bvec2, dvec2, ivec2 and uvec2
+#include <glm/vec3.hpp> // vec3, bvec3, dvec3, ivec3 and uvec3
+#include <glm/vec4.hpp> // vec4, bvec4, dvec4, ivec4 and uvec4
+#include <glm/mat2x2.hpp> // mat2, dmat2
+#include <glm/mat2x3.hpp> // mat2x3, dmat2x3
+#include <glm/mat2x4.hpp> // mat2x4, dmat2x4
+#include <glm/mat3x2.hpp> // mat3x2, dmat3x2
+#include <glm/mat3x3.hpp> // mat3, dmat3
+#include <glm/mat3x4.hpp> // mat3x4, dmat3x4
+#include <glm/mat4x2.hpp> // mat4x2, dmat4x2
+#include <glm/mat4x3.hpp> // mat4x3, dmat4x3
+#include <glm/mat4x4.hpp> // mat4, dmat4
+#include <glm/common.hpp> // all the GLSL common functions: abs, min, mix, isnan, fma, etc.
+#include <glm/exponential.hpp> // all the GLSL exponential functions: pow, log, exp2, sqrt, etc.
+#include <glm/geometric.hpp> // all the GLSL geometry functions: dot, cross, reflect, etc.
+#include <glm/integer.hpp> // all the GLSL integer functions: findMSB, bitfieldExtract, etc.
+#include <glm/matrix.hpp> // all the GLSL matrix functions: transpose, inverse, etc.
+#include <glm/packing.hpp> // all the GLSL packing functions: packUnorm4x8, unpackHalf2x16, etc.
+#include <glm/trigonometric.hpp> // all the GLSL trigonometric functions: radians, cos, asin, etc.
+#include <glm/vector_relational.hpp> // all the GLSL vector relational functions: equal, less, etc.
+```
+
+The following is a code sample using separated core headers and an extension:
+```cpp
+// Include GLM core features
+#include <glm/vec2.hpp> // vec2
+#include <glm/vec3.hpp> // vec3
+#include <glm/mat4x4.hpp> // mat4
+#include <glm/trigonometric.hpp> //radians
+
+// Include GLM extension
+#include <glm/ext/matrix_transform.hpp> // perspective, translate, rotate
+
+glm::mat4 transform(glm::vec2 const& Orientation, glm::vec3 const& Translate, glm::vec3 const& Up)
+{
+ glm::mat4 Proj = glm::perspective(glm::radians(45.f), 1.33f, 0.1f, 10.f);
+ glm::mat4 ViewTranslate = glm::translate(glm::mat4(1.f), Translate);
+ glm::mat4 ViewRotateX = glm::rotate(ViewTranslate, Orientation.y, Up);
+ glm::mat4 View = glm::rotate(ViewRotateX, Orientation.x, Up);
+ glm::mat4 Model = glm::mat4(1.0f);
+ return Proj * View * Model;
+}
+```
+
+### <a name="section1_3"></a> 1.3. Using extension headers
+
+Using GLM through split headers to minimize the project build time:
+```cpp
+// Include GLM vector extensions:
+#include <glm/ext/vector_float2.hpp> // vec2
+#include <glm/ext/vector_float3.hpp> // vec3
+#include <glm/ext/vector_trigonometric.hpp> // radians
+
+// Include GLM matrix extensions:
+#include <glm/ext/matrix_float4x4.hpp> // mat4
+#include <glm/ext/matrix_transform.hpp> // perspective, translate, rotate
+
+glm::mat4 transform(glm::vec2 const& Orientation, glm::vec3 const& Translate, glm::vec3 const& Up)
+{
+ glm::mat4 Proj = glm::perspective(glm::radians(45.f), 1.33f, 0.1f, 10.f);
+ glm::mat4 ViewTranslate = glm::translate(glm::mat4(1.f), Translate);
+ glm::mat4 ViewRotateX = glm::rotate(ViewTranslate, Orientation.y, Up);
+ glm::mat4 View = glm::rotate(ViewRotateX, Orientation.x, Up);
+ glm::mat4 Model = glm::mat4(1.0f);
+ return Proj * View * Model;
+}
+```
+
+### <a name="section1_4"></a> 1.4. Dependencies
+
+GLM does not depend on external libraries or headers such as `<GL/gl.h>`, [`<GL/glcorearb.h>`](http://www.opengl.org/registry/api/GL/glcorearb.h), `<GLES3/gl3.h>`, `<GL/glu.h>`, or `<windows.h>`.
+
+### <a name="section1_5"></a> 1.5. Finding GLM with CMake
+
+When installed, GLM provides the CMake package configuration files `glmConfig.cmake` and `glmConfigVersion.cmake`.
+
+To use these configurations files, you may need to set the `glm_DIR` variable to the directory containing the configuration files `<installation prefix>/lib/cmake/glm/`.
+
+Use the `find_package` CMake command to load the configurations into your project. Lastly, either link your executable against the `glm::glm` target or add `${GLM_INCLUDE_DIRS}` to your target's include directories:
+
+```cmake
+set(glm_DIR <installation prefix>/lib/cmake/glm) # if necessary
+find_package(glm REQUIRED)
+target_link_libraries(<your executable> glm::glm)
+```
+
+To use GLM as a submodule in a project instead, use `add_subdirectory` to expose the same target, or add the directory to your target's include directories:
+
+```cmake
+add_subdirectory(glm)
+target_link_libraries(<your executable> glm::glm)
+# or
+target_include_directories(<your executable> glm)
+```
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section2"></a> 2. Preprocessor configurations
+
+### <a name="section2_1"></a> 2.1. GLM\_FORCE\_MESSAGES: Platform auto detection and default configuration
+
+When included, GLM will first automatically detect the compiler used, the C++ standard supported, and the compiler arguments used, in order to configure itself to match the build environment.
+
+For example, if the compiler arguments request AVX code generation, GLM will rely on its code path providing AVX optimizations when available.
+
+We can change GLM configuration using specific C++ preprocessor defines that must be declared before including any GLM headers.
+
+Using `GLM_FORCE_MESSAGES`, GLM will report the configuration as part of the build log.
+
+```cpp
+#define GLM_FORCE_MESSAGES // Or defined when building (e.g. -DGLM_FORCE_MESSAGES)
+#include <glm/glm.hpp>
+```
+
+Example of configuration log generated by `GLM_FORCE_MESSAGES`:
+```cpp
+GLM: version 0.9.9.1
+GLM: C++ 17 with extensions
+GLM: Clang compiler detected
+GLM: x86 64 bits with AVX instruction set build target
+GLM: Linux platform detected
+GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.
+GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.
+GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.
+GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.
+GLM: GLM_FORCE_LEFT_HANDED is undefined. Using right handed coordinate system.
+```
+
+The following subsections describe each configuration and define.
+
+### <a name="section2_2"></a> 2.2. GLM\_FORCE\_PLATFORM\_UNKNOWN: Force GLM not to detect the build platform
+
+`GLM_FORCE_PLATFORM_UNKNOWN` prevents GLM from detecting the build platform.
+
+### <a name="section2_3"></a> 2.3. GLM\_FORCE\_COMPILER\_UNKNOWN: Force GLM not to detect the C++ compiler
+
+`GLM_FORCE_COMPILER_UNKNOWN` prevents GLM from detecting the C++ compiler.
+
+### <a name="section2_4"></a> 2.4. GLM\_FORCE\_ARCH\_UNKNOWN: Force GLM not to detect the build architecture
+
+`GLM_FORCE_ARCH_UNKNOWN` prevents GLM from detecting the build target architecture.
+
+### <a name="section2_5"></a> 2.5. GLM\_FORCE\_CXX\_UNKNOWN: Force GLM not to detect the C++ standard
+
+`GLM_FORCE_CXX_UNKNOWN` prevents GLM from detecting the C++ compiler standard support.
+
+### <a name="section2_6"></a> 2.6. GLM\_FORCE\_CXX**: C++ language detection
+
+GLM will automatically take advantage of compilers’ language extensions when enabled. To increase cross platform compatibility and to avoid compiler extensions, a programmer can define `GLM_FORCE_CXX98` before
+any inclusion of `<glm/glm.hpp>` to restrict the language feature set to C++98:
+
+```cpp
+#define GLM_FORCE_CXX98
+#include <glm/glm.hpp>
+```
+
+For C++11, C++14, and C++17 equivalent defines are available:
+* `GLM_FORCE_CXX11`
+* `GLM_FORCE_CXX14`
+* `GLM_FORCE_CXX17`
+
+```cpp
+#define GLM_FORCE_CXX11
+#include <glm/glm.hpp>
+
+// If the compiler doesn’t support C++11, compiler errors will happen.
+```
+
+`GLM_FORCE_CXX17` overrides `GLM_FORCE_CXX14`; `GLM_FORCE_CXX14` overrides `GLM_FORCE_CXX11`; and `GLM_FORCE_CXX11` overrides `GLM_FORCE_CXX98` defines.
+
+### <a name="section2_7"></a> 2.7. GLM\_FORCE\_EXPLICIT\_CTOR: Requiring explicit conversions
+
+GLSL supports implicit conversions of vector and matrix types. For example, an ivec4 can be implicitly converted into `vec4`.
+
+Often, this behaviour is not desirable but following the spirit of the library, this is the default behavior in GLM. However, GLM 0.9.6 introduced the define `GLM_FORCE_EXPLICIT_CTOR` to require explicit conversion for GLM types.
+
+```cpp
+#include <glm/glm.hpp>
+
+void foo()
+{
+ glm::ivec4 a;
+ ...
+
+ glm::vec4 b(a); // Explicit conversion, OK
+ glm::vec4 c = a; // Implicit conversion, OK
+ ...
+}
+```
+
+With `GLM_FORCE_EXPLICIT_CTOR` define, implicit conversions are not allowed:
+
+```cpp
+#define GLM_FORCE_EXPLICIT_CTOR
+#include <glm/glm.hpp>
+
+void foo()
+{
+ glm::ivec4 a;
+ ...
+
+ glm::vec4 b(a); // Explicit conversion, OK
+ glm::vec4 c = a; // Implicit conversion, ERROR
+ ...
+}
+```
+
+### <a name="section2_8"></a> 2.8. GLM\_FORCE\_INLINE: Force inline
+
+To push further the software performance, a programmer can define `GLM_FORCE_INLINE` before any inclusion of `<glm/glm.hpp>` to force the compiler to inline GLM code.
+
+```cpp
+#define GLM_FORCE_INLINE
+#include <glm/glm.hpp>
+```
+
+### <a name="section2_9"></a> 2.9. GLM\_FORCE\_ALIGNED\_GENTYPES: Force GLM to enable aligned types
+
+Every object type has the property called alignment requirement, which is an integer value (of type `std::size_t`, always a power of 2) representing the number of bytes between successive addresses at which objects of this type can be allocated. The alignment requirement of a type can be queried with alignof or `std::alignment_of`. The pointer alignment function `std::align` can be used to obtain a suitably-aligned pointer within some buffer, and `std::aligned_storage` can be used to obtain suitably-aligned storage.
+
+Each object type imposes its alignment requirement on every object of that type; stricter alignment (with larger alignment requirement) can be requested using C++11 `alignas`.
+
+In order to satisfy alignment requirements of all non-static members of a class, padding may be inserted after some of its members.
+
+GLM supports both packed and aligned types. Packed types allow filling data structure without inserting extra padding. Aligned GLM types align addresses based on the size of the value type of a GLM type.
+
+```cpp
+#define GLM_FORCE_ALIGNED_GENTYPES
+#include <glm/glm.hpp>
+#include <glm/gtc/type_aligned.hpp>
+
+typedef glm::aligned_vec4 vec4a;
+typedef glm::packed_vec4 vec4p;
+```
+
+### <a name="section2_10"></a> 2.10. GLM\_FORCE\_DEFAULT\_ALIGNED\_GENTYPES: Force GLM to use aligned types by default
+
+GLM allows using aligned types by default for vector types using `GLM_FORCE_DEFAULT_ALIGNED_GENTYPES`:
+
+```cpp
+#define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+#include <glm/glm.hpp>
+
+struct MyStruct
+{
+ glm::vec4 a;
+ float b;
+ glm::vec3 c;
+};
+
+void foo()
+{
+ printf("MyStruct requires memory padding: %zu bytes\n", sizeof(MyStruct));
+}
+
+>>> MyStruct requires memory padding: 48 bytes
+```
+
+```cpp
+#include <glm/glm.hpp>
+
+struct MyStruct
+{
+ glm::vec4 a;
+ float b;
+ glm::vec3 c;
+};
+
+void foo()
+{
+ printf("MyStruct is tightly packed: %zu bytes\n", sizeof(MyStruct));
+}
+
+>>> MyStruct is tightly packed: 32 bytes
+```
+
+*Note: GLM SIMD optimizations require the use of aligned types*
+
+### <a name="section2_11"></a> 2.11. GLM\_FORCE\_INTRINSICS: Using SIMD optimizations
+
+GLM provides some SIMD optimizations based on [compiler intrinsics](https://msdn.microsoft.com/en-us/library/26td21ds.aspx).
+These optimizations will be enabled automatically based on the compiler arguments when `GLM_FORCE_INTRINSICS` is defined before including GLM files.
+For example, if a program is compiled with Visual Studio using `/arch:AVX`, GLM will detect this argument and generate code using AVX instructions automatically when available.
+
+It’s possible to avoid the instruction set detection by forcing the use of a specific instruction set with one of the following defines:
+`GLM_FORCE_SSE2`, `GLM_FORCE_SSE3`, `GLM_FORCE_SSSE3`, `GLM_FORCE_SSE41`, `GLM_FORCE_SSE42`, `GLM_FORCE_AVX`, `GLM_FORCE_AVX2` or `GLM_FORCE_AVX512`.
+
+The use of intrinsic functions by GLM implementation can be avoided using the define `GLM_FORCE_PURE` before any inclusion of GLM headers. This can be particularly useful if we want to rely on C++14 `constexpr`.
+
+```cpp
+#define GLM_FORCE_PURE
+#include <glm/glm.hpp>
+
+static_assert(glm::vec4::length() == 4, "Using GLM C++ 14 constexpr support for compile time tests");
+
+// GLM code will be compiled using pure C++ code without any intrinsics
+```
+
+```cpp
+#define GLM_FORCE_SIMD_AVX2
+#include <glm/glm.hpp>
+
+// If the compiler doesn’t support AVX2 instrinsics, compiler errors will happen.
+```
+
+Additionally, GLM provides a low level SIMD API in glm/simd directory for users who are really interested in writing fast algorithms.
+
+### <a name="section2_12"></a> 2.12. GLM\_FORCE\_PRECISION\_**: Default precision
+
+C++ does not provide a way to implement GLSL default precision selection (as defined in GLSL 4.10 specification section 4.5.3) with GLSL-like syntax.
+
+```glsl
+precision mediump int;
+precision highp float;
+```
+
+To use the default precision functionality, GLM provides some defines that need to be added before any include of `glm.hpp`:
+
+```cpp
+#define GLM_FORCE_PRECISION_MEDIUMP_INT
+#define GLM_FORCE_PRECISION_HIGHP_FLOAT
+#include <glm/glm.hpp>
+```
+
+Available defines for floating point types (`glm::vec\*`, `glm::mat\*`):
+
+* `GLM_FORCE_PRECISION_LOWP_FLOAT`: Low precision
+* `GLM_FORCE_PRECISION_MEDIUMP_FLOAT`: Medium precision
+* `GLM_FORCE_PRECISION_HIGHP_FLOAT`: High precision (default)
+
+Available defines for floating point types (`glm::dvec\*`, `glm::dmat\*`):
+
+* `GLM_FORCE_PRECISION_LOWP_DOUBLE`: Low precision
+* `GLM_FORCE_PRECISION_MEDIUMP_DOUBLE`: Medium precision
+* `GLM_FORCE_PRECISION_HIGHP_DOUBLE`: High precision (default)
+
+Available defines for signed integer types (`glm::ivec\*`):
+
+* `GLM_FORCE_PRECISION_LOWP_INT`: Low precision
+* `GLM_FORCE_PRECISION_MEDIUMP_INT`: Medium precision
+* `GLM_FORCE_PRECISION_HIGHP_INT`: High precision (default)
+
+Available defines for unsigned integer types (`glm::uvec\*`):
+
+* `GLM_FORCE_PRECISION_LOWP_UINT`: Low precision
+* `GLM_FORCE_PRECISION_MEDIUMP_UINT`: Medium precision
+* `GLM_FORCE_PRECISION_HIGHP_UINT`: High precision (default)
+
+### <a name="section2_13"></a> 2.13. GLM\_FORCE\_SINGLE\_ONLY: Removed explicit 64-bits floating point types
+
+Some platforms (e.g. Dreamcast) don't support double precision floating point values. To compile on such platforms, GCC has the `-m4-single-only` build argument. When defining `GLM_FORCE_SINGLE_ONLY` before including GLM headers, GLM releases the requirement of double precision floating point values support. Effectively, all the float64 types are no longer defined and double behaves like float.
+
+### <a name="section2_14"></a> 2.14. GLM\_FORCE\_SWIZZLE: Enable swizzle operators
+
+Shader languages like GLSL often feature so-called swizzle expressions, which may be used to freely select and arrange a vector's components. For example, `variable.x`, `variable.xzy` and `variable.zxyy` respectively form a scalar, a 3D vector and a 4D vector. The result of a swizzle expression in GLSL can be either an R-value or an L-value. Swizzle expressions can be written with characters from exactly one of `xyzw` (usually for positions), `rgba` (usually for colors), and `stpq` (usually for texture coordinates).
+
+```glsl
+vec4 A;
+vec2 B;
+
+B.yx = A.wy;
+B = A.xx;
+vec3 C = A.bgr;
+vec3 D = B.rsz; // Invalid, won't compile
+```
+
+GLM supports some of this functionality. Swizzling can be enabled by defining `GLM_FORCE_SWIZZLE`.
+
+*Note: Enabling swizzle expressions will massively increase the size of your binaries and the time it takes to compile them!*
+
+GLM has two levels of swizzling support described in the following subsections.
+
+#### 2.14.1. Swizzle functions for standard C++ 98
+
+When compiling GLM as C++98, R-value swizzle expressions are simulated through member functions of each vector type.
+
+```cpp
+#define GLM_FORCE_SWIZZLE // Or defined when building (e.g. -DGLM_FORCE_SWIZZLE)
+#include <glm/glm.hpp>
+
+void foo()
+{
+ glm::vec4 const ColorRGBA = glm::vec4(1.0f, 0.5f, 0.0f, 1.0f);
+ glm::vec3 const ColorBGR = ColorRGBA.bgr();
+
+ glm::vec3 const PositionA = glm::vec3(1.0f, 0.5f, 0.0f);
+ glm::vec3 const PositionB = PositionA.xyz() * 2.0f;
+
+ glm::vec2 const TexcoordST = glm::vec2(1.0f, 0.5f);
+ glm::vec4 const TexcoordSTPQ = TexcoordST.stst();
+}
+```
+
+Swizzle operators return a **copy** of the component values, and thus *can't* be used as L-values to change a vector's values.
+
+```cpp
+#define GLM_FORCE_SWIZZLE
+#include <glm/glm.hpp>
+
+void foo()
+{
+ glm::vec3 const A = glm::vec3(1.0f, 0.5f, 0.0f);
+
+ // No compiler error, but A is not modified.
+ // An anonymous copy is being modified (and then discarded).
+ A.bgr() = glm::vec3(2.0f, 1.5f, 1.0f); // A is not modified!
+}
+```
+
+#### 2.14.2. Swizzle operations for C++ 98 with language extensions
+
+Visual C++, GCC and Clang support, as a _non-standard language extension_, anonymous `struct`s as `union` members. This permits a powerful swizzling implementation that both allows L-value swizzle expressions and GLSL-like syntax. To use this feature, the language extension must be enabled by a supporting compiler and `GLM_FORCE_SWIZZLE` must be `#define`d.
+
+```cpp
+#define GLM_FORCE_SWIZZLE
+#include <glm/glm.hpp>
+
+// Only guaranteed to work with Visual C++!
+// Some compilers that support Microsoft extensions may compile this.
+void foo()
+{
+ glm::vec4 ColorRGBA = glm::vec4(1.0f, 0.5f, 0.0f, 1.0f);
+
+ // r-value:
+ glm::vec4 ColorBGRA = ColorRGBA.bgra;
+
+ // l-value:
+ ColorRGBA.bgra = ColorRGBA;
+
+ // Both l-value and r-value
+ ColorRGBA.bgra = ColorRGBA.rgba;
+}
+```
+
+This version returns implementation-specific objects that _implicitly convert_ to their respective vector types. As a consequence of this design, these extra types **can't be directly used** as C++ function arguments; they must be converted through constructors or `operator()`.
+
+```cpp
+#define GLM_FORCE_SWIZZLE
+#include <glm/glm.hpp>
+
+using namespace glm;
+
+void foo()
+{
+ vec4 Color = vec4(1.0f, 0.5f, 0.0f, 1.0f);
+
+ // Generates compiler errors. Color.rgba is not a vector type.
+ vec4 ClampedA = clamp(Color.rgba, 0.f, 1.f); // ERROR
+
+ // Explicit conversion through a constructor
+ vec4 ClampedB = clamp(vec4(Color.rgba), 0.f, 1.f); // OK
+
+ // Explicit conversion through operator()
+ vec4 ClampedC = clamp(Color.rgba(), 0.f, 1.f); // OK
+}
+```
+
+*Note: The implementation has a caveat: Swizzle operator types must be different on both sides of the assignment operator or the operation will fail. There is no known fix for this issue to date.*
+
+### <a name="section2_15"></a> 2.15. GLM\_FORCE\_XYZW\_ONLY: Only exposes x, y, z and w components
+
+Following GLSL specifications, GLM supports three sets of components to access vector types member: x, y, z, w; r, g, b, a; and s, t, p, q.
+Also, while this makes vector components very expressive in the code, it may make debugging vector types a little cumbersome, as debuggers will typically display the values three times for each component due to the existence of the three sets.
+
+To simplify vector types, GLM allows exposing only x, y, z and w components thanks to `GLM_FORCE_XYZW_ONLY` define.
+
+### <a name="section2_16"></a> 2.16. GLM\_FORCE\_LEFT\_HANDED: Force left handed coordinate system
+
+By default, OpenGL is using a right handed coordinate system. However, other APIs such as Direct3D have made a different choice and rely on the left handed coordinate system.
+
+GLM allows switching the coordinate system to left handed by defining `GLM_FORCE_LEFT_HANDED`.
+
+### <a name="section2_17"></a> 2.17. GLM\_FORCE\_DEPTH\_ZERO\_TO\_ONE: Force the use of a clip space between 0 to 1
+
+By default, OpenGL is using a -1 to 1 clip space in Z-axis. However, other APIs such as Direct3D rely on a clip space between 0 to 1 in Z-axis.
+
+GLM allows switching the clip space in Z-axis to 0 to 1 by defining `GLM_FORCE_DEPTH_ZERO_TO_ONE`.
+
+### <a name="section2_18"></a> 2.18. GLM\_FORCE\_SIZE\_T\_LENGTH: Vector and matrix static size
+
+GLSL supports the member function .length() for all vector and matrix types.
+
+```cpp
+#include <glm/glm.hpp>
+
+void foo(vec4 const& v)
+{
+ int Length = v.length();
+ ...
+}
+```
+
+This function returns an `int`; however, this function typically interacts with STL `size_t` based code. GLM provides the `GLM_FORCE_SIZE_T_LENGTH` pre-processor configuration so that the member function `length()` returns a `size_t`.
+
+Additionally, GLM defines the type `glm::length_t` to identify `length()` returned type, independently from `GLM_FORCE_SIZE_T_LENGTH`.
+
+```cpp
+#define GLM_FORCE_SIZE_T_LENGTH
+#include <glm/glm.hpp>
+
+void foo(vec4 const& v)
+{
+ glm::length_t Length = v.length();
+ ...
+}
+```
+
+### <a name="section2_19"></a> 2.19. GLM\_FORCE\_UNRESTRICTED\_GENTYPE: Removing genType restriction
+
+GLSL has restrictions on types supported by certain functions that may appear excessive.
+By default, GLM follows the GLSL specification as accurately as possible however it's possible to relax these rules using `GLM_FORCE_UNRESTRICTED_GENTYPE` define.
+
+```cpp
+#include <glm/glm.hpp>
+
+float average(float const A, float const B)
+{
+ return glm::mix(A, B, 0.5f); // By default glm::mix only supports floating-point types
+}
+```
+
+By defining GLM\_FORCE\_UNRESTRICTED\_GENTYPE, we allow using integer types:
+
+```cpp
+#define GLM_FORCE_UNRESTRICTED_GENTYPE
+#include <glm/glm.hpp>
+
+int average(int const A, int const B)
+{
+ return glm::mix(A, B, 0.5f); // integers are ok thanks to GLM_FORCE_UNRESTRICTED_GENTYPE
+}
+```
+
+### <a name="section2_20"></a> 2.20. GLM\_FORCE\_SILENT\_WARNINGS: Silent C++ warnings from language extensions
+
+When using /W4 on Visual C++ or -Wpedantic on GCC, for example, the compilers will generate warnings for using C++ language extensions (/Za with Visual C++) such as anonymous struct.
+GLM relies on anonymous structs for swizzle operators and aligned vector types. To silence those warnings, define `GLM_FORCE_SILENT_WARNINGS` before including GLM headers.
+
+
+### <a name="section2_21"></a> 2.21. GLM\_FORCE\_QUAT\_DATA\_WXYZ: Force GLM to store quat data as w,x,y,z instead of x,y,z,w
+
+By default GLM stores quaternion components in the x, y, z, w order. `GLM_FORCE_QUAT_DATA_WXYZ` allows switching the quaternion data storage to the w, x, y, z order.
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section3"></a> 3. Stable extensions
+
+### <a name="section3_1"></a> 3.1. Scalar types
+
+#### 3.1.1. GLM_EXT_scalar_int_sized
+
+This extension exposes sized and signed integer types.
+
+Include `<glm/ext/scalar_int_sized.hpp>` to use these features.
+
+#### 3.1.2. GLM_EXT_scalar_uint_sized
+
+This extension exposes sized and unsigned integer types.
+
+```cpp
+#include <glm/ext/scalar_uint_sized.hpp>
+
+glm::uint64 pack(glm::uint32 A, glm::uint16 B, glm::uint8 C, glm::uint8 D)
+{
+ glm::uint64 ShiftA = 0;
+ glm::uint64 ShiftB = sizeof(glm::uint32) * 8;
+ glm::uint64 ShiftC = (sizeof(glm::uint32) + sizeof(glm::uint16)) * 8;
+ glm::uint64 ShiftD = (sizeof(glm::uint32) + sizeof(glm::uint16) + sizeof(glm::uint8)) * 8;
+ return (glm::uint64(A) << ShiftA) | (glm::uint64(B) << ShiftB) | (glm::uint64(C) << ShiftC) | (glm::uint64(D) << ShiftD);
+}
+```
+
+Include `<glm/ext/scalar_uint_sized.hpp>` to use these features.
+
+### <a name="section3_2"></a> 3.2. Scalar functions
+
+#### 3.2.1. GLM_EXT_scalar_common
+
+This extension exposes support for `min` and `max` functions taking more than two scalar arguments. Also, it adds `fmin` and `fmax` variants which prevent `NaN` propagation.
+
+```cpp
+#include <glm/ext/scalar_common.hpp>
+
+float positiveMax(float const a, float const b)
+{
+ return glm::fmax(a, b, 0.0f);
+}
+```
+
+Include `<glm/ext/scalar_common.hpp>` to use these features.
+
+#### 3.2.2. GLM_EXT_scalar_relational
+
+This extension exposes `equal` and `notEqual` scalar variants which take an epsilon argument.
+
+```cpp
+#include <glm/ext/scalar_relational.hpp>
+
+bool epsilonEqual(float const a, float const b)
+{
+ float const CustomEpsilon = 0.0001f;
+ return glm::equal(a, b, CustomEpsilon);
+}
+```
+
+Include `<glm/ext/scalar_relational.hpp>` to use these features.
+
+#### 3.2.3. GLM_EXT_scalar_constants
+
+This extension exposes useful constants such as `epsilon` and `pi`.
+
+```cpp
+#include <glm/ext/scalar_constants.hpp>
+
+float circumference(float const Diameter)
+{
+ return glm::pi<float>() * Diameter;
+}
+```
+
+```cpp
+#include <glm/common.hpp> // abs
+#include <glm/ext/scalar_constants.hpp> // epsilon
+
+bool equalULP1(float const a, float const b)
+{
+ return glm::abs(a - b) <= glm::epsilon<float>();
+}
+```
+
+Include `<glm/ext/scalar_constants.hpp>` to use these features.
+
+#### 3.2.4. GLM_EXT_scalar_ulp
+
+This extension exposes functions that measure the accuracy of numeric calculations.
+
+```cpp
+#include <glm/ext/scalar_ulp.hpp>
+
+bool test_ulp(float x)
+{
+ float const a = glm::next_float(x); // return a float a ULP away from the float argument.
+ return float_distance(a, x) == 1; // check both float are a single ULP away.
+}
+```
+
+Include `<glm/ext/scalar_ulp.hpp>` to use these features.
+
+### <a name="section3_3"></a> 3.3. Vector types
+
+#### 3.3.1. GLM_EXT_vector_float1
+
+This extension exposes single-precision floating point vector with 1 component: `vec1`.
+
+Include `<glm/ext/vector_float1.hpp>` to use these features.
+
+#### 3.3.2. GLM_EXT_vector_float2
+
+This extension exposes single-precision floating point vector with 2 components: `vec2`.
+
+Include `<glm/ext/vector_float2.hpp>` to use these features.
+
+#### 3.3.3. GLM_EXT_vector_float3
+
+This extension exposes single-precision floating point vector with 3 components: `vec3`.
+
+Include `<glm/ext/vector_float3.hpp>` to use these features.
+
+#### 3.3.4. GLM_EXT_vector_float4
+
+This extension exposes single-precision floating point vector with 4 components: `vec4`.
+
+Include `<glm/ext/vector_float4.hpp>` to use these features.
+
+#### 3.3.5. GLM_EXT_vector_double1
+
+This extension exposes double-precision floating point vector with 1 component: `dvec1`.
+
+Include `<glm/ext/vector_double1.hpp>` to use these features.
+
+#### 3.3.6. GLM_EXT_vector_double2
+
+This extension exposes double-precision floating point vector with 2 components: `dvec2`.
+
+Include `<glm/ext/vector_double2.hpp>` to use these features.
+
+#### 3.3.7. GLM_EXT_vector_double3
+
+This extension exposes double-precision floating point vector with 3 components: `dvec3`.
+
+Include `<glm/ext/vector_double3.hpp>` to use these features.
+
+#### 3.3.8. GLM_EXT_vector_double4
+
+This extension exposes double-precision floating point vector with 4 components: `dvec4`.
+
+Include `<glm/ext/vector_double4.hpp>` to use these features.
+
+#### 3.3.9. GLM_EXT_vector_int1
+
+This extension exposes signed integer vector with 1 component: `ivec1`.
+
+Include `<glm/ext/vector_int1.hpp>` to use these features.
+
+#### 3.3.10. GLM_EXT_vector_int2
+
+This extension exposes signed integer vector with 2 components: `ivec2`.
+
+Include `<glm/ext/vector_int2.hpp>` to use these features.
+
+#### 3.3.11. GLM_EXT_vector_int3
+
+This extension exposes signed integer vector with 3 components: `ivec3`.
+
+Include `<glm/ext/vector_int3.hpp>` to use these features.
+
+#### 3.3.12. GLM_EXT_vector_int4
+
+This extension exposes signed integer vector with 4 components: `ivec4`.
+
+Include `<glm/ext/vector_int4.hpp>` to use these features.
+
+#### 3.3.13. GLM_EXT_vector_uint1
+
+This extension exposes unsigned integer vector with 1 component: `uvec1`.
+
+Include `<glm/ext/vector_uint1.hpp>` to use these features.
+
+#### 3.3.14. GLM_EXT_vector_uint2
+
+This extension exposes unsigned integer vector with 2 components: `uvec2`.
+
+Include `<glm/ext/vector_uint2.hpp>` to use these features.
+
+#### 3.3.15. GLM_EXT_vector_uint3
+
+This extension exposes unsigned integer vector with 3 components: `uvec3`.
+
+Include `<glm/ext/vector_uint3.hpp>` to use these features.
+
+#### 3.3.16. GLM_EXT_vector_uint4
+
+This extension exposes unsigned integer vector with 4 components: `uvec4`.
+
+Include `<glm/ext/vector_uint4.hpp>` to use these features.
+
+#### 3.3.17. GLM_EXT_vector_bool1
+
+This extension exposes boolean vector with 1 component: `bvec1`.
+
+Include `<glm/ext/vector_bool1.hpp>` to use these features.
+
+#### 3.3.18. GLM_EXT_vector_bool2
+
+This extension exposes boolean vector with 2 components: `bvec2`.
+
+Include `<glm/ext/vector_bool2.hpp>` to use these features.
+
+#### 3.3.19. GLM_EXT_vector_bool3
+
+This extension exposes boolean vector with 3 components: `bvec3`.
+
+Include `<glm/ext/vector_bool3.hpp>` to use these features.
+
+#### 3.3.20. GLM_EXT_vector_bool4
+
+This extension exposes boolean vector with 4 components: `bvec4`.
+
+Include `<glm/ext/vector_bool4.hpp>` to use these features.
+
+### <a name="section3_4"></a> 3.4. Vector types with precision qualifiers
+
+#### 3.4.1. GLM_EXT_vector_float1_precision
+
+This extension exposes single-precision floating point vector with 1 component using various precision in term of ULPs: `lowp_vec1`, `mediump_vec1` and `highp_vec1`.
+
+Include `<glm/ext/vector_float1_precision.hpp>` to use these features.
+
+#### 3.4.2. GLM_EXT_vector_float2_precision
+
+This extension exposes single-precision floating point vector with 2 components using various precision in term of ULPs: `lowp_vec2`, `mediump_vec2` and `highp_vec2`.
+
+Include `<glm/ext/vector_float2_precision.hpp>` to use these features.
+
+#### 3.4.3. GLM_EXT_vector_float3_precision
+
+This extension exposes single-precision floating point vector with 3 components using various precision in term of ULPs: `lowp_vec3`, `mediump_vec3` and `highp_vec3`.
+
+Include `<glm/ext/vector_float3_precision.hpp>` to use these features.
+
+#### 3.4.4. GLM_EXT_vector_float4_precision
+
+This extension exposes single-precision floating point vector with 4 components using various precision in term of ULPs: `lowp_vec4`, `mediump_vec4` and `highp_vec4`.
+
+Include `<glm/ext/vector_float4_precision.hpp>` to use these features.
+
+#### 3.4.5. GLM_EXT_vector_double1_precision
+
+This extension exposes double-precision floating point vector with 1 component using various precision in term of ULPs: `lowp_dvec1`, `mediump_dvec1` and `highp_dvec1`.
+
+Include `<glm/ext/vector_double1_precision.hpp>` to use these features.
+
+#### 3.4.6. GLM_EXT_vector_double2_precision
+
+This extension exposes double-precision floating point vector with 2 components using various precision in term of ULPs: `lowp_dvec2`, `mediump_dvec2` and `highp_dvec2`.
+
+Include `<glm/ext/vector_double2_precision.hpp>` to use these features.
+
+#### 3.4.7. GLM_EXT_vector_double3_precision
+
+This extension exposes double-precision floating point vector with 3 components using various precision in term of ULPs: `lowp_dvec3`, `mediump_dvec3` and `highp_dvec3`.
+
+Include `<glm/ext/vector_double3_precision.hpp>` to use these features.
+
+#### 3.4.8. GLM_EXT_vector_double4_precision
+
+This extension exposes double-precision floating point vector with 4 components using various precision in term of ULPs: `lowp_dvec4`, `mediump_dvec4` and `highp_dvec4`.
+
+Include `<glm/ext/vector_double4_precision.hpp>` to use these features.
+
+### <a name="section3_5"></a> 3.5. Vector functions
+
+#### 3.5.1. GLM_EXT_vector_common
+
+This extension exposes support for `min` and `max` functions taking more than two vector arguments. Also, it adds `fmin` and `fmax` variants which prevent `NaN` propagation.
+
+```cpp
+#include <glm/ext/vector_float2.hpp> // vec2
+#include <glm/ext/vector_common.hpp> // fmax
+
+glm::vec2 positiveMax(glm::vec2 const& a, glm::vec2 const& b)
+{
+ return glm::fmax(a, b, glm::vec2(0.0f));
+}
+```
+
+Include `<glm/ext/vector_common.hpp>` to use these features.
+
+#### 3.5.2. GLM_EXT_vector_relational
+
+This extension exposes `equal` and `notEqual` vector variants which take an epsilon argument.
+
+```cpp
+#include <glm/ext/vector_float2.hpp> // vec2
+#include <glm/ext/vector_relational.hpp> // equal, all
+
+bool epsilonEqual(glm::vec2 const& A, glm::vec2 const& B)
+{
+ float const CustomEpsilon = 0.0001f;
+ return glm::all(glm::equal(A, B, CustomEpsilon));
+}
+```
+
+Include `<glm/ext/vector_relational.hpp>` to use these features.
+
+#### 3.5.3. GLM_EXT_vector_ulp
+
+This extension exposes functions that measure the accuracy of numeric calculations.
+
+```cpp
+#include <glm/ext/vector_ulp.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_int4.hpp>
+
+bool test_ulp(glm::vec4 const& x)
+{
+ glm::vec4 const a = glm::next_float(x); // return a float a ULP away from the float argument.
+ return glm::all(float_distance(a, x) == glm::ivec4(1)); // check both float are a single ULP away.
+}
+```
+
+Include `<glm/ext/vector_ulp.hpp>` to use these features.
+
+### <a name="section3_6"></a> 3.6. Matrix types
+
+#### 3.6.1. GLM_EXT_matrix_float2x2
+
+This extension exposes a single-precision floating point matrix with 2 columns by 2 rows: `mat2x2`.
+
+Include `<glm/ext/matrix_float2x2.hpp>` to use these features.
+
+#### 3.6.2. GLM_EXT_matrix_float2x3
+
+This extension exposes a single-precision floating point matrix with 2 columns by 3 rows: `mat2x3`.
+
+Include `<glm/ext/matrix_float2x3.hpp>` to use these features.
+
+#### 3.6.3. GLM_EXT_matrix_float2x4
+
+This extension exposes a single-precision floating point matrix with 2 columns by 4 rows: `mat2x4`.
+
+Include `<glm/ext/matrix_float2x4.hpp>` to use these features.
+
+#### 3.6.4. GLM_EXT_matrix_float3x2
+
+This extension exposes a single-precision floating point matrix with 3 columns by 2 rows: `mat3x2`.
+
+Include `<glm/ext/matrix_float3x2.hpp>` to use these features.
+
+#### 3.6.5. GLM_EXT_matrix_float3x3
+
+This extension exposes a single-precision floating point matrix with 3 columns by 3 rows: `mat3x3`.
+
+Include `<glm/ext/matrix_float3x3.hpp>` to use these features.
+
+#### 3.6.6. GLM_EXT_matrix_float3x4
+
+This extension exposes a single-precision floating point matrix with 3 columns by 4 rows: `mat3x4`.
+
+Include `<glm/ext/matrix_float3x4.hpp>` to use these features.
+
+#### 3.6.7. GLM_EXT_matrix_float4x2
+
+This extension exposes single-precision floating point vector with 4 columns by 2 rows: `mat4x2`.
+
+Include `<glm/ext/matrix_float4x2.hpp>` to use these features.
+
+#### 3.6.8. GLM_EXT_matrix_float4x3
+
+This extension exposes single-precision floating point vector with 4 columns by 3 rows: `mat4x3`.
+
+Include `<glm/ext/matrix_float4x3.hpp>` to use these features.
+
+#### 3.6.9. GLM_EXT_matrix_float4x4
+
+This extension exposes single-precision floating point vector with 4 columns by 4 rows: `mat4x4`.
+
+Include `<glm/ext/matrix_float4x4.hpp>` to use these features.
+
+#### 3.6.10. GLM_EXT_matrix_double2x2
+
+This extension exposes double-precision floating point vector with 2 columns by 2 rows: `dmat2x2`.
+
+Include `<glm/ext/matrix_double2x2.hpp>` to use these features.
+
+#### 3.6.11. GLM_EXT_matrix_double2x3
+
+This extension exposes double-precision floating point vector with 2 columns by 3 rows: `dmat2x3`.
+
+Include `<glm/ext/matrix_double2x3.hpp>` to use these features.
+
+#### 3.6.12. GLM_EXT_matrix_double2x4
+
+This extension exposes double-precision floating point vector with 2 columns by 4 rows: `dmat2x4`.
+
+Include `<glm/ext/matrix_double2x4.hpp>` to use these features.
+
+#### 3.6.13. GLM_EXT_matrix_double3x2
+
+This extension exposes double-precision floating point vector with 3 columns by 2 rows: `dmat3x2`.
+
+Include `<glm/ext/matrix_double3x2.hpp>` to use these features.
+
+#### 3.6.14. GLM_EXT_matrix_double3x3
+
+This extension exposes double-precision floating point vector with 3 columns by 3 rows: `dmat3x3`.
+
+Include `<glm/ext/matrix_double3x3.hpp>` to use these features.
+
+#### 3.6.15. GLM_EXT_matrix_double3x4
+
+This extension exposes double-precision floating point vector with 3 columns by 4 rows: `dmat3x4`.
+
+Include `<glm/ext/matrix_double3x4.hpp>` to use these features.
+
+#### 3.6.16. GLM_EXT_matrix_double4x2
+
+This extension exposes double-precision floating point vector with 4 columns by 2 rows: `dmat4x2`.
+
+Include `<glm/ext/matrix_double4x2.hpp>` to use these features.
+
+#### 3.6.17. GLM_EXT_matrix_double4x3
+
+This extension exposes double-precision floating point vector with 4 columns by 3 rows: `dmat4x3`.
+
+Include `<glm/ext/matrix_double4x3.hpp>` to use these features.
+
+#### 3.6.18. GLM_EXT_matrix_double4x4
+
+This extension exposes double-precision floating point vector with 4 columns by 4 rows: `dmat4x4`.
+
+Include `<glm/ext/matrix_double4x4.hpp>` to use these features.
+
+### <a name="section3_7"></a> 3.7. Matrix types with precision qualifiers
+
+#### 3.7.1. GLM_EXT_matrix_float2x2_precision
+
+This extension exposes single-precision floating point matrix with 2 columns by 2 rows using various precisions in terms of ULPs: `lowp_mat2x2`, `mediump_mat2x2` and `highp_mat2x2`.
+
+Include `<glm/ext/matrix_float2x2_precision.hpp>` to use these features.
+
+#### 3.7.2. GLM_EXT_matrix_float2x3_precision
+
+This extension exposes single-precision floating point matrix with 2 columns by 3 rows using various precisions in terms of ULPs: `lowp_mat2x3`, `mediump_mat2x3` and `highp_mat2x3`.
+
+Include `<glm/ext/matrix_float2x3_precision.hpp>` to use these features.
+
+#### 3.7.3. GLM_EXT_matrix_float2x4_precision
+
+This extension exposes single-precision floating point matrix with 2 columns by 4 rows using various precisions in terms of ULPs: `lowp_mat2x4`, `mediump_mat2x4` and `highp_mat2x4`.
+
+Include `<glm/ext/matrix_float2x4_precision.hpp>` to use these features.
+
+#### 3.7.4. GLM_EXT_matrix_float3x2_precision
+
+This extension exposes single-precision floating point matrix with 3 columns by 2 rows using various precisions in terms of ULPs: `lowp_mat3x2`, `mediump_mat3x2` and `highp_mat3x2`.
+
+Include `<glm/ext/matrix_float3x2_precision.hpp>` to use these features.
+
+#### 3.7.5. GLM_EXT_matrix_float3x3_precision
+
+This extension exposes single-precision floating point matrix with 3 columns by 3 rows using various precisions in terms of ULPs: `lowp_mat3x3`, `mediump_mat3x3` and `highp_mat3x3`.
+
+Include `<glm/ext/matrix_float3x3_precision.hpp>` to use these features.
+
+#### 3.7.6. GLM_EXT_matrix_float3x4_precision
+
+This extension exposes single-precision floating point matrix with 3 columns by 4 rows using various precisions in terms of ULPs: `lowp_mat3x4`, `mediump_mat3x4` and `highp_mat3x4`.
+
+Include `<glm/ext/matrix_float3x4_precision.hpp>` to use these features.
+
+#### 3.7.7. GLM_EXT_matrix_float4x2_precision
+
+This extension exposes single-precision floating point matrix with 4 columns by 2 rows using various precisions in terms of ULPs: `lowp_mat4x2`, `mediump_mat4x2` and `highp_mat4x2`.
+
+Include `<glm/ext/matrix_float4x2_precision.hpp>` to use these features.
+
+#### 3.7.8. GLM_EXT_matrix_float4x3_precision
+
+This extension exposes single-precision floating point matrix with 4 columns by 3 rows using various precisions in terms of ULPs: `lowp_mat4x3`, `mediump_mat4x3` and `highp_mat4x3`.
+
+Include `<glm/ext/matrix_float4x3_precision.hpp>` to use these features.
+
+#### 3.7.9. GLM_EXT_matrix_float4x4_precision
+
+This extension exposes single-precision floating point matrix with 4 columns by 4 rows using various precisions in terms of ULPs: `lowp_mat4x4`, `mediump_mat4x4` and `highp_mat4x4`.
+
+Include `<glm/ext/matrix_float4x4_precision.hpp>` to use these features.
+
+#### 3.7.10. GLM_EXT_matrix_double2x2_precision
+
+This extension exposes double-precision floating point matrix with 2 columns by 2 rows using various precisions in terms of ULPs: `lowp_dmat2x2`, `mediump_dmat2x2` and `highp_dmat2x2`.
+
+Include `<glm/ext/matrix_double2x2_precision.hpp>` to use these features.
+
+#### 3.7.11. GLM_EXT_matrix_double2x3_precision
+
+This extension exposes double-precision floating point matrix with 2 columns by 3 rows using various precisions in terms of ULPs: `lowp_dmat2x3`, `mediump_dmat2x3` and `highp_dmat2x3`.
+
+Include `<glm/ext/matrix_double2x3_precision.hpp>` to use these features.
+
+#### 3.7.12. GLM_EXT_matrix_double2x4_precision
+
+This extension exposes double-precision floating point matrix with 2 columns by 4 rows using various precisions in terms of ULPs: `lowp_dmat2x4`, `mediump_dmat2x4` and `highp_dmat2x4`.
+
+Include `<glm/ext/matrix_double2x4_precision.hpp>` to use these features.
+
+#### 3.7.13. GLM_EXT_matrix_double3x2_precision
+
+This extension exposes double-precision floating point matrix with 3 columns by 2 rows using various precisions in terms of ULPs: `lowp_dmat3x2`, `mediump_dmat3x2` and `highp_dmat3x2`.
+
+Include `<glm/ext/matrix_double3x2_precision.hpp>` to use these features.
+
+#### 3.7.14. GLM_EXT_matrix_double3x3_precision
+
+This extension exposes double-precision floating point matrix with 3 columns by 3 rows using various precisions in terms of ULPs: `lowp_dmat3x3`, `mediump_dmat3x3` and `highp_dmat3x3`.
+
+Include `<glm/ext/matrix_double3x3_precision.hpp>` to use these features.
+
+#### 3.7.15. GLM_EXT_matrix_double3x4_precision
+
+This extension exposes double-precision floating point matrix with 3 columns by 4 rows using various precisions in terms of ULPs: `lowp_dmat3x4`, `mediump_dmat3x4` and `highp_dmat3x4`.
+
+Include `<glm/ext/matrix_double3x4_precision.hpp>` to use these features.
+
+#### 3.7.16. GLM_EXT_matrix_double4x2_precision
+
+This extension exposes double-precision floating point matrix with 4 columns by 2 rows using various precisions in terms of ULPs: `lowp_dmat4x2`, `mediump_dmat4x2` and `highp_dmat4x2`.
+
+Include `<glm/ext/matrix_double4x2_precision.hpp>` to use these features.
+
+#### 3.7.17. GLM_EXT_matrix_double4x3_precision
+
+This extension exposes double-precision floating point matrix with 4 columns by 3 rows using various precisions in terms of ULPs: `lowp_dmat4x3`, `mediump_dmat4x3` and `highp_dmat4x3`.
+
+Include `<glm/ext/matrix_double4x3_precision.hpp>` to use these features.
+
+#### 3.7.18. GLM_EXT_matrix_double4x4_precision
+
+This extension exposes double-precision floating point matrix with 4 columns by 4 rows using various precisions in terms of ULPs: `lowp_dmat4x4`, `mediump_dmat4x4` and `highp_dmat4x4`.
+
+Include `<glm/ext/matrix_double4x4_precision.hpp>` to use these features.
+
+### <a name="section3_8"></a> 3.8. Matrix functions
+
+#### 3.8.1. GLM_EXT_matrix_relational
+
+This extension exposes `equal` and `notEqual` matrix variants which take an optional epsilon argument.
+
+```cpp
+#include <glm/ext/vector_bool4.hpp> // bvec4
+#include <glm/ext/matrix_float4x4.hpp> // mat4
+#include <glm/ext/matrix_relational.hpp> // equal, all
+
+bool epsilonEqual(glm::mat4 const& A, glm::mat4 const& B)
+{
+ float const CustomEpsilon = 0.0001f;
+ glm::bvec4 const ColumnEqual = glm::equal(A, B, CustomEpsilon); // Evaluation per column
+ return glm::all(ColumnEqual);
+}
+```
+
+Include `<glm/ext/matrix_relational.hpp>` to use these features.
+
+#### 3.8.2. GLM_EXT_matrix_transform
+
+This extension exposes matrix transformation functions: `translate`, `rotate` and `scale`.
+
+```cpp
+#include <glm/ext/vector_float2.hpp> // vec2
+#include <glm/ext/vector_float3.hpp> // vec3
+#include <glm/ext/matrix_float4x4.hpp> // mat4x4
+#include <glm/ext/matrix_transform.hpp> // translate, rotate, scale, identity
+
+glm::mat4 computeModelViewMatrix(float Translate, glm::vec2 const & Rotate)
+{
+ glm::mat4 View = glm::translate(glm::identity<glm::mat4>(), glm::vec3(0.0f, 0.0f, -Translate));
+ View = glm::rotate(View, Rotate.y, glm::vec3(-1.0f, 0.0f, 0.0f));
+ View = glm::rotate(View, Rotate.x, glm::vec3(0.0f, 1.0f, 0.0f));
+ glm::mat4 Model = glm::scale(glm::identity<glm::mat4>(), glm::vec3(0.5f));
+ return View * Model;
+}
+```
+
+Include `<glm/ext/matrix_transform.hpp>` to use these features.
+
+#### 3.8.3. GLM_EXT_matrix_clip_space
+
+This extension exposes functions to transform scenes into the clip space.
+
+```cpp
+#include <glm/ext/matrix_float4x4.hpp> // mat4x4
+#include <glm/ext/matrix_clip_space.hpp> // perspective
+#include <glm/trigonometric.hpp> // radians
+
+glm::mat4 computeProjection(float Width, float Height)
+{
+ return glm::perspective(glm::radians(45.0f), Width / Height, 0.1f, 100.f);
+}
+```
+
+Include `<glm/ext/matrix_clip_space.hpp>` to use these features.
+
+#### 3.8.4. GLM_EXT_matrix_projection
+
+This extension exposes functions to map object coordinates into window coordinates and reverse
+
+Include `<glm/ext/matrix_projection.hpp>` to use these features.
+
+### <a name="section3_9"></a> 3.9. Quaternion types
+
+#### 3.9.1. GLM_EXT_quaternion_float
+
+This extension exposes single-precision floating point quaternion: `quat`.
+
+Include `<glm/ext/quaternion_float.hpp>` to use these features.
+
+#### 3.9.2. GLM_EXT_quaternion_double
+
+This extension exposes double-precision floating point quaternion: `dquat`.
+
+Include `<glm/ext/quaternion_double.hpp>` to use these features.
+
+### <a name="section3_10"></a> 3.10. Quaternion types with precision qualifiers
+
+#### 3.10.1. GLM_EXT_quaternion_float_precision
+
+This extension exposes single-precision floating point quaternion using various precision in term of ULPs: `lowp_quat`, `mediump_quat` and `highp_quat`.
+
+Include `<glm/ext/quaternion_float_precision.hpp>` to use these features.
+
+#### 3.10.2. GLM_EXT_quaternion_double_precision
+
+This extension exposes double-precision floating point quaternion using various precision in term of ULPs: `lowp_dquat`, `mediump_dquat` and `highp_dquat`.
+
+Include `<glm/ext/quaternion_double_precision.hpp>` to use these features.
+
+### <a name="section3_11"></a> 3.11. Quaternion functions
+
+#### 3.11.1. GLM_EXT_quaternion_common
+
+This extension exposes common quaternion functions such as `slerp`, `conjugate` and `inverse`.
+
+Include `<glm/ext/quaternion_common.hpp>` to use these features.
+
+#### 3.11.2. GLM_EXT_quaternion_geometric
+
+This extension exposes geometric quaternion functions such as `length`, `normalize`, `dot` and `cross`.
+
+Include `<glm/ext/quaternion_geometric.hpp>` to use these features.
+
+#### 3.11.3. GLM_EXT_quaternion_trigonometric
+
+This extension exposes trigonometric quaternion functions such as `angle` and `axis`.
+
+Include `<glm/ext/quaternion_trigonometric.hpp>` to use these features.
+
+#### 3.11.4. GLM_EXT_quaternion_exponential
+
+This extensions expose exponential functions for quaternions such as `exp`, `log`, `pow` and `sqrt`.
+
+Include `<glm/ext/quaternion_exponential.hpp>` to use these features.
+
+#### 3.11.5. GLM_EXT_quaternion_relational
+
+This extension exposes relational functions to compare quaternions.
+
+Include `<glm/ext/quaternion_relational.hpp>` to use these features.
+
+#### 3.11.6. GLM_EXT_quaternion_transform
+
+This extension exposes functions to transform objects.
+
+Include `<glm/ext/quaternion_transform.hpp>` to use these features.
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section4"></a> 4. Recommended extensions
+
+GLM extends the core GLSL feature set with extensions. These extensions include: quaternion, transformation, spline, matrix inverse, color spaces, etc.
+
+To include an extension, we only need to include the dedicated header file. Once included, the features are added to the GLM namespace.
+
+```cpp
+#include <glm/glm.hpp>
+#include <glm/gtc/matrix_transform.hpp>
+
+int foo()
+{
+ glm::vec4 Position = glm::vec4(glm::vec3(0.0f), 1.0f);
+ glm::mat4 Model = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f));
+
+ glm::vec4 Transformed = Model * Position;
+ ...
+
+ return 0;
+}
+```
+
+When an extension is included, all the dependent core functionalities and extensions will be included as well.
+
+### <a name="section4_1"></a> 4.1. GLM_GTC_bitfield
+
+Fast bitfield operations on scalar and vector variables.
+
+`<glm/gtc/bitfield.hpp>` need to be included to use these features.
+
+### <a name="section4_2"></a> 4.2. GLM_GTC_color_space
+
+Conversion between linear RGB and sRGB color spaces.
+
+`<glm/gtc/color_space.hpp>` need to be included to use these features.
+
+### <a name="section4_3"></a> 4.3. GLM_GTC_constants
+
+Provide a list of built-in constants.
+
+`<glm/gtc/constants.hpp>` need to be included to use these features.
+
+### <a name="section4_4"></a> 4.4. GLM\_GTC\_epsilon
+
+Approximate equality comparisons for floating-point numbers, possibly with a user-defined epsilon.
+
+`<glm/gtc/epsilon.hpp>` need to be included to use these features.
+
+### <a name="section4_5"></a> 4.5. GLM\_GTC\_integer
+
+Integer variants of core GLM functions.
+
+`<glm/gtc/integer.hpp>` need to be included to use these features.
+
+### <a name="section4_6"></a> 4.6. GLM\_GTC\_matrix\_access
+
+Functions to conveniently access the individual rows or columns of a matrix.
+
+`<glm/gtc/matrix_access.hpp>` need to be included to use these features.
+
+### <a name="section4_7"></a> 4.7. GLM\_GTC\_matrix\_integer
+
+Integer matrix types similar to the core floating-point matrices. Some operations (such as inverse and determinant) are not supported.
+
+`<glm/gtc/matrix_integer.hpp>` need to be included to use these features.
+
+### <a name="section4_8"></a> 4.8. GLM\_GTC\_matrix\_inverse
+
+Additional matrix inverse functions.
+
+`<glm/gtc/matrix_inverse.hpp>` need to be included to use these features.
+
+### <a name="section4_9"></a> 4.9. GLM\_GTC\_matrix\_transform
+
+Matrix transformation functions that follow the OpenGL fixed-function conventions.
+
+The matrices generated by this extension use standard OpenGL fixed-function conventions. For example, the `lookAt` function generates a transformation matrix that maps world coordinates into the specific eye space that the projective matrix functions (e.g. `perspective`, `ortho`) are designed to expect. The OpenGL compatibility specifications define the particular layout of this eye space.
+
+`<glm/gtc/matrix_transform.hpp>` need to be included to use these features.
+
+### <a name="section4_10"></a> 4.10. GLM\_GTC\_noise
+
+Define 2D, 3D and 4D procedural noise functions.
+
+`<glm/gtc/noise.hpp>` need to be included to use these features.
+
+![](/doc/manual/noise-simplex1.jpg)
+
+Figure 4.10.1: glm::simplex(glm::vec2(x / 16.f, y / 16.f));
+
+![](/doc/manual/noise-simplex2.jpg)
+
+Figure 4.10.2: glm::simplex(glm::vec3(x / 16.f, y / 16.f, 0.5f));
+
+![](/doc/manual/noise-simplex3.jpg)
+
+Figure 4.10.3: glm::simplex(glm::vec4(x / 16.f, y / 16.f, 0.5f, 0.5f));
+
+![](/doc/manual/noise-perlin1.jpg)
+
+Figure 4.10.4: glm::perlin(glm::vec2(x / 16.f, y / 16.f));
+
+![](/doc/manual/noise-perlin2.jpg)
+
+Figure 4.10.5: glm::perlin(glm::vec3(x / 16.f, y / 16.f, 0.5f));
+
+![](/doc/manual/noise-perlin3.jpg)
+
+Figure 4.10.6: glm::perlin(glm::vec4(x / 16.f, y / 16.f, 0.5f, 0.5f));
+
+![](/doc/manual/noise-perlin4.png)
+
+Figure 4.10.7: glm::perlin(glm::vec2(x / 16.f, y / 16.f), glm::vec2(2.0f));
+
+![](/doc/manual/noise-perlin5.png)
+
+Figure 4.10.8: glm::perlin(glm::vec3(x / 16.f, y / 16.f, 0.5f), glm::vec3(2.0f));
+
+![](/doc/manual/noise-perlin6.png)
+
+Figure 4.10.9: glm::perlin(glm::vec4(x / 16.f, y / 16.f, glm::vec2(0.5f)), glm::vec4(2.0f));
+
+### <a name="section4_11"></a> 4.11. GLM\_GTC\_packing
+
+Convert scalar and vector types to and from packed formats, saving space at the cost of precision. However, packing a value into a format that it was previously unpacked from is guaranteed to be lossless.
+
+`<glm/gtc/packing.hpp>` need to be included to use these features.
+
+### <a name="section4_12"></a> 4.12. GLM\_GTC\_quaternion
+
+Quaternions and operations upon thereof.
+
+`<glm/gtc/quaternion.hpp>` need to be included to use these features.
+
+### <a name="section4_13"></a> 4.13. GLM\_GTC\_random
+
+Probability distributions in up to four dimensions.
+
+`<glm/gtc/random.hpp>` need to be included to use these features.
+
+![](/doc/manual/random-linearrand.png)
+
+Figure 4.13.1: glm::vec4(glm::linearRand(glm::vec2(-1), glm::vec2(1)), 0, 1);
+
+![](/doc/manual/random-circularrand.png)
+
+Figure 4.13.2: glm::vec4(glm::circularRand(1.0f), 0, 1);
+
+![](/doc/manual/random-sphericalrand.png)
+
+Figure 4.13.3: glm::vec4(glm::sphericalRand(1.0f), 1);
+
+![](/doc/manual/random-diskrand.png)
+
+Figure 4.13.4: glm::vec4(glm::diskRand(1.0f), 0, 1);
+
+![](/doc/manual/random-ballrand.png)
+
+Figure 4.13.5: glm::vec4(glm::ballRand(1.0f), 1);
+
+![](/doc/manual/random-gaussrand.png)
+
+Figure 4.13.6: glm::vec4(glm::gaussRand(glm::vec3(0), glm::vec3(1)), 1);
+
+### <a name="section4_14"></a> 4.14. GLM\_GTC\_reciprocal
+
+Reciprocal trigonometric functions (e.g. secant, cosecant, tangent).
+
+`<glm/gtc/reciprocal.hpp>` need to be included to use the features of this extension.
+
+### <a name="section4_15"></a> 4.15. GLM\_GTC\_round
+
+Various rounding operations and common special cases thereof.
+
+`<glm/gtc/round.hpp>` need to be included to use the features of this extension.
+
+### <a name="section4_16"></a> 4.16. GLM\_GTC\_type\_aligned
+
+Aligned vector types.
+
+`<glm/gtc/type_aligned.hpp>` need to be included to use the features of this extension.
+
+### <a name="section4_17"></a> 4.17. GLM\_GTC\_type\_precision
+
+Vector and matrix types with defined precisions, e.g. `i8vec4`, which is a 4D vector of signed 8-bit integers.
+
+`<glm/gtc/type_precision.hpp>` need to be included to use the features of this extension.
+
+### <a name="section4_18"></a> 4.18. GLM\_GTC\_type\_ptr
+
+Facilitate interactions between pointers to basic types (e.g. `float*`) and GLM types (e.g. `mat4`).
+
+This extension defines an overloaded function, `glm::value_ptr`, which returns a pointer to the memory layout of any GLM vector or matrix (`vec3`, `mat4`, etc.). Matrix types store their values in column-major order. This is useful for uploading data to matrices or for copying data to buffer objects.
+
+```cpp
+// GLM_GTC_type_ptr provides a safe solution:
+#include <glm/glm.hpp>
+#include <glm/gtc/type_ptr.hpp>
+
+void foo()
+{
+ glm::vec4 v(0.0f);
+ glm::mat4 m(1.0f);
+ ...
+ glVertex3fv(glm::value_ptr(v));
+ glLoadMatrixfv(glm::value_ptr(m));
+}
+
+// Another solution, this one inspired by the STL:
+#include <glm/glm.hpp>
+
+void foo()
+{
+ glm::vec4 v(0.0f);
+ glm::mat4 m(1.0f);
+ ...
+ glVertex3fv(&v[0]);
+ glLoadMatrixfv(&m[0][0]);
+}
+```
+
+*Note: It would be possible to implement [`glVertex3fv`](http://www.opengl.org/sdk/docs/man2/xhtml/glVertex.xml)(glm::vec3(0)) in C++ with an appropriate cast operator that would result in an
+implicit cast in this example. However, cast operators may produce programs running with unexpected behaviours without build errors or any form of notification.*
+
+`<glm/gtc/type_ptr.hpp>` need to be included to use these features.
+
+### <a name="section4_19"></a> 4.19. GLM\_GTC\_ulp
+
+Measure a function's accuracy given a reference implementation of it. This extension works on floating-point data and provides results in [ULP](http://ljk.imag.fr/membres/Carine.Lucas/TPScilab/JMMuller/ulp-toms.pdf).
+
+`<glm/gtc/ulp.hpp>` need to be included to use these features.
+
+### <a name="section4_20"></a> 4.20. GLM\_GTC\_vec1
+
+Add \*vec1 types.
+
+`<glm/gtc/vec1.hpp>` need to be included to use these features.
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section5"></a> 5. OpenGL interoperability
+
+### <a name="section5_1"></a> 5.1. GLM replacements for deprecated OpenGL functions
+
+OpenGL 3.1 specification has deprecated some features that have been removed from OpenGL 3.2 core profile specification. GLM provides some replacement functions.
+
+[***glRotate{f, d}:***](https://www.opengl.org/sdk/docs/man2/xhtml/glRotate.xml)
+
+```cpp
+glm::mat4 glm::rotate(glm::mat4 const& m, float angle, glm::vec3 const& axis);
+glm::dmat4 glm::rotate(glm::dmat4 const& m, double angle, glm::dvec3 const& axis);
+```
+
+From `GLM_GTC_matrix_transform` extension: &lt;glm/gtc/matrix\_transform.hpp&gt;
+
+[***glScale{f, d}:***](http://www.opengl.org/sdk/docs/man2/xhtml/glScale.xml)
+
+```cpp
+glm::mat4 glm::scale(glm::mat4 const& m, glm::vec3 const& factors);
+glm::dmat4 glm::scale(glm::dmat4 const& m, glm::dvec3 const& factors);
+```
+
+From `GLM_GTC_matrix_transform` extension: &lt;glm/gtc/matrix\_transform.hpp&gt;
+
+[***glTranslate{f, d}:***](https://www.opengl.org/sdk/docs/man2/xhtml/glTranslate.xml)
+
+```cpp
+glm::mat4 glm::translate(glm::mat4 const& m, glm::vec3 const& translation);
+glm::dmat4 glm::translate(glm::dmat4 const& m, glm::dvec3 const& translation);
+```
+
+From `GLM_GTC_matrix_transform` extension: &lt;glm/gtc/matrix\_transform.hpp&gt;
+
+[***glLoadIdentity:***](https://www.opengl.org/sdk/docs/man2/xhtml/glLoadIdentity.xml)
+
+```cpp
+glm::mat4(1.0) or glm::mat4();
+glm::dmat4(1.0) or glm::dmat4();
+```
+
+From GLM core library: `<glm/glm.hpp>`
+
+[***glMultMatrix{f, d}:***](https://www.opengl.org/sdk/docs/man2/xhtml/glMultMatrix.xml)
+
+```cpp
+glm::mat4() * glm::mat4();
+glm::dmat4() * glm::dmat4();
+```
+
+From GLM core library: `<glm/glm.hpp>`
+
+[***glLoadTransposeMatrix{f, d}:***](https://www.opengl.org/sdk/docs/man2/xhtml/glLoadTransposeMatrix.xml)
+
+```cpp
+glm::transpose(glm::mat4());
+glm::transpose(glm::dmat4());
+```
+
+From GLM core library: `<glm/glm.hpp>`
+
+[***glMultTransposeMatrix{f, d}:***](https://www.opengl.org/sdk/docs/man2/xhtml/glMultTransposeMatrix.xml)
+
+```cpp
+glm::mat4() * glm::transpose(glm::mat4());
+glm::dmat4() * glm::transpose(glm::dmat4());
+```
+
+From GLM core library: `<glm/glm.hpp>`
+
+[***glFrustum:***](http://www.opengl.org/sdk/docs/man2/xhtml/glFrustum.xml)
+
+```cpp
+glm::mat4 glm::frustum(float left, float right, float bottom, float top, float zNear, float zFar);
+glm::dmat4 glm::frustum(double left, double right, double bottom, double top, double zNear, double zFar);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+[***glOrtho:***](https://www.opengl.org/sdk/docs/man2/xhtml/glOrtho.xml)
+
+```cpp
+glm::mat4 glm::ortho(float left, float right, float bottom, float top, float zNear, float zFar);
+glm::dmat4 glm::ortho(double left, double right, double bottom, double top, double zNear, double zFar);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+### <a name="section5_2"></a> 5.2. GLM replacements for GLU functions
+
+[***gluLookAt:***](https://www.opengl.org/sdk/docs/man2/xhtml/gluLookAt.xml)
+
+```cpp
+glm::mat4 glm::lookAt(glm::vec3 const& eye, glm::vec3 const& center, glm::vec3 const& up);
+glm::dmat4 glm::lookAt(glm::dvec3 const& eye, glm::dvec3 const& center, glm::dvec3 const& up);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+[***gluOrtho2D:***](https://www.opengl.org/sdk/docs/man2/xhtml/gluOrtho2D.xml)
+
+```cpp
+glm::mat4 glm::ortho(float left, float right, float bottom, float top);
+glm::dmat4 glm::ortho(double left, double right, double bottom, double top);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+[***gluPerspective:***](https://www.opengl.org/sdk/docs/man2/xhtml/gluPerspective.xml)
+
+```cpp
+glm::mat4 perspective(float fovy, float aspect, float zNear, float zFar);
+glm::dmat4 perspective(double fovy, double aspect, double zNear, double zFar);
+```
+
+Note that in GLM, fovy is expressed in radians, not degrees.
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+[***gluPickMatrix:***](https://www.opengl.org/sdk/docs/man2/xhtml/gluPickMatrix.xml)
+
+```cpp
+glm::mat4 pickMatrix(glm::vec2 const& center, glm::vec2 const& delta, glm::ivec4 const& viewport);
+glm::dmat4 pickMatrix(glm::dvec2 const& center, glm::dvec2 const& delta, glm::ivec4 const& viewport);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+[***gluProject:***](http://www.opengl.org/sdk/docs/man2/xhtml/gluProject.xml)
+
+```cpp
+glm::vec3 project(glm::vec3 const& obj, glm::mat4 const& model, glm::mat4 const& proj, glm::ivec4 const& viewport);
+glm::dvec3 project(glm::dvec3 const& obj, glm::dmat4 const& model, glm::dmat4 const& proj, glm::ivec4 const& viewport);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+[***gluUnProject:***](https://www.opengl.org/sdk/docs/man2/xhtml/gluUnProject.xml)
+
+```cpp
+glm::vec3 unProject(glm::vec3 const& win, glm::mat4 const& model, glm::mat4 const& proj, glm::ivec4 const& viewport);
+glm::dvec3 unProject(glm::dvec3 const& win, glm::dmat4 const& model, glm::dmat4 const& proj, glm::ivec4 const& viewport);
+```
+
+From `GLM_GTC_matrix_transform` extension: `<glm/gtc/matrix_transform.hpp>`
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section6"></a> 6. Known issues
+
+This section reports GLSL features that GLM can't accurately emulate due to language restrictions.
+
+### <a name="section6_1"></a> 6.1. not function
+
+The GLSL function 'not' is a keyword in C++. To prevent name collisions and ensure a consistent API, the name `not_` (note the underscore) is used instead.
+
+### <a name="section6_2"></a> 6.2. Precision qualifiers support
+
+GLM supports GLSL precision qualifiers through prefixes instead of qualifiers. For example, GLM exposes \verb|lowp_vec4|, \verb|mediump_vec4| and \verb|highp_vec4| as variations of \verb|vec4|.
+
+Similarly to GLSL, GLM precision qualifiers are used to trade precision of operations in term of [ULPs](http://en.wikipedia.org/wiki/Unit_in_the_last_place) for better performance. By default, all the types use high precision.
+
+```cpp
+// Using precision qualifier in GLSL:
+
+ivec3 foo(in vec4 v)
+{
+ highp vec4 a = v;
+ mediump vec4 b = a;
+ lowp ivec3 c = ivec3(b);
+ return c;
+}
+
+// Using precision qualifier in GLM:
+
+#include <glm/glm.hpp>
+
+ivec3 foo(const vec4 & v)
+{
+ highp_vec4 a = v;
+ mediump_vec4 b = a;
+ lowp_ivec3 c = glm::ivec3(b);
+ return c;
+}
+```
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section7"></a> 7. FAQ
+
+### <a name="section7_1"></a> 7.1 Why GLM follows GLSL specification and conventions?
+
+Following GLSL conventions is a really strict policy of GLM. It has been designed following the idea that every developer tends to write their own math library with their own conventions. The idea is that brilliant developers (the OpenGL ARB) worked together and agreed to make GLSL. Following GLSL conventions
+is a way to find consensus. Moreover, a developer who knows GLSL basically already knows GLM.
+
+### <a name="section7_2"></a> 7.2. Does GLM run GLSL program?
+
+No, GLM is a C++ implementation of a subset of GLSL.
+
+### <a name="section7_3"></a> 7.3. Does a GLSL compiler build GLM codes?
+
+No, this is not what GLM intends to do.
+
+### <a name="section7_4"></a> 7.4. Should I use ‘GTX’ extensions?
+
+GTX extensions are qualified as experimental extensions. In GLM this means that these extensions might change from version to version without any restriction. In practice, they rarely change except from time to
+time. GTC extensions are stable, tested and reliable over time. Many GTX extensions extend GTC extensions, providing a way to explore features, implementations and APIs before being promoted to GTC
+extensions. This is essentially the way OpenGL features are developed: through extensions.
+
+Starting with GLM 0.9.9, to use experimental extensions, an application must define GLM_ENABLE_EXPERIMENTAL.
+
+### <a name="section7_5"></a> 7.5. Where can I ask my questions?
+
+A good place is [stackoverflow](http://stackoverflow.com/search?q=GLM) using the GLM tag.
+
+### <a name="section7_6"></a> 7.6. Where can I find the documentation of extensions?
+
+The Doxygen generated documentation includes a complete list of all extensions available. Explore this [*API documentation*](http://glm.g-truc.net/html/index.html) to get a complete
+view of all GLM capabilities!
+
+### <a name="section7_7"></a> 7.7. Should I use ‘using namespace glm;’?
+
+NO! Chances are that if `using namespace glm;` is used, especially in a header file, name collisions will happen as GLM is based on GLSL which uses common tokens for types and functions. Avoiding `using namespace
+glm;` will ensure higher compatibility with third party libraries and SDKs.
+
+### <a name="section7_8"></a> 7.8. Is GLM fast?
+
+GLM is mainly designed to be convenient and that's why it is written against the GLSL specification.
+
+Following the Pareto principle, where 20% of the code consumes 80% of the execution time, GLM operates perfectly on the 80% of the code that only consumes 20% of the execution time. Furthermore, thanks to the lowp,
+mediump and highp qualifiers, GLM provides approximations which trade precision for performance. Finally, GLM can automatically produce SIMD optimized code for functions of its implementation.
+
+However, on performance critical code paths, we should expect that dedicated algorithms should be written to reach peak performance.
+
+### <a name="section7_9"></a> 7.9. When I build with Visual C++ with /W4 warning level, I have warnings...
+
+You should not have any warnings even in `/W4` mode. However, if you expect such level for your code, then you should ask for the same level to the compiler by at least disabling the Visual C++ language extensions
+(`/Za`) which generates warnings when used. If these extensions are enabled, then GLM will take advantage of them and the compiler will generate warnings.
+
+### <a name="section7_10"></a> 7.10. Why some GLM functions can crash because of division by zero?
+
+GLM functions crashing is the result of a domain error. Such behavior follows the precedent set by C and C++'s standard library. For example, it’s a domain error to pass a null vector (all zeroes) to glm::normalize function, or to pass a negative number into std::sqrt.
+
+### <a name="section7_11"></a> 7.11. What unit for angles is used in GLM?
+
+GLSL is using radians but GLU is using degrees to express angles. This has caused GLM to use inconsistent units for angles. Starting with GLM 0.9.6, all GLM functions are using radians. For more information, follow
+the [link](http://www.g-truc.net/post-0693.html#menu).
+
+### <a name="section7_12"></a> 7.12. Windows headers cause build errors...
+
+Some Windows headers define min and max as macros which may cause compatibility issues with third party libraries such as GLM.
+It is highly recommended to [`define NOMINMAX`](http://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c) before including Windows headers to workaround this issue.
+To workaround the incompatibility with these macros, GLM will systematically undef these macros if they are defined.
+
+### <a name="section7_13"></a> 7.13. Constant expressions support
+
+GLM has some C++ <a href="http://en.cppreference.com/w/cpp/language/constexpr">constant expressions</a> support. However, GLM automatically detects the use of SIMD instruction sets through compiler arguments to populate its implementation with SIMD intrinsics.
+Unfortunately, GCC and Clang don't support SIMD intrinsics as constant expressions. To allow constant expressions on all vector and matrix types, define `GLM_FORCE_PURE` before including GLM headers.
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section8"></a> 8. Code samples
+
+This series of samples only shows various GLM features without consideration of any sort.
+
+### <a name="section8_1"></a> 8.1. Compute a triangle normal
+
+```cpp
+#include <glm/glm.hpp> // vec3 normalize cross
+
+glm::vec3 computeNormal(glm::vec3 const& a, glm::vec3 const& b, glm::vec3 const& c)
+{
+ return glm::normalize(glm::cross(c - a, b - a));
+}
+
+// A much faster but less accurate alternative:
+#include <glm/glm.hpp> // vec3 cross
+#include <glm/gtx/fast_square_root.hpp> // fastNormalize
+
+glm::vec3 computeNormal(glm::vec3 const& a, glm::vec3 const& b, glm::vec3 const& c)
+{
+ return glm::fastNormalize(glm::cross(c - a, b - a));
+}
+```
+
+### <a name="section8_2"></a> 8.2. Matrix transform
+
+```cpp
+#include <glm/glm.hpp> // vec3, vec4, ivec4, mat4
+#include <glm/gtc/matrix_transform.hpp> // translate, rotate, scale, perspective
+#include <glm/gtc/type_ptr.hpp> // value_ptr
+
+void setUniformMVP(GLuint Location, glm::vec3 const& Translate, glm::vec3 const& Rotate)
+{
+ glm::mat4 Projection = glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 100.f);
+ glm::mat4 ViewTranslate = glm::translate(
+ glm::mat4(1.0f), Translate);
+ glm::mat4 ViewRotateX = glm::rotate(
+ ViewTranslate, Rotate.y, glm::vec3(-1.0f, 0.0f, 0.0f));
+ glm::mat4 View = glm::rotate(ViewRotateX,
+ Rotate.x, glm::vec3(0.0f, 1.0f, 0.0f));
+ glm::mat4 Model = glm::scale(
+ glm::mat4(1.0f), glm::vec3(0.5f));
+ glm::mat4 MVP = Projection * View * Model;
+ glUniformMatrix4fv(Location, 1, GL_FALSE, glm::value_ptr(MVP));
+}
+```
+
+### <a name="section8_3"></a> 8.3. Vector types
+
+```cpp
+#include <glm/glm.hpp> // vec2
+#include <glm/gtc/type_precision.hpp> // hvec2, i8vec2, i32vec2
+
+std::size_t const VertexCount = 4;
+
+// Float quad geometry
+std::size_t const PositionSizeF32 = VertexCount * sizeof(glm::vec2);
+glm::vec2 const PositionDataF32[VertexCount] =
+{
+ glm::vec2(-1.0f,-1.0f),
+ glm::vec2( 1.0f,-1.0f),
+ glm::vec2( 1.0f, 1.0f),
+ glm::vec2(-1.0f, 1.0f)
+};
+
+// Half-float quad geometry
+std::size_t const PositionSizeF16 = VertexCount * sizeof(glm::hvec2);
+glm::hvec2 const PositionDataF16[VertexCount] =
+{
+ glm::hvec2(-1.0f, -1.0f),
+ glm::hvec2( 1.0f, -1.0f),
+ glm::hvec2( 1.0f, 1.0f),
+ glm::hvec2(-1.0f, 1.0f)
+};
+
+// 8 bits signed integer quad geometry
+std::size_t const PositionSizeI8 = VertexCount * sizeof(glm::i8vec2);
+glm::i8vec2 const PositionDataI8[VertexCount] =
+{
+ glm::i8vec2(-1,-1),
+ glm::i8vec2( 1,-1),
+ glm::i8vec2( 1, 1),
+ glm::i8vec2(-1, 1)
+};
+
+// 32 bits signed integer quad geometry
+std::size_t const PositionSizeI32 = VertexCount * sizeof(glm::i32vec2);
+glm::i32vec2 const PositionDataI32[VertexCount] =
+{
+ glm::i32vec2(-1,-1),
+ glm::i32vec2( 1,-1),
+ glm::i32vec2( 1, 1),
+ glm::i32vec2(-1, 1)
+};
+```
+
+### <a name="section8_4"></a> 8.4. Lighting
+
+```cpp
+#include <glm/glm.hpp> // vec3 normalize reflect dot pow
+#include <glm/gtc/random.hpp> // ballRand
+
+// vecRand3, generate a random and equiprobable normalized vec3
+glm::vec3 lighting(intersection const& Intersection, material const& Material, light const& Light, glm::vec3 const& View)
+{
+ glm::vec3 Color = glm::vec3(0.0f);
+ glm::vec3 LightVector = glm::normalize(
+ Light.position() - Intersection.globalPosition() +
+ glm::ballRand(0.0f, Light.inaccuracy()));
+
+ if(!shadow(Intersection.globalPosition(), Light.position(), LightVector))
+ {
+ float Diffuse = glm::dot(Intersection.normal(), LightVector);
+ if(Diffuse <= 0.0f)
+ return Color;
+
+ if(Material.isDiffuse())
+ Color += Light.color() * Material.diffuse() * Diffuse;
+
+ if(Material.isSpecular())
+ {
+ glm::vec3 Reflect = glm::reflect(-LightVector, Intersection.normal());
+ float Dot = glm::dot(Reflect, View);
+ float Base = Dot > 0.0f ? Dot : 0.0f;
+ float Specular = glm::pow(Base, Material.exponent());
+ Color += Material.specular() * Specular;
+ }
+ }
+
+ return Color;
+}
+```
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section9"></a> 9. Contributing to GLM
+
+### <a name="section9_1"></a> 9.1. Submitting bug reports
+
+Bug should be reported on Github using the [issue page](https://github.com/g-truc/glm/issues).
+
+A minimal code to reproduce the issue will help.
+
+Additionally, bugs can be configuration specific. We can report the configuration by defining `GLM_FORCE_MESSAGES` before including GLM headers then build and copy paste the build messages GLM will output.
+
+```cpp
+#define GLM_FORCE_MESSAGES
+#include <glm/glm.hpp>
+```
+
+An example of build messages generated by GLM:
+```
+GLM: 0.9.9.1
+GLM: C++ 17 with extensions
+GLM: GCC compiler detected
+GLM: x86 64 bits with AVX instruction set build target
+GLM: Linux platform detected
+GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.
+GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.
+GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.
+GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.
+GLM: GLM_FORCE_LEFT_HANDED is undefined. Using right handed coordinate system.
+```
+
+### <a name="section9_2"></a> 9.2. Contributing to GLM with pull request
+
+This tutorial will show us how to successfully contribute a bug-fix to GLM using GitHub's Pull Request workflow.
+
+We will be typing git commands in the Terminal. Mac and Linux users may have git pre-installed. You can download git from [here](http://git-scm.com/downloads).
+
+The tutorial assumes you have some basic understanding of git concepts - repositories, branches, commits, etc. Explaining it all from scratch is beyond the scope of this tutorial. Some good links to learn git basics are: [Link 1](http://git-scm.com/book/en/Getting-Started-Git-Basics), [Link 2](https://www.atlassian.com/git/tutorial/git-basics)
+
+#### Step 1: Setup our GLM Fork
+
+We will make our changes in our own copy of the GLM repository. On the GLM GitHub repo page, we press the Fork button.
+We need to download a copy of our fork to our local machine. In the terminal, type:
+
+```
+>>> git clone <our-repository-fork-git-url>
+```
+
+This will clone our fork repository into the current folder.
+
+We can find our repository git url on the Github repository page. The url looks like this: `https://github.com/<our-username>/<repository-name>.git`
+
+#### Step 2: Synchronizing our fork
+
+We can use the following command to add `upstream` (original project repository) as a remote repository so that we can fetch the latest GLM commits into our branch and keep our forked copy synchronized.
+
+```
+>>> git remote add upstream https://github.com/g-truc/glm.git
+```
+
+To synchronize our fork to the latest commit in the GLM repository, we can use the following command:
+
+```
+>>> git fetch upstream
+```
+
+Then, we can merge the remote master branch to our current branch:
+
+```
+>>> git merge upstream/master
+```
+
+Now our local copy of our fork has been synchronized. However, the fork's copy is not updated on GitHub's servers yet. To do that, use:
+
+```
+>>> git push origin master
+```
+
+#### Step 3: Modifying our GLM Fork
+
+Our fork is now setup and we are ready to modify GLM to fix a bug.
+
+It's a good practice to make changes in our fork in a separate branch than the master branch because we can submit only one pull request per branch.
+
+Before creating a new branch, it's best to synchronize our fork and then create a new branch from the latest master branch.
+
+If we are not on the master branch, we should switch to it using:
+```
+>>> git checkout master
+```
+
+To create a new branch called `bugfix`, we use:
+```
+git branch bugfix
+```
+
+Once the code changes for the fix is done, we need to commit the changes:
+```
+>>> git commit -m "Resolve the issue that caused problem with a specific fix #432"
+```
+
+The commit message should be as specific as possible and finished by the bug number in the [GLM GitHub issue page](https://github.com/g-truc/glm/issues)
+
+Finally, we need to push our changes in our branch to our GitHub fork using:
+```
+>>> git push origin bugfix
+```
+
+Some things to keep in mind for a pull request:
+* Keep it minimal: Try to make the minimum required changes to fix the issue. If we have added any debugging code, we should remove it.
+* A fix at a time: The pull request should deal with one issue at a time only, unless two issues are so interlinked they must be fixed together.
+* Write a test: GLM is largely covered by unit tests, which live in the `glm/test` directory. We should also add tests for the fixes we provide to ensure future regressions don't happen.
+* No whitespace changes: Avoid unnecessary formatting or whitespace changes in other parts of the code. Be careful with auto-format options in the code editor which can cause wide scale formatting changes.
+* Follow [GLM Code Style](#section9_3) for consistency.
+* Tests pass: Make sure the GLM build and tests don't fail because of the changes.
+
+#### Step 4: Submitting a Pull Request
+
+We need to submit a pull request from the `bugfix` branch to GLM's master branch.
+
+On the fork github page, we can click on the *Pull Request* button. Then we can describe our pull request. Finally we press *Send Pull Request*.
+
+Please be patient and give them some time to go through it.
+
+The pull request review may suggest additional changes. So we can make those changes in our branch, and push those changes to our fork repository. Our pull request will always include the latest changes in our branch on GitHub, so we don't need to resubmit the pull request.
+
+Once your changes have been accepted, a project maintainer will merge our pull request.
+
+We are grateful to the users for their time and effort in contributing fixes.
+
+### <a name="section9_3"></a> 9.3. Coding style
+
+#### Indentation
+
+Always tabs. Never spaces.
+
+#### Spacing
+
+No space after if. Use if(blah) not if (blah). Example if/else block:
+
+```cpp
+if(blah)
+{
+ // yes like this
+}
+else
+{
+ // something besides
+}
+```
+
+Single line if blocks:
+```cpp
+if(blah)
+ // yes like this
+else
+ // something besides
+```
+
+No spaces inside parens:
+```cpp
+if (blah) // No
+if( blah ) // No
+if ( blah ) // No
+if(blah) // Yes
+```
+
+Use spaces before/after commas:
+```cpp
+someFunction(apple,bear,cat); // No
+someFunction(apple, bear, cat); // Yes
+```
+
+Use spaces before/after use of `+, -, *, /, %, >>, <<, |, &, ^, ||, &&` operators:
+```cpp
+vec4 v = a + b;
+```
+
+#### Blank lines
+
+One blank line after the function blocks.
+
+#### Comments
+
+Always one space after the // in single line comments
+
+One space before // at the end of a line (that has code as well)
+
+Try to use // comments inside functions, to make it easier to remove a whole block via /* */
+
+#### Cases
+
+```cpp
+#define GLM_MY_DEFINE 76
+
+class myClass
+{};
+
+myClass const MyClass;
+
+namespace glm{ // glm namespace is for public code
+namespace detail // glm::detail namespace is for implementation detail
+{
+ float myFunction(vec2 const& V)
+ {
+ return V.x + V.y;
+ }
+
+ float myFunction(vec2 const* const V)
+ {
+ return V->x + V->y;
+ }
+}//namespace detail
+}//namespace glm
+```
+
+---
+<div style="page-break-after: always;"> </div>
+
+## <a name="section10"></a> 10. References
+
+### <a name="section10_1"></a> 10.1. OpenGL specifications
+
+* OpenGL 4.3 core specification
+* [GLSL 4.30 specification](http://www.opengl.org/registry/doc/GLSLangSpec.4.30.7.diff.pdf)
+![](media/image21.png){width="2.859722222222222in" height="1.6083333333333334in"}
+* [*GLU 1.3 specification*](http://www.opengl.org/documentation/specs/glu/glu1_3.pdf)
+
+### <a name="section10_2"></a> 10.2. External links
+
+* [GLM on stackoverflow](http://stackoverflow.com/search?q=GLM)
+
+### <a name="section10_3"></a> 10.3. Projects using GLM
+
+***[Leo’s Fortune](http://www.leosfortune.com/)***
+
+Leo’s Fortune is a platform adventure game where you hunt down the cunning and mysterious thief that stole your gold. Available on PS4, Xbox One, PC, Mac, iOS and Android.
+
+Beautifully hand-crafted levels bring the story of Leo to life in this epic adventure.
+
+“I just returned home to find all my gold has been stolen! For some devious purpose, the thief has dropped pieces of my gold like breadcrumbs through the woods.”
+
+“Despite this pickle of a trap, I am left with no choice but to follow the trail.”
+
+“Whatever lies ahead, I must recover my fortune.” -Leopold
+
+![](/doc/manual/references-leosfortune.jpeg)
+
+![](/doc/manual/references-leosfortune2.jpg)
+
+[***OpenGL 4.0 Shading Language Cookbook***](http://www.packtpub.com/opengl-4-0-shading-language-cookbook/book?tag=rk/opengl4-abr1/0811)
+
+A set of recipes that demonstrates a wide range of techniques for producing high-quality, real-time 3D graphics with GLSL 4.0, such as:
+
+* Using GLSL 4.0 to implement lighting and shading techniques.
+* Using the new features of GLSL 4.0 including tessellation and geometry shaders.
+* Using textures in GLSL as part of a wide variety of techniques from basic texture mapping to deferred shading.
+
+Simple, easy-to-follow examples with GLSL source code are provided, as well as a basic description of the theory behind each technique.
+
+![](/doc/manual/references-glsl4book.jpg)
+
+[***Outerra***](http://outerra.com/)
+
+A 3D planetary engine for seamless planet rendering from space down to the surface. Can use arbitrary resolution of elevation data, refining it to centimetre resolution using fractal algorithms.
+
+![](/doc/manual/references-outerra1.jpg)
+
+![](/doc/manual/references-outerra2.jpg)
+
+![](/doc/manual/references-outerra3.jpg)
+
+![](/doc/manual/references-outerra4.jpg)
+
+[***Falcor***](https://github.com/NVIDIA/Falcor)
+
+Real-time rendering research framework by NVIDIA.
+
+[***Cinder***](https://libcinder.org/)
+
+Cinder is a free and open source library for professional-quality creative coding in C++.
+
+Cinder is a C++ library for programming with aesthetic intent - the sort of development often called creative coding. This includes domains like graphics, audio, video, and computational geometry. Cinder is cross-platform, with official support for OS X, Windows, iOS, and WinRT.
+
+Cinder is production-proven, powerful enough to be the primary tool for professionals, but still suitable for learning and experimentation. Cinder is released under the [2-Clause BSD License](http://opensource.org/licenses/BSD-2-Clause).
+
+![](/doc/manual/references-cinder.png)
+
+[***opencloth***](https://github.com/mmmovania/opencloth/)
+
+A collection of source codes implementing cloth simulation algorithms in OpenGL.
+
+Simple, easy-to-follow examples with GLSL source code, as well as a basic description of the theory behind each technique.
+
+![](/doc/manual/references-opencloth1.png)
+
+![](/doc/manual/references-opencloth3.png)
+
+[***LibreOffice***](https://www.libreoffice.org/)
+
+LibreOffice includes several applications that make it the most powerful Free and Open Source office suite on the market.
+
+[***Are you using GLM in a project?***](mailto:[email protected])
+
+### <a name="section10_4"></a> 10.4. Tutorials using GLM
+
+* [Sascha Willems' Vulkan examples](https://github.com/SaschaWillems/Vulkan), Examples and demos for the new Vulkan API
+* [VKTS](https://github.com/McNopper/Vulkan) Vulkan examples using VulKan ToolS (VKTS)
+* [*The OpenGL Samples Pack*](http://www.g-truc.net/project-0026.html#menu), samples that show how to set up all the different new features
+* [*Learning Modern 3D Graphics programming*](http://www.arcsynthesis.org/gltut/), a great OpenGL tutorial using GLM by Jason L. McKesson
+* [*Morten Nobel-Jørgensen’s*](http://blog.nobel-joergensen.com/2011/04/02/glm-brilliant-math-library-for-opengl/) review and use an [*OpenGL renderer*](https://github.com/mortennobel/RenderE)
+* [*Swiftless’ OpenGL tutorial*](http://www.swiftless.com/opengltuts.html) using GLM by Donald Urquhart
+* [*Rastergrid*](http://rastergrid.com/blog/), many technical articles with companion programs using GLM by Daniel Rákos
+* [*OpenGL Tutorial*](http://www.opengl-tutorial.org), tutorials for OpenGL 3.1 and later
+* [*OpenGL Programming on Wikibooks*](http://en.wikibooks.org/wiki/OpenGL_Programming): For beginners who are discovering OpenGL.
+* [*3D Game Engine Programming*](http://3dgep.com/): Learning the latest 3D Game Engine Programming techniques.
+* [Game Tutorials](http://www.gametutorials.com/opengl-4-matrices-and-glm/), graphics and game programming.
+* [open.gl](https://open.gl/), OpenGL tutorial
+* [c-jump](http://www.c-jump.com/bcc/common/Talk3/Math/GLM/GLM.html), GLM tutorial
+* [Learn OpenGL](http://learnopengl.com/), OpenGL tutorial
+* [***Are you using GLM in a tutorial?***](mailto:[email protected])
+
+### <a name="section10_5"></a> 10.5. Equivalent for other languages
+
+* [*cglm*](https://github.com/recp/cglm): OpenGL Mathematics (glm) for C.
+* [*GlmSharp*](https://github.com/Philip-Trettner/GlmSharp): Open-source semi-generated GLM-flavored math library for .NET/C\#.
+* [glm-js](https://github.com/humbletim/glm-js): JavaScript adaptation of the OpenGL Mathematics (GLM) C++ library interfaces
+* [JVM OpenGL Mathematics (GLM)](https://github.com/kotlin-graphics/glm): written in Kotlin, Java compatible
+* [JGLM](https://github.com/jroyalty/jglm) - Java OpenGL Mathematics Library
+* [SwiftGL Math Library](https://github.com/SwiftGL/Math/blob/master/Sources/glm.swift) GLM for Swift
+* [glm-go](https://github.com/jbowtie/glm-go): Simple linear algebra library similar in spirit to GLM
+* [openll](https://github.com/Polkm/openll): Lua bindings for OpenGL, GLM, GLFW, OpenAL, SOIL and PhysicsFS
+* [glm-rs](https://github.com/dche/glm-rs): GLSL mathematics for Rust programming language
+* [glmpython](https://github.com/Queatz/glmpython): GLM math library for Python
+
+### <a name="section10_6"></a> 10.6. Alternatives to GLM
+
+* [*CML*](http://cmldev.net/): The CML (Configurable Math Library) is a free C++ math library for games and graphics.
+* [*Eigen*](http://eigen.tuxfamily.org/): A more heavy weight math library for general linear algebra in C++.
+* [*glhlib*](http://glhlib.sourceforge.net/): A much more than glu C library.
+* Are you using or developing an alternative library to GLM?
+
+### <a name="section10_7"></a> 10.7. Acknowledgements
+
+GLM is developed and maintained by [*Christophe Riccio*](http://www.g-truc.net) but many contributors have made this project what it is.
+
+Special thanks to:
+* Ashima Arts and Stefan Gustavson for their work on [*webgl-noise*](https://github.com/ashima/webgl-noise) which has been used for GLM noises implementation.
+* [*Arthur Winters*](http://athile.net/library/wiki/index.php?title=Athile_Technologies) for the C++11 and Visual C++ swizzle operators implementation and tests.
+* Joshua Smith and Christoph Schied for the discussions and the experiments around the swizzle operators implementation issues.
+* Guillaume Chevallereau for providing and maintaining the [*nightlight build system*](http://my.cdash.org/index.php?project=GLM).
+* Ghenadii Ursachi for GLM\_GTX\_matrix\_interpolation implementation.
+* Mathieu Roumillac for providing some implementation ideas.
+* [*Grant James*](http://www.zeuscmd.com/) for the implementation of all combination of none-squared matrix products.
+* Jesse Talavera-Greenberg for his work on the manual among other things.
+* All the GLM users that have reported bugs and hence helped GLM to become a great library!
diff --git a/3rdparty/glm/source/readme.md b/3rdparty/glm/source/readme.md
new file mode 100644
index 0000000..55b9678
--- /dev/null
+++ b/3rdparty/glm/source/readme.md
@@ -0,0 +1,1231 @@
+![glm](/doc/manual/logo-mini.png)
+
+[OpenGL Mathematics](http://glm.g-truc.net/) (*GLM*) is a header only C++ mathematics library for graphics software based on the [OpenGL Shading Language (GLSL) specifications](https://www.opengl.org/registry/doc/GLSLangSpec.4.50.diff.pdf).
+
+*GLM* provides classes and functions designed and implemented with the same naming conventions and functionality as *GLSL* so that anyone who knows *GLSL*, can use *GLM* as well in C++.
+
+This project isn't limited to *GLSL* features. An extension system, based on the *GLSL* extension conventions, provides extended capabilities: matrix transformations, quaternions, data packing, random numbers, noise, etc...
+
+This library works perfectly with *[OpenGL](https://www.opengl.org)* but it also ensures interoperability with other third party libraries and SDK. It is a good candidate for software rendering (raytracing / rasterisation), image processing, physics simulations and any development context that requires a simple and convenient mathematics library.
+
+*GLM* is written in C++98 but can take advantage of C++11 when supported by the compiler. It is a platform independent library with no dependencies and it officially supports the following compilers:
+- [*GCC*](http://gcc.gnu.org/) 4.7 and higher
+- [*Intel C++ Compose*](https://software.intel.com/en-us/intel-compilers) XE 2013 and higher
+- [*Clang*](http://llvm.org/) 3.4 and higher
+- [*Apple Clang 6.0*](https://developer.apple.com/library/mac/documentation/CompilerTools/Conceptual/LLVMCompilerOverview/index.html) and higher
+- [*Visual C++*](http://www.visualstudio.com/) 2013 and higher
+- [*CUDA*](https://developer.nvidia.com/about-cuda) 9.0 and higher (experimental)
+- [*SYCL*](https://www.khronos.org/sycl/) (experimental: only [ComputeCpp](https://codeplay.com/products/computesuite/computecpp) implementation has been tested).
+- Any C++11 compiler
+
+For more information about *GLM*, please have a look at the [manual](manual.md) and the [API reference documentation](http://glm.g-truc.net/0.9.8/api/index.html).
+The source code and the documentation are licensed under either the [Happy Bunny License (Modified MIT) or the MIT License](manual.md#section0).
+
+Thanks for contributing to the project by [submitting pull requests](https://github.com/g-truc/glm/pulls).
+
+```cpp
+#include <glm/vec3.hpp> // glm::vec3
+#include <glm/vec4.hpp> // glm::vec4
+#include <glm/mat4x4.hpp> // glm::mat4
+#include <glm/ext/matrix_transform.hpp> // glm::translate, glm::rotate, glm::scale
+#include <glm/ext/matrix_clip_space.hpp> // glm::perspective
+#include <glm/ext/scalar_constants.hpp> // glm::pi
+
+glm::mat4 camera(float Translate, glm::vec2 const& Rotate)
+{
+ glm::mat4 Projection = glm::perspective(glm::pi<float>() * 0.25f, 4.0f / 3.0f, 0.1f, 100.f);
+ glm::mat4 View = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -Translate));
+ View = glm::rotate(View, Rotate.y, glm::vec3(-1.0f, 0.0f, 0.0f));
+ View = glm::rotate(View, Rotate.x, glm::vec3(0.0f, 1.0f, 0.0f));
+ glm::mat4 Model = glm::scale(glm::mat4(1.0f), glm::vec3(0.5f));
+ return Projection * View * Model;
+}
+```
+
+## [Latest release](https://github.com/g-truc/glm/releases/latest)
+
+## Project Health
+
+| Service | System | Compiler | Status |
+| ------- | ------ | -------- | ------ |
+| [Travis CI](https://travis-ci.org/g-truc/glm)| MacOSX, Linux 64 bits | Clang 3.6, Clang 5.0, GCC 4.9, GCC 7.3 | [![Travis CI](https://travis-ci.org/g-truc/glm.svg?branch=master)](https://travis-ci.org/g-truc/glm)
+| [AppVeyor](https://ci.appveyor.com/project/Groovounet/glm)| Windows 32 and 64 | Visual Studio 2013, Visual Studio 2015, Visual Studio 2017 | [![AppVeyor](https://ci.appveyor.com/api/projects/status/32r7s2skrgm9ubva?svg=true)](https://ci.appveyor.com/project/Groovounet/glm)
+
+## Release notes
+
+### [GLM 0.9.9.9](https://github.com/g-truc/glm/releases/tag/0.9.9.9) - 2020-XX-XX
+#### Features:
+- Added *GLM_EXT_scalar_reciprocal* with tests
+- Added *GLM_EXT_vector_reciprocal* with tests
+- Added `glm::iround` and `glm::uround` to *GLM_EXT_scalar_common* and *GLM_EXT_vector_common*
+- Added *GLM_EXT_matrix_integer* with tests
+
+#### Improvements:
+- Added `constexpr` qualifier for `cross` product #1040
+- Added `constexpr` qualifier for `dot` product #1040
+
+#### Fixes:
+- Fixed incorrect assertion for `glm::min` and `glm::max` #1009
+- Fixed quaternion orientation in `glm::decompose` #1012
+- Fixed singularity in quaternion to euler angle roll conversion #1019
+- Fixed `quat` `glm::pow` handling of small magnitude quaternions #1022
+- Fixed `glm::fastNormalize` build error #1033
+- Fixed `glm::isMultiple` build error #1034
+- Fixed `glm::adjugate` calculation #1035
+- Fixed `glm::angle` discards the sign of result for angles in range (2*pi-1, 2*pi) #1038
+- Removed ban on using `glm::string_cast` with *CUDA* host code #1041
+
+### [GLM 0.9.9.8](https://github.com/g-truc/glm/releases/tag/0.9.9.8) - 2020-04-13
+#### Features:
+- Added *GLM_EXT_vector_intX* and *GLM_EXT_vector_uintX* extensions
+- Added *GLM_EXT_matrix_intX* and *GLM_EXT_matrix_uintX* extensions
+
+#### Improvements:
+- Added `glm::clamp`, `glm::repeat`, `glm::mirrorClamp` and `glm::mirrorRepeat` functions to `GLM_EXT_scalar_common` and `GLM_EXT_vector_common` extensions with tests
+
+#### Fixes:
+- Fixed unnecessary warnings from `matrix_projection.inl` #995
+- Fixed quaternion `glm::slerp` overload which interpolates with extra spins #996
+- Fixed for `glm::length` using arch64 #992
+- Fixed singularity check for `glm::quatLookAt` #770
+
+### [GLM 0.9.9.7](https://github.com/g-truc/glm/releases/tag/0.9.9.7) - 2020-01-05
+#### Improvements:
+- Improved *Neon* support with more functions optimized #950
+- Added *CMake* *GLM* interface #963
+- Added `glm::fma` implementation based on `std::fma` #969
+- Added missing quat constexpr #955
+- Added `GLM_FORCE_QUAT_DATA_WXYZ` to store quat data as w,x,y,z instead of x,y,z,w #983
+
+#### Fixes:
+- Fixed equal *ULP* variation when using negative sign #965
+- Fixed for intersection ray/plane and added related tests #953
+- Fixed ARM 64bit detection #949
+- Fixed *GLM_EXT_matrix_clip_space* warnings #980
+- Fixed Wimplicit-int-float-conversion warnings with clang 10+ #986
+- Fixed *GLM_EXT_matrix_clip_space* `perspectiveFov`
+
+### [GLM 0.9.9.6](https://github.com/g-truc/glm/releases/tag/0.9.9.6) - 2019-09-08
+#### Features:
+- Added *Neon* support #945
+- Added *SYCL* support #914
+- Added *GLM_EXT_scalar_integer* extension with power of two and multiple scalar functions
+- Added *GLM_EXT_vector_integer* extension with power of two and multiple vector functions
+
+#### Improvements:
+- Added *Visual C++ 2019* detection
+- Added *Visual C++ 2017* 15.8 and 15.9 detection
+- Added missing genType check for `glm::bitCount` and `glm::bitfieldReverse` #893
+
+#### Fixes:
+- Fixed for g++6 where -std=c++1z sets __cplusplus to 201500 instead of 201402 #921
+- Fixed hash hashes qua instead of tquat #919
+- Fixed `.natvis` as structs renamed #915
+- Fixed `glm::ldexp` and `glm::frexp` declaration #895
+- Fixed missing const to quaternion conversion operators #890
+- Fixed *GLM_EXT_scalar_ulp* and *GLM_EXT_vector_ulp* API coding style
+- Fixed quaternion componant order: `w, {x, y, z}` #916
+- Fixed `GLM_HAS_CXX11_STL` broken on Clang with Linux #926
+- Fixed *Clang* or *GCC* build due to wrong `GLM_HAS_IF_CONSTEXPR` definition #907
+- Fixed *CUDA* 9 build #910
+
+#### Deprecation:
+ - Removed CMake install and uninstall scripts
+
+### [GLM 0.9.9.5](https://github.com/g-truc/glm/releases/tag/0.9.9.5) - 2019-04-01
+#### Fixes:
+- Fixed build errors when defining `GLM_ENABLE_EXPERIMENTAL` #884 #883
+- Fixed `if constexpr` warning #887
+- Fixed missing declarations for `glm::frexp` and `glm::ldexp` #886
+
+### [GLM 0.9.9.4](https://github.com/g-truc/glm/releases/tag/0.9.9.4) - 2019-03-19
+#### Features:
+- Added `glm::mix` implementation for matrices in *GLM_EXT_matrix_common* #842
+- Added *CMake* `BUILD_SHARED_LIBS` and `BUILD_STATIC_LIBS` build options #871
+
+#### Improvements:
+- Added GLM_FORCE_INTRINSICS to enable SIMD instruction code path. By default, it's disabled allowing constexpr support by default. #865
+- Optimized inverseTransform #867
+
+#### Fixes:
+- Fixed in `glm::mat4x3` conversion #829
+- Fixed `constexpr` issue on GCC #832 #865
+- Fixed `glm::mix` implementation to improve GLSL conformance #866
+- Fixed `glm::int8` being defined as unsigned char with some compiler #839
+- Fixed `glm::vec1` include #856
+- Ignore `.vscode` #848
+
+### [GLM 0.9.9.3](https://github.com/g-truc/glm/releases/tag/0.9.9.3) - 2018-10-31
+#### Features:
+- Added `glm::equal` and `glm::notEqual` overload with max ULPs parameters for scalar numbers #121
+- Added `GLM_FORCE_SILENT_WARNINGS` to silent *GLM* warnings when using language extensions but using W4 or Wpedantic warnings #814 #775
+- Added adjugate functions to `GLM_GTX_matrix_operation` #151
+- Added `GLM_FORCE_ALIGNED_GENTYPES` to enable aligned types and SIMD instruction are not enabled. This disable `constexpr` #816
+
+#### Improvements:
+- Added constant time ULP distance between float #121
+- Added `GLM_FORCE_SILENT_WARNINGS` to suppress *GLM* warnings #822
+
+#### Fixes:
+- Fixed `glm::simplex` noise build with double #734
+- Fixed `glm::bitfieldInsert` according to GLSL spec #818
+- Fixed `glm::refract` for negative 'k' #808
+
+### [GLM 0.9.9.2](https://github.com/g-truc/glm/releases/tag/0.9.9.2) - 2018-09-14
+#### Fixes:
+- Fixed `GLM_FORCE_CXX**` section in the manual
+- Fixed default initialization with vector and quaternion types using `GLM_FORCE_CTOR_INIT` #812
+
+### [GLM 0.9.9.1](https://github.com/g-truc/glm/releases/tag/0.9.9.1) - 2018-09-03
+#### Features:
+- Added `bitfieldDeinterleave` to *GLM_GTC_bitfield*
+- Added missing `glm::equal` and `glm::notEqual` with epsilon for quaternion types to *GLM_GTC_quaternion*
+- Added *GLM_EXT_matrix_relational*: `glm::equal` and `glm::notEqual` with epsilon for matrix types
+- Added missing aligned matrix types to *GLM_GTC_type_aligned*
+- Added C++17 detection
+- Added *Visual C++* language standard version detection
+- Added PDF manual build from markdown
+
+#### Improvements:
+- Added a section to the manual for contributing to *GLM*
+- Refactor manual, lists all configuration defines
+- Added missing `glm::vec1` based constructors
+- Redesigned constexpr support which excludes both SIMD and `constexpr` #783
+- Added detection of *Visual C++ 2017* toolsets
+- Added identity functions #765
+- Splitted headers into EXT extensions to improve compilation time #670
+- Added separated performance tests
+- Clarified refract valid range of the indices of refraction, between -1 and 1 inclusively #806
+
+#### Fixes:
+- Fixed SIMD detection on *Clang* and *GCC*
+- Fixed build problems due to `std::printf` and `std::clock_t` #778
+- Fixed int mod
+- Anonymous unions require C++ language extensions
+- Fixed `glm::ortho` #790
+- Fixed *Visual C++* 2013 warnings in vector relational code #782
+- Fixed *ICC* build errors with constexpr #704
+- Fixed defaulted operator= and constructors #791
+- Fixed invalid conversion from int scalar with vec4 constructor when using SSE instruction
+- Fixed infinite loop in random functions when using negative radius values using an assert #739
+
+### [GLM 0.9.9.0](https://github.com/g-truc/glm/releases/tag/0.9.9.0) - 2018-05-22
+#### Features:
+- Added *RGBM* encoding in *GLM_GTC_packing* #420
+- Added *GLM_GTX_color_encoding* extension
+- Added *GLM_GTX_vec_swizzle*, faster compile time swizzling than swizzle operator #558
+- Added *GLM_GTX_exterior_product* with a `vec2` `glm::cross` implementation #621
+- Added *GLM_GTX_matrix_factorisation* to factor matrices in various forms #654
+- Added [`GLM_ENABLE_EXPERIMENTAL`](manual.md#section7_4) to enable experimental features.
+- Added packing functions for integer vectors #639
+- Added conan packaging configuration #643 #641
+- Added `glm::quatLookAt` to *GLM_GTX_quaternion* #659
+- Added `glm::fmin`, `glm::fmax` and `glm::fclamp` to *GLM_GTX_extended_min_max* #372
+- Added *GLM_EXT_vector_relational*: extend `glm::equal` and `glm::notEqual` to take an epsilon argument
+- Added *GLM_EXT_vector_relational*: `glm::openBounded` and `glm::closeBounded`
+- Added *GLM_EXT_vec1*: `*vec1` types
+- Added *GLM_GTX_texture*: `levels` function
+- Added separate functions to use both negative one and zero near clip planes #680
+- Added `GLM_FORCE_SINGLE_ONLY` to use *GLM* on platforms that don't support double #627
+- Added *GLM_GTX_easing* for interpolation functions #761
+
+#### Improvements:
+- No more default initialization of vector, matrix and quaternion types
+- Added lowp variant of GTC_color_space convertLinearToSRGB #419
+- Replaced the manual by a markdown version #458
+- Improved API documentation #668
+- Optimized GTC_packing implementation
+- Optimized GTC_noise functions
+- Optimized GTC_color_space HSV to RGB conversions
+- Optimised GTX_color_space_YCoCg YCoCgR conversions
+- Optimized GTX_matrix_interpolation axisAngle function
+- Added FAQ 12: Windows headers cause build errors... #557
+- Removed GCC shadow warnings #595
+- Added error for including of different versions of GLM #619
+- Added GLM_FORCE_IGNORE_VERSION to ignore error caused by including different version of GLM #619
+- Reduced warnings when using very strict compilation flags #646
+- length() member functions are constexpr #657
+- Added support of -Weverything with Clang #646
+- Improved exponential function test coverage
+- Enabled warnings as error with Clang unit tests
+- Conan package is an external repository: https://github.com/bincrafters/conan-glm
+- Clarify quat_cast documentation, applying on pure rotation matrices #759
+
+#### Fixes:
+- Removed doxygen references to *GLM_GTC_half_float* which was removed in 0.9.4
+- Fixed `glm::decompose` #448
+- Fixed `glm::intersectRayTriangle` #6
+- Fixed dual quaternion != operator #629
+- Fixed unused variable warning in *GLM_GTX_spline* #618
+- Fixed references to `GLM_FORCE_RADIANS` which was removed #642
+- Fixed `glm::fastInverseSqrt` to use fast inverse square #640
+- Fixed `glm::axisAngle` NaN #638
+- Fixed integer pow from *GLM_GTX_integer* with null exponent #658
+- Fixed `quat` `normalize` build error #656
+- Fixed *Visual C++ 2017.2* warning regarding `__has_feature` definition #655
+- Fixed documentation warnings
+- Fixed `GLM_HAS_OPENMP` when *OpenMP* is not enabled
+- Fixed Better follow GLSL `min` and `max` specification #372
+- Fixed quaternion constructor from two vectors special cases #469
+- Fixed `glm::to_string` on quaternions wrong components order #681
+- Fixed `glm::acsch` #698
+- Fixed `glm::isnan` on *CUDA* #727
+
+#### Deprecation:
+- Requires *Visual Studio 2013*, *GCC 4.7*, *Clang 3.4*, *Cuda 7*, *ICC 2013* or a C++11 compiler
+- Removed *GLM_GTX_simd_vec4* extension
+- Removed *GLM_GTX_simd_mat4* extension
+- Removed *GLM_GTX_simd_quat* extension
+- Removed `GLM_SWIZZLE`, use `GLM_FORCE_SWIZZLE` instead
+- Removed `GLM_MESSAGES`, use `GLM_FORCE_MESSAGES` instead
+- Removed `GLM_DEPTH_ZERO_TO_ONE`, use `GLM_FORCE_DEPTH_ZERO_TO_ONE` instead
+- Removed `GLM_LEFT_HANDED`, use `GLM_FORCE_LEFT_HANDED` instead
+- Removed `GLM_FORCE_NO_CTOR_INIT`
+- Removed `glm::uninitialize`
+
+---
+### [GLM 0.9.8.5](https://github.com/g-truc/glm/releases/tag/0.9.8.5) - 2017-08-16
+#### Features:
+- Added *Conan* package support #647
+
+#### Fixes:
+- Fixed *Clang* version detection from source #608
+- Fixed `glm::packF3x9_E1x5` exponent packing #614
+- Fixed build error `min` and `max` specializations with integer #616
+- Fixed `simd_mat4` build error #652
+
+---
+### [GLM 0.9.8.4](https://github.com/g-truc/glm/releases/tag/0.9.8.4) - 2017-01-22
+#### Fixes:
+- Fixed *GLM_GTC_packing* test failing on *GCC* x86 due to denorms #212 #577
+- Fixed `POPCNT` optimization build in *Clang* #512
+- Fixed `glm::intersectRayPlane` returns true in parallel case #578
+- Fixed *GCC* 6.2 compiler warnings #580
+- Fixed *GLM_GTX_matrix_decompose* `glm::decompose` #582 #448
+- Fixed *GCC* 4.5 and older build #566
+- Fixed *Visual C++* internal error when declaring a global vec type with swizzle expression enabled #594
+- Fixed `GLM_FORCE_CXX11` with Clang and libstdc++ which wasn't using C++11 STL features. #604
+
+---
+### [GLM 0.9.8.3](https://github.com/g-truc/glm/releases/tag/0.9.8.3) - 2016-11-12
+#### Improvements:
+- Broader support of `GLM_FORCE_UNRESTRICTED_GENTYPE` #378
+
+#### Fixes:
+- Fixed Android build error with C++11 compiler but C++98 STL #284 #564
+- Fixed *GLM_GTX_transform2* shear* functions #403
+- Fixed interaction between `GLM_FORCE_UNRESTRICTED_GENTYPE` and `glm::ortho` function #568
+- Fixed `glm::bitCount` with AVX on 32 bit builds #567
+- Fixed *CMake* `find_package` with version specification #572 #573
+
+---
+### [GLM 0.9.8.2](https://github.com/g-truc/glm/releases/tag/0.9.8.2) - 2016-11-01
+#### Improvements:
+- Added *Visual C++* 15 detection
+- Added *Clang* 4.0 detection
+- Added warning messages when using `GLM_FORCE_CXX**` but the compiler
+ is known to not fully support the requested C++ version #555
+- Refactored `GLM_COMPILER_VC` values
+- Made quat, vec, mat type component `length()` static #565
+
+#### Fixes:
+- Fixed *Visual C++* `constexpr` build error #555, #556
+
+---
+### [GLM 0.9.8.1](https://github.com/g-truc/glm/releases/tag/0.9.8.1) - 2016-09-25
+#### Improvements:
+- Optimized quaternion `glm::log` function #554
+
+#### Fixes:
+- Fixed *GCC* warning filtering, replaced -pedantic by -Wpedantic
+- Fixed SIMD faceforward bug. #549
+- Fixed *GCC* 4.8 with C++11 compilation option #550
+- Fixed *Visual Studio* aligned type W4 warning #548
+- Fixed packing/unpacking function fixed for 5_6_5 and 5_5_5_1 #552
+
+---
+### [GLM 0.9.8.0](https://github.com/g-truc/glm/releases/tag/0.9.8.0) - 2016-09-11
+#### Features:
+- Added right and left handed projection and clip control support #447 #415 #119
+- Added `glm::compNormalize` and `glm::compScale` functions to *GLM_GTX_component_wise*
+- Added `glm::packF3x9_E1x5` and `glm::unpackF3x9_E1x5` to *GLM_GTC_packing* for RGB9E5 #416
+- Added `(un)packHalf` to *GLM_GTC_packing*
+- Added `(un)packUnorm` and `(un)packSnorm` to *GLM_GTC_packing*
+- Added 16bit pack and unpack to *GLM_GTC_packing*
+- Added 8bit pack and unpack to *GLM_GTC_packing*
+- Added missing `bvec*` && and || operators
+- Added `glm::iround` and `glm::uround` to *GLM_GTC_integer*, fast round on positive values
+- Added raw SIMD API
+- Added 'aligned' qualifiers
+- Added *GLM_GTC_type_aligned* with aligned *vec* types
+- Added *GLM_GTC_functions* extension
+- Added quaternion version of `glm::isnan` and `glm::isinf` #521
+- Added `glm::lowestBitValue` to *GLM_GTX_bit* #536
+- Added `GLM_FORCE_UNRESTRICTED_GENTYPE` allowing non basic `genType` #543
+
+#### Improvements:
+- Improved SIMD and swizzle operators interactions with *GCC* and *Clang* #474
+- Improved *GLM_GTC_random* `linearRand` documentation
+- Improved *GLM_GTC_reciprocal* documentation
+- Improved `GLM_FORCE_EXPLICIT_CTOR` coverage #481
+- Improved *OpenMP* support detection for *Clang*, *GCC*, *ICC* and *VC*
+- Improved *GLM_GTX_wrap* for SIMD friendliness
+- Added `constexpr` for `*vec*`, `*mat*`, `*quat*` and `*dual_quat*` types #493
+- Added *NEON* instruction set detection
+- Added *MIPS* CPUs detection
+- Added *PowerPC* CPUs detection
+- Use *Cuda* built-in function for abs function implementation with Cuda compiler
+- Factorized `GLM_COMPILER_LLVM` and `GLM_COMPILER_APPLE_CLANG` into `GLM_COMPILER_CLANG`
+- No more warnings for use of long long
+- Added more information to build messages
+
+#### Fixes:
+- Fixed *GLM_GTX_extended_min_max* filename typo #386
+- Fixed `glm::intersectRayTriangle` to not do any unintentional backface culling
+- Fixed long long warnings when using C++98 on *GCC* and *Clang* #482
+- Fixed sign with signed integer function on non-x86 architecture
+- Fixed strict aliasing warnings #473
+- Fixed missing `glm::vec1` overload to `glm::length2` and `glm::distance2` functions #431
+- Fixed *GLM* test '/fp:fast' and '/Za' command-line options are incompatible
+- Fixed quaternion to mat3 cast function `glm::mat3_cast` from *GLM_GTC_quaternion* #542
+- Fixed *GLM_GTX_io* for *Cuda* #547 #546
+
+#### Deprecation:
+- Removed `GLM_FORCE_SIZE_FUNC` define
+- Deprecated *GLM_GTX_simd_vec4* extension
+- Deprecated *GLM_GTX_simd_mat4* extension
+- Deprecated *GLM_GTX_simd_quat* extension
+- Deprecated `GLM_SWIZZLE`, use `GLM_FORCE_SWIZZLE` instead
+- Deprecated `GLM_MESSAGES`, use `GLM_FORCE_MESSAGES` instead
+
+---
+### [GLM 0.9.7.6](https://github.com/g-truc/glm/releases/tag/0.9.7.6) - 2016-07-16
+#### Improvements:
+- Added pkg-config file #509
+- Updated list of compiler versions detected
+- Improved C++ 11 STL detection #523
+
+#### Fixes:
+- Fixed STL for C++11 detection on ICC #510
+- Fixed missing vec1 overload to length2 and distance2 functions #431
+- Fixed long long warnings when using C++98 on GCC and Clang #482
+- Fixed scalar reciprocal functions (GTC_reciprocal) #520
+
+---
+### [GLM 0.9.7.5](https://github.com/g-truc/glm/releases/tag/0.9.7.5) - 2016-05-24
+#### Improvements:
+- Added Visual C++ Clang toolset detection
+
+#### Fixes:
+- Fixed uaddCarry warning #497
+- Fixed roundPowerOfTwo and floorPowerOfTwo #503
+- Fixed Visual C++ SIMD instruction set automatic detection in 64 bits
+- Fixed to_string when used with GLM_FORCE_INLINE #506
+- Fixed GLM_FORCE_INLINE with binary vec4 operators
+- Fixed GTX_extended_min_max filename typo #386
+- Fixed intersectRayTriangle to not do any unintentional backface culling
+
+---
+### [GLM 0.9.7.4](https://github.com/g-truc/glm/releases/tag/0.9.7.4) - 2016-03-19
+#### Fixes:
+- Fixed asinh and atanh warning with C++98 STL #484
+- Fixed polar coordinates function latitude #485
+- Fixed outerProduct definitions and operator signatures for mat2x4 and vec4 #475
+- Fixed eulerAngles precision error, returns NaN #451
+- Fixed undefined reference errors #489
+- Fixed missing GLM_PLATFORM_CYGWIN declaration #495
+- Fixed various undefined reference errors #490
+
+---
+### [GLM 0.9.7.3](https://github.com/g-truc/glm/releases/tag/0.9.7.3) - 2016-02-21
+#### Improvements:
+- Added AVX512 detection
+
+#### Fixes:
+- Fixed CMake policy warning
+- Fixed GCC 6.0 detection #477
+- Fixed Clang build on Windows #479
+- Fixed 64 bits constants warnings on GCC #463
+
+---
+### [GLM 0.9.7.2](https://github.com/g-truc/glm/releases/tag/0.9.7.2) - 2016-01-03
+#### Fixes:
+- Fixed GTC_round floorMultiple/ceilMultiple #412
+- Fixed GTC_packing unpackUnorm3x10_1x2 #414
+- Fixed GTC_matrix_inverse affineInverse #192
+- Fixed ICC on Linux build errors #449
+- Fixed ldexp and frexp compilation errors
+- Fixed "Declaration shadows a field" warning #468
+- Fixed 'GLM_COMPILER_VC2005 is not defined' warning #468
+- Fixed various 'X is not defined' warnings #468
+- Fixed missing unary + operator #435
+- Fixed Cygwin build errors when using C++11 #405
+
+---
+### [GLM 0.9.7.1](https://github.com/g-truc/glm/releases/tag/0.9.7.1) - 2015-09-07
+#### Improvements:
+- Improved constexpr for constant functions coverage #198
+- Added to_string for quat and dual_quat in GTX_string_cast #375
+- Improved overall execution time of unit tests #396
+
+#### Fixes:
+- Fixed strict alignment warnings #235 #370
+- Fixed link errors on compilers not supported default function #377
+- Fixed compilation warnings in vec4
+- Fixed non-identity quaternions for equal vectors #234
+- Fixed excessive GTX_fast_trigonometry execution time #396
+- Fixed Visual Studio 2015 'hides class member' warnings #394
+- Fixed builtin bitscan never being used #392
+- Removed unused func_noise.* files #398
+
+---
+### [GLM 0.9.7.0](https://github.com/g-truc/glm/releases/tag/0.9.7.0) - 2015-08-02
+#### Features:
+- Added GTC_color_space: convertLinearToSRGB and convertSRGBToLinear functions
+- Added 'fmod' overload to GTX_common with tests #308
+- Left handed perspective and lookAt functions #314
+- Added functions eulerAngleXYZ and extractEulerAngleXYZ #311
+- Added <glm/gtx/hash.hpp> to perform std::hash on GLM types #320 #367
+- Added <glm/gtx/wrap.hpp> for texcoord wrapping
+- Added static components and precision members to all vector and quat types #350
+- Added .gitignore #349
+- Added support of defaulted functions to GLM types, to use them in unions #366
+
+#### Improvements:
+- Changed usage of __has_include to support Intel compiler #307
+- Specialized integer implementation of YCoCg-R #310
+- Don't show status message in 'FindGLM' if 'QUIET' option is set. #317
+- Added master branch continuous integration service on Linux 64 #332
+- Clarified manual regarding angle unit in GLM, added FAQ 11 #326
+- Updated list of compiler versions
+
+#### Fixes:
+- Fixed default precision for quat and dual_quat type #312
+- Fixed (u)int64 MSB/LSB handling on BE archs #306
+- Fixed multi-line comment warning in g++. #315
+- Fixed specifier removal by 'std::make_pair<>' #333
+- Fixed perspective fovy argument documentation #327
+- Removed -m64 causing build issues on Linux 32 #331
+- Fixed isfinite with C++98 compilers #343
+- Fixed Intel compiler build error on Linux #354
+- Fixed use of libstdc++ with Clang #351
+- Fixed quaternion pow #346
+- Fixed decompose warnings #373
+- Fixed matrix conversions #371
+
+#### Deprecation:
+- Removed integer specification for 'mod' in GTC_integer #308
+- Removed GTX_multiple, replaced by GTC_round
+
+---
+### [GLM 0.9.6.3](https://github.com/g-truc/glm/releases/tag/0.9.6.3) - 2015-02-15
+- Fixed Android doesn't have C++ 11 STL #284
+
+---
+### [GLM 0.9.6.2](https://github.com/g-truc/glm/releases/tag/0.9.6.2) - 2015-02-15
+#### Features:
+- Added display of GLM version with other GLM_MESSAGES
+- Added ARM instruction set detection
+
+#### Improvements:
+- Removed assert for perspective with zFar < zNear #298
+- Added Visual Studio natvis support for vec1, quat and dualqual types
+- Cleaned up C++11 feature detections
+- Clarify GLM licensing
+
+#### Fixes:
+- Fixed faceforward build #289
+- Fixed conflict with Xlib #define True 1 #293
+- Fixed decompose function VS2010 templating issues #294
+- Fixed mat4x3 = mat2x3 * mat4x2 operator #297
+- Fixed warnings in F2x11_1x10 packing function in GTC_packing #295
+- Fixed Visual Studio natvis support for vec4 #288
+- Fixed GTC_packing *pack*norm*x* build and added tests #292
+- Disabled GTX_scalar_multiplication for GCC, failing to build tests #242
+- Fixed Visual C++ 2015 constexpr errors: Disabled only partial support
+- Fixed functions not inlined with Clang #302
+- Fixed memory corruption (undefined behaviour) #303
+
+---
+### [GLM 0.9.6.1](https://github.com/g-truc/glm/releases/tag/0.9.6.1) - 2014-12-10
+#### Features:
+- Added GLM_LANG_CXX14_FLAG and GLM_LANG_CXX1Z_FLAG language feature flags
+- Added C++14 detection
+
+#### Improvements:
+- Clean up GLM_MESSAGES compilation log to report only detected capabilities
+
+#### Fixes:
+- Fixed scalar uaddCarry build error with Cuda #276
+- Fixed C++11 explicit conversion operators detection #282
+- Fixed missing explicit conversion when using integer log2 with *vec1 types
+- Fixed 64 bits integer GTX_string_cast to_string on VC 32 bit compiler
+- Fixed Android build issue, STL C++11 is not supported by the NDK #284
+- Fixed unsupported _BitScanForward64 and _BitScanReverse64 in VC10
+- Fixed Visual C++ 32 bit build #283
+- Fixed GLM_FORCE_SIZE_FUNC pragma message
+- Fixed C++98 only build
+- Fixed conflict between GTX_compatibility and GTC_quaternion #286
+- Fixed C++ language restriction using GLM_FORCE_CXX**
+
+---
+### [GLM 0.9.6.0](https://github.com/g-truc/glm/releases/tag/0.9.6.0) - 2014-11-30
+#### Features:
+- Exposed template vector and matrix types in 'glm' namespace #239, #244
+- Added GTX_scalar_multiplication for C++ 11 compiler only #242
+- Added GTX_range for C++ 11 compiler only #240
+- Added closestPointOnLine function for tvec2 to GTX_closest_point #238
+- Added GTC_vec1 extension, *vec1 support to *vec* types
+- Updated GTX_associated_min_max with vec1 support
+- Added support of precision and integers to linearRand #230
+- Added Integer types support to GTX_string_cast #249
+- Added vec3 slerp #237
+- Added GTX_common with isdenormal #223
+- Added GLM_FORCE_SIZE_FUNC to replace .length() by .size() #245
+- Added GLM_FORCE_NO_CTOR_INIT
+- Added 'uninitialize' to explicitly not initialize a GLM type
+- Added GTC_bitfield extension, promoted GTX_bit
+- Added GTC_integer extension, promoted GTX_bit and GTX_integer
+- Added GTC_round extension, promoted GTX_bit
+- Added GLM_FORCE_EXPLICIT_CTOR to require explicit type conversions #269
+- Added GTX_type_aligned for aligned vector, matrix and quaternion types
+
+#### Improvements:
+- Rely on C++11 to implement isinf and isnan
+- Removed GLM_FORCE_CUDA, Cuda is implicitly detected
+- Separated Apple Clang and LLVM compiler detection
+- Used pragma once
+- Undetected C++ compiler automatically compiles with GLM_FORCE_CXX98 and
+  GLM_FORCE_PURE
+- Added not function (from GLSL specification) on VC12
+- Optimized bitfieldReverse and bitCount functions
+- Optimized findLSB and findMSB functions.
+- Optimized matrix-vector multiple performance with Cuda #257, #258
+- Reduced integer type redefinitions #233
+- Rewrote GTX_fast_trigonometry #264 #265
+- Made types trivially copyable #263
+- Removed <iostream> in GLM tests
+- Used std features within GLM without redeclaring
+- Optimized cot function #272
+- Optimized sign function #272
+- Added explicit cast from quat to mat3 and mat4 #275
+
+#### Fixes:
+- Fixed std::nextafter not supported with C++11 on Android #217
+- Fixed missing value_type for dual quaternion
+- Fixed return type of dual quaternion length
+- Fixed infinite loop in isfinite function with GCC #221
+- Fixed Visual Studio 14 compiler warnings
+- Fixed implicit conversion from another tvec2 type to another tvec2 #241
+- Fixed lack of consistency of quat and dualquat constructors
+- Fixed uaddCarry #253
+- Fixed float comparison warnings #270
+
+#### Deprecation:
+- Requires Visual Studio 2010, GCC 4.2, Apple Clang 4.0, LLVM 3.0, Cuda 4, ICC 2013 or a C++98 compiler
+- Removed degrees for function parameters
+- Removed GLM_FORCE_RADIANS, active by default
+- Removed VC 2005 / 8 and 2008 / 9 support
+- Removed GCC 3.4 to 4.3 support
+- Removed LLVM GCC support
+- Removed LLVM 2.6 to 3.1 support
+- Removed CUDA 3.0 to 3.2 support
+
+---
+### [GLM 0.9.5.4 - 2014-06-21](https://github.com/g-truc/glm/releases/tag/0.9.5.4)
+- Fixed non-utf8 character #196
+- Added FindGLM install for CMake #189
+- Fixed GTX_color_space - saturation #195
+- Fixed glm::isinf and glm::isnan for with Android NDK 9d #191
+- Fixed builtin GLM_ARCH_SSE4 #204
+- Optimized Quaternion vector rotation #205
+- Fixed missing doxygen @endcond tag #211
+- Fixed instruction set detection with Clang #158
+- Fixed orientate3 function #207
+- Fixed lerp when cosTheta is close to 1 in quaternion slerp #210
+- Added GTX_io for io with <iostream> #144
+- Fixed fastDistance ambiguity #215
+- Fixed tweakedInfinitePerspective #208 and added user-defined epsilon to
+ tweakedInfinitePerspective
+- Fixed std::copy and std::vector with GLM types #214
+- Fixed strict aliasing issues #212, #152
+- Fixed std::nextafter not supported with C++11 on Android #213
+- Fixed corner cases in exp and log functions for quaternions #199
+
+---
+### GLM 0.9.5.3 - 2014-04-02
+- Added instruction set auto detection with Visual C++ using _M_IX86_FP - /arch
+ compiler argument
+- Fixed GTX_raw_data code dependency
+- Fixed GCC instruction set detection
+- Added GLM_GTX_matrix_transform_2d extension (#178, #176)
+- Fixed CUDA issues (#169, #168, #183, #182)
+- Added support for all extensions but GTX_string_cast to CUDA
+- Fixed strict aliasing warnings in GCC 4.8.1 / Android NDK 9c (#152)
+- Fixed missing bitfieldInterleave definisions
+- Fixed usubBorrow (#171)
+- Fixed eulerAngle*** not consistent for right-handed coordinate system (#173)
+- Added full tests for eulerAngle*** functions (#173)
+- Added workaround for a CUDA compiler bug (#186, #185)
+
+---
+### GLM 0.9.5.2 - 2014-02-08
+- Fixed initializer list ambiguity (#159, #160)
+- Fixed warnings with the Android NDK 9c
+- Fixed non power of two matrix products
+- Fixed mix function link error
+- Fixed SSE code included in GLM tests on "pure" platforms
+- Fixed undefined reference to fastInverseSqrt (#161)
+- Fixed GLM_FORCE_RADIANS with <glm/ext.hpp> build error (#165)
+- Fix dot product clamp range for vector angle functions. (#163)
+- Tentative fix for strict aliasing warning in GCC 4.8.1 / Android NDK 9c (#152)
+- Fixed GLM_GTC_constants description brief (#162)
+
+---
+### GLM 0.9.5.1 - 2014-01-11
+- Fixed angle and orientedAngle that sometimes return NaN values (#145)
+- Deprecated degrees for function parameters and display a message
+- Added possible static_cast conversion of GLM types (#72)
+- Fixed error 'inverse' is not a member of 'glm' from glm::unProject (#146)
+- Fixed mismatch between some declarations and definitions
+- Fixed inverse link error when using namespace glm; (#147)
+- Optimized matrix inverse and division code (#149)
+- Added intersectRayPlane function (#153)
+- Fixed outerProduct return type (#155)
+
+---
+### GLM 0.9.5.0 - 2013-12-25
+- Added forward declarations (glm/fwd.hpp) for faster compilations
+- Added per feature headers
+- Minimized GLM internal dependencies
+- Improved Intel Compiler detection
+- Added bitfieldInterleave and _mm_bit_interleave_si128 functions
+- Added GTX_scalar_relational
+- Added GTX_dual_quaternion
+- Added rotation function to GTX_quaternion (#22)
+- Added precision variation of each type
+- Added quaternion comparison functions
+- Fixed GTX_multiple for negative value
+- Removed GTX_ocl_type extension
+- Fixed post increment and decrement operators
+- Fixed perspective with zNear == 0 (#71)
+- Removed l-value swizzle operators
+- Cleaned up compiler detection code for unsupported compilers
+- Replaced C cast by C++ casts
+- Fixed .length() that should return a int and not a size_t
+- Added GLM_FORCE_SIZE_T_LENGTH and glm::length_t
+- Removed unnecessary conversions
+- Optimized packing and unpacking functions
+- Removed the normalization of the up argument of lookAt function (#114)
+- Added low precision specializations of inversesqrt
+- Fixed ldexp and frexp implementations
+- Increased assert coverage
+- Increased static_assert coverage
+- Replaced GLM traits by STL traits when possible
+- Allowed including individual core feature
+- Increased unit tests completeness
+- Added creating of a quaternion from two vectors
+- Added C++11 initializer lists
+- Fixed umulExtended and imulExtended implementations for vector types (#76)
+- Fixed CUDA coverage for GTC extensions
+- Added GTX_io extension
+- Improved GLM messages enabled when defining GLM_MESSAGES
+- Hidden matrix _inverse function implementation detail into private section
+
+---
+### [GLM 0.9.4.6](https://github.com/g-truc/glm/releases/tag/0.9.4.6) - 2013-09-20
+- Fixed detection to select the last known compiler if newer version #106
+- Fixed is_int and is_uint code duplication with GCC and C++11 #107
+- Fixed test suite build while using Clang in C++11 mode
+- Added c++1y mode support in CMake test suite
+- Removed ms extension mode to CMake when no using Visual C++
+- Added pedantic mode to CMake test suite for Clang and GCC
+- Added use of GCC frontend on Unix for ICC and Visual C++ frontend on Windows
+  for ICC
+- Added compilation errors for unsupported compiler versions
+- Fixed glm::orientation with GLM_FORCE_RADIANS defined #112
+- Fixed const ref issue on assignment operator taking a scalar parameter #116
+- Fixed glm::eulerAngleY implementation #117
+
+---
+### GLM 0.9.4.5 - 2013-08-12
+- Fixed CUDA support
+- Fixed inclusion of intrinsics in "pure" mode #92
+- Fixed language detection on GCC when the C++0x mode isn't enabled #95
+- Fixed issue #97: register is deprecated in C++11
+- Fixed issue #96: CUDA issues
+- Added Windows CE detection #92
+- Added missing value_ptr for quaternions #99
+
+---
+### GLM 0.9.4.4 - 2013-05-29
+- Fixed slerp when cosTheta is close to 1 #65
+- Fixed mat4x2 value_type constructor #70
+- Fixed glm.natvis for Visual C++ 12 #82
+- Added assert in inversesqrt to detect division by zero #61
+- Fixed missing swizzle operators #86
+- Fixed CUDA warnings #86
+- Fixed GLM natvis for VC11 #82
+- Fixed GLM_GTX_multiple with negative values #79
+- Fixed glm::perspective when zNear is zero #71
+
+---
+### GLM 0.9.4.3 - 2013-03-20
+- Detected qualifier for Clang
+- Fixed C++11 mode for GCC, couldn't be enabled without MS extensions
+- Fixed squad, intermediate and exp quaternion functions
+- Fixed GTX_polar_coordinates euclidean function, takes a vec2 instead of a vec3
+- Clarify the license applying on the manual
+- Added a docx copy of the manual
+- Fixed GLM_GTX_matrix_interpolation
+- Fixed isnan and isinf on Android with Clang
+- Autodetected C++ version using __cplusplus value
+- Fixed mix for bool and bvec* third parameter
+
+---
+### GLM 0.9.4.2 - 2013-02-14
+- Fixed compAdd from GTX_component_wise
+- Fixed SIMD support for Intel compiler on Windows
+- Fixed isnan and isinf for CUDA compiler
+- Fixed GLM_FORCE_RADIANS on glm::perspective
+- Fixed GCC warnings
+- Fixed packDouble2x32 on Xcode
+- Fixed mix for vec4 SSE implementation
+- Fixed 0x2013 dash character in comments that cause issue in Windows
+ Japanese mode
+- Fixed documentation warnings
+- Fixed CUDA warnings
+
+---
+### GLM 0.9.4.1 - 2012-12-22
+- Improved half support: -0.0 case and implicit conversions
+- Fixed Intel Composer Compiler support on Linux
+- Fixed interaction between quaternion and euler angles
+- Fixed GTC_constants build
+- Fixed GTX_multiple
+- Fixed quat slerp using mix function when cosTheta close to 1
+- Improved fvec4SIMD and fmat4x4SIMD implementations
+- Fixed assert messages
+- Added slerp and lerp quaternion functions and tests
+
+---
+### GLM 0.9.4.0 - 2012-11-18
+- Added Intel Composer Compiler support
+- Promoted GTC_epsilon extension
+- Promoted GTC_ulp extension
+- Removed GLM website from the source repository
+- Added GLM_FORCE_RADIANS so that all functions takes radians for arguments
+- Fixed detection of Clang and LLVM GCC on MacOS X
+- Added debugger visualizers for Visual C++ 2012
+- Requires Visual Studio 2005, GCC 4.2, Clang 2.6, Cuda 3, ICC 2013 or a C++98 compiler
+
+---
+### [GLM 0.9.3.4](https://github.com/g-truc/glm/releases/tag/0.9.3.4) - 2012-06-30
+- Added SSE4 and AVX2 detection.
+- Removed VIRTREV_xstream and the incompatibility generated with GCC
+- Fixed C++11 compiler option for GCC
+- Removed MS language extension option for GCC (not functional)
+- Fixed bitfieldExtract for vector types
+- Fixed warnings
+- Fixed SSE includes
+
+---
+### GLM 0.9.3.3 - 2012-05-10
+- Fixed isinf and isnan
+- Improved compatibility with Intel compiler
+- Added CMake test build options: SIMD, C++11, fast math and MS land ext
+- Fixed SIMD mat4 test on GCC
+- Fixed perspectiveFov implementation
+- Fixed matrixCompMult for non-square matrices
+- Fixed namespace issue on stream operators
+- Fixed various warnings
+- Added VC11 support
+
+---
+### GLM 0.9.3.2 - 2012-03-15
+- Fixed doxygen documentation
+- Fixed Clang version detection
+- Fixed simd mat4 /= operator
+
+---
+### GLM 0.9.3.1 - 2012-01-25
+- Fixed platform detection
+- Fixed warnings
+- Removed detail code from Doxygen doc
+
+---
+### GLM 0.9.3.0 - 2012-01-09
+- Added CPP Check project
+- Fixed conflict with Windows headers
+- Fixed isinf implementation
+- Fixed Boost conflict
+- Fixed warnings
+
+---
+### GLM 0.9.3.B - 2011-12-12
+- Added support for Chrome Native Client
+- Added epsilon constant
+- Removed value_size function from vector types
+- Fixed roundEven on GCC
+- Improved API documentation
+- Fixed modf implementation
+- Fixed step function accuracy
+- Fixed outerProduct
+
+---
+### GLM 0.9.3.A - 2011-11-11
+- Improved doxygen documentation
+- Added new swizzle operators for C++11 compilers
+- Added new swizzle operators declared as functions
+- Added GLSL 4.20 length for vector and matrix types
+- Promoted GLM_GTC_noise extension: simplex, perlin, periodic noise functions
+- Promoted GLM_GTC_random extension: linear, gaussian and various random number
+generation distribution
+- Added GLM_GTX_constants: provides useful constants
+- Added extension versioning
+- Removed many unused namespaces
+- Fixed half based type constructors
+- Added GLSL core noise functions
+
+---
+### [GLM 0.9.2.7](https://github.com/g-truc/glm/releases/tag/0.9.2.7) - 2011-10-24
+- Added more swizzling constructors
+- Added missing non-square matrix products
+
+---
+### [GLM 0.9.2.6](https://github.com/g-truc/glm/releases/tag/0.9.2.6) - 2011-10-01
+- Fixed half based type build on old GCC
+- Fixed /W4 warnings on Visual C++
+- Fixed some missing l-value swizzle operators
+
+---
+### GLM 0.9.2.5 - 2011-09-20
+- Fixed floatBitToXint functions
+- Fixed pack and unpack functions
+- Fixed round functions
+
+---
+### GLM 0.9.2.4 - 2011-09-03
+- Fixed extensions bugs
+
+---
+### GLM 0.9.2.3 - 2011-06-08
+- Fixed build issues
+
+---
+### GLM 0.9.2.2 - 2011-06-02
+- Expend matrix constructors flexibility
+- Improved quaternion implementation
+- Fixed many warnings across platforms and compilers
+
+---
+### GLM 0.9.2.1 - 2011-05-24
+- Automatically detect CUDA support
+- Improved compiler detection
+- Fixed errors and warnings in VC with C++ extensions disabled
+- Fixed and tested GLM_GTX_vector_angle
+- Fixed and tested GLM_GTX_rotate_vector
+
+---
+### GLM 0.9.2.0 - 2011-05-09
+- Added CUDA support
+- Added CTest test suite
+- Added GLM_GTX_ulp extension
+- Added GLM_GTX_noise extension
+- Added GLM_GTX_matrix_interpolation extension
+- Updated quaternion slerp interpolation
+
+---
+### [GLM 0.9.1.3](https://github.com/g-truc/glm/releases/tag/0.9.1.3) - 2011-05-07
+- Fixed bugs
+
+---
+### GLM 0.9.1.2 - 2011-04-15
+- Fixed bugs
+
+---
+### GLM 0.9.1.1 - 2011-03-17
+- Fixed bugs
+
+---
+### GLM 0.9.1.0 - 2011-03-03
+- Fixed bugs
+
+---
+### GLM 0.9.1.B - 2011-02-13
+- Updated API documentation
+- Improved SIMD implementation
+- Fixed Linux build
+
+---
+### [GLM 0.9.0.8](https://github.com/g-truc/glm/releases/tag/0.9.0.8) - 2011-02-13
+- Added quaternion product operator.
+- Clarify that GLM is a header only library.
+
+---
+### GLM 0.9.1.A - 2011-01-31
+- Added SIMD support
+- Added new swizzle functions
+- Improved static assert error message with C++0x static_assert
+- New setup system
+- Reduced branching
+- Fixed trunc implementation
+
+---
+### [GLM 0.9.0.7](https://github.com/g-truc/glm/releases/tag/0.9.0.7) - 2011-01-30
+- Added GLSL 4.10 packing functions
+- Added == and != operators for every types.
+
+---
+### GLM 0.9.0.6 - 2010-12-21
+- Many matrices bugs fixed
+
+---
+### GLM 0.9.0.5 - 2010-11-01
+- Improved Clang support
+- Fixed bugs
+
+---
+### GLM 0.9.0.4 - 2010-10-04
+- Added autoexp for GLM
+- Fixed bugs
+
+---
+### GLM 0.9.0.3 - 2010-08-26
+- Fixed non-squared matrix operators
+
+---
+### GLM 0.9.0.2 - 2010-07-08
+- Added GLM_GTX_int_10_10_10_2
+- Fixed bugs
+
+---
+### GLM 0.9.0.1 - 2010-06-21
+- Fixed extensions errors
+
+---
+### GLM 0.9.0.0 - 2010-05-25
+- Objective-C support
+- Fixed warnings
+- Updated documentation
+
+---
+### GLM 0.9.B.2 - 2010-04-30
+- Git transition
+- Removed experimental code from releases
+- Fixed bugs
+
+---
+### GLM 0.9.B.1 - 2010-04-03
+- Based on GLSL 4.00 specification
+- Added the new core functions
+- Added some implicit conversion support
+
+---
+### GLM 0.9.A.2 - 2010-02-20
+- Improved some possible errors messages
+- Improved declarations and definitions match
+
+---
+### GLM 0.9.A.1 - 2010-02-09
+- Removed deprecated features
+- Internal redesign
+
+---
+### GLM 0.8.4.4 final - 2010-01-25
+- Fixed warnings
+
+---
+### GLM 0.8.4.3 final - 2009-11-16
+- Fixed Half float arithmetic
+- Fixed setup defines
+
+---
+### GLM 0.8.4.2 final - 2009-10-19
+- Fixed Half float adds
+
+---
+### GLM 0.8.4.1 final - 2009-10-05
+- Updated documentation
+- Fixed MacOS X build
+
+---
+### GLM 0.8.4.0 final - 2009-09-16
+- Added GCC 4.4 and VC2010 support
+- Added matrix optimizations
+
+---
+### GLM 0.8.3.5 final - 2009-08-11
+- Fixed bugs
+
+---
+### GLM 0.8.3.4 final - 2009-08-10
+- Updated GLM according GLSL 1.5 spec
+- Fixed bugs
+
+---
+### GLM 0.8.3.3 final - 2009-06-25
+- Fixed bugs
+
+---
+### GLM 0.8.3.2 final - 2009-06-04
+- Added GLM_GTC_quaternion
+- Added GLM_GTC_type_precision
+
+---
+### GLM 0.8.3.1 final - 2009-05-21
+- Fixed old extension system.
+
+---
+### GLM 0.8.3.0 final - 2009-05-06
+- Added stable extensions.
+- Added new extension system.
+
+---
+### GLM 0.8.2.3 final - 2009-04-01
+- Fixed bugs.
+
+---
+### GLM 0.8.2.2 final - 2009-02-24
+- Fixed bugs.
+
+---
+### GLM 0.8.2.1 final - 2009-02-13
+- Fixed bugs.
+
+---
+### GLM 0.8.2 final - 2009-01-21
+- Fixed bugs.
+
+---
+### GLM 0.8.1 final - 2008-10-30
+- Fixed bugs.
+
+---
+### GLM 0.8.0 final - 2008-10-23
+- New method to use extension.
+
+---
+### GLM 0.8.0 beta3 - 2008-10-10
+- Added CMake support for GLM tests.
+
+---
+### GLM 0.8.0 beta2 - 2008-10-04
+- Improved half scalars and vectors support.
+
+---
+### GLM 0.8.0 beta1 - 2008-09-26
+- Improved GLSL conformance
+- Added GLSL 1.30 support
+- Improved API documentation
+
+---
+### GLM 0.7.6 final - 2008-08-08
+- Improved C++ standard conformance
+- Added Static assert for types checking
+
+---
+### GLM 0.7.5 final - 2008-07-05
+- Added build message system with Visual Studio
+- Pedantic build with GCC
+
+---
+### GLM 0.7.4 final - 2008-06-01
+- Added external dependencies system.
+
+---
+### GLM 0.7.3 final - 2008-05-24
+- Fixed bugs
+- Added new extension group
+
+---
+### GLM 0.7.2 final - 2008-04-27
+- Updated documentation
+- Added preprocessor options
+
+---
+### GLM 0.7.1 final - 2008-03-24
+- Disabled half on GCC
+- Fixed extensions
+
+---
+### GLM 0.7.0 final - 2008-03-22
+- Changed to MIT license
+- Added new documentation
+
+---
+### GLM 0.6.4 - 2007-12-10
+- Fixed swizzle operators
+
+---
+### GLM 0.6.3 - 2007-11-05
+- Fixed type data accesses
+- Fixed 3DSMax sdk conflict
+
+---
+### GLM 0.6.2 - 2007-10-08
+- Fixed extension
+
+---
+### GLM 0.6.1 - 2007-10-07
+- Fixed a namespace error
+- Added extensions
+
+---
+### GLM 0.6.0 : 2007-09-16
+- Added new extension namespace mechanism
+- Added Automatic compiler detection
+
+---
+### GLM 0.5.1 - 2007-02-19
+- Fixed swizzle operators
+
+---
+### GLM 0.5.0 - 2007-01-06
+- Upgraded to GLSL 1.2
+- Added swizzle operators
+- Added setup settings
+
+---
+### GLM 0.4.1 - 2006-05-22
+- Added OpenGL examples
+
+---
+### GLM 0.4.0 - 2006-05-17
+- Added missing operators to vec* and mat*
+- Added first GLSL 1.2 features
+- Fixed windows.h before glm.h when windows.h required
+
+---
+### GLM 0.3.2 - 2006-04-21
+- Fixed texcoord components access.
+- Fixed mat4 and imat4 division operators.
+
+---
+### GLM 0.3.1 - 2006-03-28
+- Added GCC 4.0 support under MacOS X.
+- Added GCC 4.0 and 4.1 support under Linux.
+- Added code optimisations.
+
+---
+### GLM 0.3 - 2006-02-19
+- Improved GLSL type conversion and construction compliance.
+- Added experimental extensions.
+- Added Doxygen Documentation.
+- Added code optimisations.
+- Fixed bugs.
+
+---
+### GLM 0.2 - 2005-05-05
+- Improved adaptation from GLSL.
+- Add experimental extensions based on OpenGL extension process.
+- Fixed bugs.
+
+---
+### GLM 0.1 - 2005-02-21
+- Add vec2, vec3, vec4 GLSL types
+- Add ivec2, ivec3, ivec4 GLSL types
+- Add bvec2, bvec3, bvec4 GLSL types
+- Add mat2, mat3, mat4 GLSL types
+- Add almost all functions
+
diff --git a/3rdparty/glm/source/test/CMakeLists.txt b/3rdparty/glm/source/test/CMakeLists.txt
new file mode 100644
index 0000000..e7f85f1
--- /dev/null
+++ b/3rdparty/glm/source/test/CMakeLists.txt
@@ -0,0 +1,246 @@
+option(GLM_QUIET "No CMake Message" OFF)
+option(BUILD_SHARED_LIBS "Build shared library" ON)
+option(BUILD_STATIC_LIBS "Build static library" ON)
+option(GLM_TEST_ENABLE_CXX_98 "Enable C++ 98" OFF)
+option(GLM_TEST_ENABLE_CXX_11 "Enable C++ 11" OFF)
+option(GLM_TEST_ENABLE_CXX_14 "Enable C++ 14" OFF)
+option(GLM_TEST_ENABLE_CXX_17 "Enable C++ 17" OFF)
+option(GLM_TEST_ENABLE_CXX_20 "Enable C++ 20" OFF)
+
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+if(GLM_TEST_ENABLE_CXX_20)
+ set(CMAKE_CXX_STANDARD 20)
+ add_definitions(-DGLM_FORCE_CXX2A)
+ if(NOT GLM_QUIET)
+ message(STATUS "GLM: Build with C++20 features")
+ endif()
+
+elseif(GLM_TEST_ENABLE_CXX_17)
+ set(CMAKE_CXX_STANDARD 17)
+ add_definitions(-DGLM_FORCE_CXX17)
+ if(NOT GLM_QUIET)
+ message(STATUS "GLM: Build with C++17 features")
+ endif()
+
+elseif(GLM_TEST_ENABLE_CXX_14)
+ set(CMAKE_CXX_STANDARD 14)
+ add_definitions(-DGLM_FORCE_CXX14)
+ if(NOT GLM_QUIET)
+ message(STATUS "GLM: Build with C++14 features")
+ endif()
+
+elseif(GLM_TEST_ENABLE_CXX_11)
+ set(CMAKE_CXX_STANDARD 11)
+ add_definitions(-DGLM_FORCE_CXX11)
+ if(NOT GLM_QUIET)
+ message(STATUS "GLM: Build with C++11 features")
+ endif()
+
+elseif(GLM_TEST_ENABLE_CXX_98)
+ set(CMAKE_CXX_STANDARD 98)
+ add_definitions(-DGLM_FORCE_CXX98)
+ if(NOT GLM_QUIET)
+ message(STATUS "GLM: Build with C++98 features")
+ endif()
+endif()
+
+option(GLM_TEST_ENABLE_LANG_EXTENSIONS "Enable language extensions" OFF)
+
+option(GLM_DISABLE_AUTO_DETECTION "Enable language extensions" OFF)
+
+if(GLM_DISABLE_AUTO_DETECTION)
+ add_definitions(-DGLM_FORCE_PLATFORM_UNKNOWN -DGLM_FORCE_COMPILER_UNKNOWN -DGLM_FORCE_ARCH_UNKNOWN -DGLM_FORCE_CXX_UNKNOWN)
+endif()
+
+if(GLM_TEST_ENABLE_LANG_EXTENSIONS)
+ set(CMAKE_CXX_EXTENSIONS ON)
+ if((CMAKE_CXX_COMPILER_ID MATCHES "Clang") OR (CMAKE_CXX_COMPILER_ID MATCHES "GNU"))
+ add_compile_options(-fms-extensions)
+ endif()
+ message(STATUS "GLM: Build with C++ language extensions")
+else()
+ set(CMAKE_CXX_EXTENSIONS OFF)
+ if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ add_compile_options(/Za)
+ if(MSVC15)
+ add_compile_options(/permissive-)
+ endif()
+ endif()
+endif()
+
+option(GLM_TEST_ENABLE_FAST_MATH "Enable fast math optimizations" OFF)
+if(GLM_TEST_ENABLE_FAST_MATH)
+ if(NOT GLM_QUIET)
+ message(STATUS "GLM: Build with fast math optimizations")
+ endif()
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "Clang") OR (CMAKE_CXX_COMPILER_ID MATCHES "GNU"))
+ add_compile_options(-ffast-math)
+
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ add_compile_options(/fp:fast)
+ endif()
+else()
+ if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ add_compile_options(/fp:precise)
+ endif()
+endif()
+
+option(GLM_TEST_ENABLE "Build unit tests" ON)
+option(GLM_TEST_ENABLE_SIMD_SSE2 "Enable SSE2 optimizations" OFF)
+option(GLM_TEST_ENABLE_SIMD_SSE3 "Enable SSE3 optimizations" OFF)
+option(GLM_TEST_ENABLE_SIMD_SSSE3 "Enable SSSE3 optimizations" OFF)
+option(GLM_TEST_ENABLE_SIMD_SSE4_1 "Enable SSE 4.1 optimizations" OFF)
+option(GLM_TEST_ENABLE_SIMD_SSE4_2 "Enable SSE 4.2 optimizations" OFF)
+option(GLM_TEST_ENABLE_SIMD_AVX "Enable AVX optimizations" OFF)
+option(GLM_TEST_ENABLE_SIMD_AVX2 "Enable AVX2 optimizations" OFF)
+option(GLM_TEST_FORCE_PURE "Force 'pure' instructions" OFF)
+
+if(GLM_TEST_FORCE_PURE)
+ add_definitions(-DGLM_FORCE_PURE)
+
+ if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+ add_compile_options(-mfpmath=387)
+ endif()
+ message(STATUS "GLM: No SIMD instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_AVX2)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-mavx2)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxAVX2)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ add_compile_options(/arch:AVX2)
+ endif()
+ message(STATUS "GLM: AVX2 instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_AVX)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-mavx)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxAVX)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ add_compile_options(/arch:AVX)
+ endif()
+ message(STATUS "GLM: AVX instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_SSE4_2)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-msse4.2)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxSSE4.2)
+ elseif((CMAKE_CXX_COMPILER_ID MATCHES "MSVC") AND NOT CMAKE_CL_64)
+ add_compile_options(/arch:SSE2) # VC doesn't support SSE4.2
+ endif()
+ message(STATUS "GLM: SSE4.2 instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_SSE4_1)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-msse4.1)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxSSE4.1)
+ elseif((CMAKE_CXX_COMPILER_ID MATCHES "MSVC") AND NOT CMAKE_CL_64)
+ add_compile_options(/arch:SSE2) # VC doesn't support SSE4.1
+ endif()
+ message(STATUS "GLM: SSE4.1 instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_SSSE3)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-mssse3)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxSSSE3)
+ elseif((CMAKE_CXX_COMPILER_ID MATCHES "MSVC") AND NOT CMAKE_CL_64)
+ add_compile_options(/arch:SSE2) # VC doesn't support SSSE3
+ endif()
+ message(STATUS "GLM: SSSE3 instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_SSE3)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-msse3)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxSSE3)
+ elseif((CMAKE_CXX_COMPILER_ID MATCHES "MSVC") AND NOT CMAKE_CL_64)
+ add_compile_options(/arch:SSE2) # VC doesn't support SSE3
+ endif()
+ message(STATUS "GLM: SSE3 instruction set")
+
+elseif(GLM_TEST_ENABLE_SIMD_SSE2)
+ add_definitions(-DGLM_FORCE_INTRINSICS)
+
+ if((CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
+ add_compile_options(-msse2)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ add_compile_options(/QxSSE2)
+ elseif((CMAKE_CXX_COMPILER_ID MATCHES "MSVC") AND NOT CMAKE_CL_64)
+ add_compile_options(/arch:SSE2)
+ endif()
+ message(STATUS "GLM: SSE2 instruction set")
+endif()
+
+# Compiler and default options
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ if(NOT GLM_QUIET)
+ message("GLM: Clang - ${CMAKE_CXX_COMPILER_ID} compiler")
+ endif()
+
+ add_compile_options(-Werror -Weverything)
+ add_compile_options(-Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-c++11-long-long -Wno-padded -Wno-gnu-anonymous-struct -Wno-nested-anon-types)
+ add_compile_options(-Wno-undefined-reinterpret-cast -Wno-sign-conversion -Wno-unused-variable -Wno-missing-prototypes -Wno-unreachable-code -Wno-missing-variable-declarations -Wno-sign-compare -Wno-global-constructors -Wno-unused-macros -Wno-format-nonliteral)
+
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+ if(NOT GLM_QUIET)
+ message("GLM: GCC - ${CMAKE_CXX_COMPILER_ID} compiler")
+ endif()
+
+ add_compile_options(-O2)
+ add_compile_options(-Wno-long-long)
+
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ if(NOT GLM_QUIET)
+ message("GLM: Intel - ${CMAKE_CXX_COMPILER_ID} compiler")
+ endif()
+
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+ if(NOT GLM_QUIET)
+ message("GLM: Visual C++ - ${CMAKE_CXX_COMPILER_ID} compiler")
+ endif()
+
+ add_compile_options(/W4 /WX)
+ add_compile_options(/wd4309 /wd4324 /wd4389 /wd4127 /wd4267 /wd4146 /wd4201 /wd4464 /wd4514 /wd4701 /wd4820 /wd4365)
+ add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+endif()
+
+function(glmCreateTestGTC NAME)
+ set(SAMPLE_NAME test-${NAME})
+ add_executable(${SAMPLE_NAME} ${NAME}.cpp)
+
+ add_test(
+ NAME ${SAMPLE_NAME}
+ COMMAND $<TARGET_FILE:${SAMPLE_NAME}> )
+ target_link_libraries(${SAMPLE_NAME} PRIVATE glm::glm)
+endfunction()
+
+if(GLM_TEST_ENABLE)
+ add_subdirectory(bug)
+ add_subdirectory(core)
+ add_subdirectory(ext)
+ add_subdirectory(gtc)
+ add_subdirectory(gtx)
+ add_subdirectory(perf)
+endif()
+
+
diff --git a/3rdparty/glm/source/test/bug/CMakeLists.txt b/3rdparty/glm/source/test/bug/CMakeLists.txt
new file mode 100644
index 0000000..26e8569
--- /dev/null
+++ b/3rdparty/glm/source/test/bug/CMakeLists.txt
@@ -0,0 +1 @@
+glmCreateTestGTC(bug_ms_vec_static)
diff --git a/3rdparty/glm/source/test/bug/bug_ms_vec_static.cpp b/3rdparty/glm/source/test/bug/bug_ms_vec_static.cpp
new file mode 100644
index 0000000..7f44e40
--- /dev/null
+++ b/3rdparty/glm/source/test/bug/bug_ms_vec_static.cpp
@@ -0,0 +1,31 @@
+#include <glm/glm.hpp>
+
+#if GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE
+struct vec2;
+
+struct _swizzle
+{
+ char _buffer[1];
+};
+
+struct vec2
+{
+ GLM_CONSTEXPR vec2() :
+ x(0), y(0)
+ {}
+
+ union
+ {
+ struct { float x, y; };
+ struct { _swizzle xx; };
+ };
+};
+#endif
+
+// Visual C++ has a bug generating the error: fatal error C1001: An internal error has occurred in the compiler.
+// vec2 Bar;
+
+int main()
+{
+ return 0;
+}
diff --git a/3rdparty/glm/source/test/cmake/CMakeLists.txt b/3rdparty/glm/source/test/cmake/CMakeLists.txt
new file mode 100644
index 0000000..5bc11ef
--- /dev/null
+++ b/3rdparty/glm/source/test/cmake/CMakeLists.txt
@@ -0,0 +1,8 @@
+cmake_minimum_required(VERSION 3.2 FATAL_ERROR)
+project(test_find_glm)
+
+find_package(glm REQUIRED)
+
+add_executable(test_find_glm test_find_glm.cpp)
+target_link_libraries(test_find_glm glm::glm)
+
diff --git a/3rdparty/glm/source/test/cmake/test_find_glm.cpp b/3rdparty/glm/source/test/cmake/test_find_glm.cpp
new file mode 100644
index 0000000..361b977
--- /dev/null
+++ b/3rdparty/glm/source/test/cmake/test_find_glm.cpp
@@ -0,0 +1,22 @@
+#include <iostream>
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+glm::mat4 camera(float Translate, glm::vec2 const& Rotate)
+{
+ glm::mat4 Projection = glm::perspective(glm::pi<float>() * 0.25f, 4.0f / 3.0f, 0.1f, 100.f);
+ glm::mat4 View = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -Translate));
+ View = glm::rotate(View, Rotate.y, glm::vec3(-1.0f, 0.0f, 0.0f));
+ View = glm::rotate(View, Rotate.x, glm::vec3(0.0f, 1.0f, 0.0f));
+ glm::mat4 Model = glm::scale(glm::mat4(1.0f), glm::vec3(0.5f));
+ return Projection * View * Model;
+}
+
+int main()
+{
+ const glm::mat4 m = camera(1.f, glm::vec2(1.f, 0.5f));
+ std::cout << "matrix diagonal: " << m[0][0] << ", "
+ << m[1][1] << ", " << m[2][2] << ", " << m[3][3] << "\n";
+ return 0;
+}
+
diff --git a/3rdparty/glm/source/test/core/CMakeLists.txt b/3rdparty/glm/source/test/core/CMakeLists.txt
new file mode 100644
index 0000000..6cd57b1
--- /dev/null
+++ b/3rdparty/glm/source/test/core/CMakeLists.txt
@@ -0,0 +1,52 @@
+glmCreateTestGTC(core_cpp_constexpr)
+glmCreateTestGTC(core_cpp_defaulted_ctor)
+glmCreateTestGTC(core_force_aligned_gentypes)
+glmCreateTestGTC(core_force_ctor_init)
+glmCreateTestGTC(core_force_cxx03)
+glmCreateTestGTC(core_force_cxx98)
+glmCreateTestGTC(core_force_arch_unknown)
+glmCreateTestGTC(core_force_compiler_unknown)
+glmCreateTestGTC(core_force_cxx_unknown)
+glmCreateTestGTC(core_force_explicit_ctor)
+glmCreateTestGTC(core_force_inline)
+glmCreateTestGTC(core_force_platform_unknown)
+glmCreateTestGTC(core_force_pure)
+glmCreateTestGTC(core_force_unrestricted_gentype)
+glmCreateTestGTC(core_force_xyzw_only)
+glmCreateTestGTC(core_force_quat_xyzw)
+glmCreateTestGTC(core_type_aligned)
+glmCreateTestGTC(core_type_cast)
+glmCreateTestGTC(core_type_ctor)
+glmCreateTestGTC(core_type_int)
+glmCreateTestGTC(core_type_length)
+glmCreateTestGTC(core_type_mat2x2)
+glmCreateTestGTC(core_type_mat2x3)
+glmCreateTestGTC(core_type_mat2x4)
+glmCreateTestGTC(core_type_mat3x2)
+glmCreateTestGTC(core_type_mat3x3)
+glmCreateTestGTC(core_type_mat3x4)
+glmCreateTestGTC(core_type_mat4x2)
+glmCreateTestGTC(core_type_mat4x3)
+glmCreateTestGTC(core_type_mat4x4)
+glmCreateTestGTC(core_type_vec1)
+glmCreateTestGTC(core_type_vec2)
+glmCreateTestGTC(core_type_vec3)
+glmCreateTestGTC(core_type_vec4)
+glmCreateTestGTC(core_func_common)
+glmCreateTestGTC(core_func_exponential)
+glmCreateTestGTC(core_func_geometric)
+glmCreateTestGTC(core_func_integer)
+glmCreateTestGTC(core_func_integer_bit_count)
+glmCreateTestGTC(core_func_integer_find_lsb)
+glmCreateTestGTC(core_func_integer_find_msb)
+glmCreateTestGTC(core_func_matrix)
+glmCreateTestGTC(core_func_noise)
+glmCreateTestGTC(core_func_packing)
+glmCreateTestGTC(core_func_trigonometric)
+glmCreateTestGTC(core_func_vector_relational)
+glmCreateTestGTC(core_func_swizzle)
+glmCreateTestGTC(core_setup_force_cxx98)
+glmCreateTestGTC(core_setup_force_size_t_length)
+glmCreateTestGTC(core_setup_message)
+glmCreateTestGTC(core_setup_platform_unknown)
+glmCreateTestGTC(core_setup_precision)
diff --git a/3rdparty/glm/source/test/core/core_cpp_constexpr.cpp b/3rdparty/glm/source/test/core/core_cpp_constexpr.cpp
new file mode 100644
index 0000000..3dc0a92
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_cpp_constexpr.cpp
@@ -0,0 +1,750 @@
+#include <glm/glm.hpp>
+
+#if GLM_CONFIG_CONSTEXP == GLM_ENABLE
+
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_int1.hpp>
+#include <glm/ext/vector_bool1.hpp>
+#include <glm/ext/vector_bool4.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/vector_relational.hpp>
+
+static int test_vec1()
+{
+ int Error = 0;
+
+ {
+ constexpr glm::bvec1 B(true);
+ constexpr bool A = glm::all(B);
+ static_assert(A, "GLM: Failed constexpr");
+
+ constexpr glm::bvec1 D(true);
+ constexpr bool C = glm::any(D);
+ static_assert(C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec2 C(true);
+ constexpr glm::bvec2 B(true);
+ static_assert(glm::any(glm::equal(C, B)), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 O(glm::ivec1(1));
+ static_assert(glm::ivec1(1) == O, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 P(1);
+ static_assert(glm::ivec1(1) == P, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 L(glm::ivec2(1, 2));
+ static_assert(glm::ivec1(1) == L, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 M(glm::ivec3(1, 2, 3));
+ static_assert(glm::ivec1(1) == M, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 N(glm::ivec4(1, 2, 3, 4));
+ static_assert(glm::ivec1(1) == N, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(1);
+ static_assert(A[0] == 1, "GLM: Failed constexpr");
+ static_assert(glm::vec1(1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec1::length() == 1, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec1 A1(true);
+ constexpr glm::bvec1 A2(true);
+ constexpr glm::bvec1 B1(false);
+ constexpr glm::bvec1 B2(false);
+ static_assert(A1 == A2 && B1 == B2, "GLM: Failed constexpr");
+ static_assert(A1 == A2 || B1 == B2, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(1);
+ constexpr glm::ivec1 B = A + 1;
+ constexpr glm::ivec1 C(3);
+ static_assert(A + B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 D = +A;
+ static_assert(D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(3);
+ constexpr glm::ivec1 B = A - 1;
+ constexpr glm::ivec1 C(1);
+ static_assert(A - B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 D = -A;
+ static_assert(-D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(3);
+ constexpr glm::ivec1 B = A * 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B * C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(3);
+ constexpr glm::ivec1 B = A / 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B / C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(3);
+ constexpr glm::ivec1 B = A % 2;
+ constexpr glm::ivec1 C(1);
+ static_assert(B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 D(2);
+ static_assert(A % D == C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(1);
+ constexpr glm::ivec1 B = A & 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A & C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(1);
+ constexpr glm::ivec1 B = A | 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A | C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(1);
+ constexpr glm::ivec1 B = A ^ 0;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(0);
+ static_assert(A == (A ^ C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(1);
+ constexpr glm::ivec1 B = A << 1;
+ static_assert(B == glm::ivec1(2), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == (A << C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(2);
+ constexpr glm::ivec1 B = A >> 1;
+ static_assert(B == glm::ivec1(1), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == A >> C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec1 A(~0);
+ constexpr glm::ivec1 B = ~A;
+ static_assert(A == ~B, "GLM: Failed constexpr");
+ }
+
+ return Error;
+}
+
+static int test_vec2()
+{
+ int Error = 0;
+
+ {
+ constexpr glm::bvec2 B(true);
+ constexpr bool A = glm::all(B);
+ static_assert(A, "GLM: Failed constexpr");
+
+ constexpr glm::bvec2 D(true, false);
+ constexpr bool C = glm::any(D);
+ static_assert(C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec2 C(true);
+ constexpr glm::bvec2 B(true, false);
+ static_assert(glm::any(glm::equal(C, B)), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 O(glm::ivec1(1));
+ static_assert(glm::ivec2(1) == O, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 A(1);
+ static_assert(glm::ivec2(1) == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 F(glm::ivec1(1), glm::ivec1(2));
+ static_assert(glm::ivec2(1, 2) == F, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 G(1, glm::ivec1(2));
+ static_assert(glm::ivec2(1, 2) == G, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 H(glm::ivec1(1), 2);
+ static_assert(glm::ivec2(1, 2) == H, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 I(1, 2);
+ static_assert(glm::ivec2(1, 2) == I, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 L(glm::ivec2(1, 2));
+ static_assert(glm::ivec2(1, 2) == L, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 M(glm::ivec3(1, 2, 3));
+ static_assert(glm::ivec2(1, 2) == M, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 N(glm::ivec4(1, 2, 3, 4));
+ static_assert(glm::ivec2(1, 2) == N, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(1);
+ static_assert(A[0] == 1, "GLM: Failed constexpr");
+ static_assert(glm::vec2(1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec2(1.0f, -1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec2(1.0f, -1.0f).y < 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec2::length() == 2, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec2 A1(true);
+ constexpr glm::bvec2 A2(true);
+ constexpr glm::bvec2 B1(false);
+ constexpr glm::bvec2 B2(false);
+ static_assert(A1 == A2 && B1 == B2, "GLM: Failed constexpr");
+ static_assert(A1 == A2 || B1 == B2, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(1);
+ constexpr glm::ivec2 B = A + 1;
+ constexpr glm::ivec2 C(3);
+ static_assert(A + B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 D = +A;
+ static_assert(D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(3);
+ constexpr glm::ivec2 B = A - 1;
+ constexpr glm::ivec2 C(1);
+ static_assert(A - B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 D = -A;
+ static_assert(-D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(3);
+ constexpr glm::ivec2 B = A * 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 C(1);
+ static_assert(B * C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(3);
+ constexpr glm::ivec2 B = A / 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec2 C(1);
+ static_assert(B / C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(3);
+ constexpr glm::ivec2 B = A % 2;
+ constexpr glm::ivec2 C(1);
+ static_assert(B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 D(2);
+ static_assert(A % D == C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(1);
+ constexpr glm::ivec2 B = A & 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A & C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(1);
+ constexpr glm::ivec2 B = A | 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A | C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(1);
+ constexpr glm::ivec2 B = A ^ 0;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(0);
+ static_assert(A == (A ^ C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(1);
+ constexpr glm::ivec2 B = A << 1;
+ static_assert(B == glm::ivec2(2), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == (A << C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(2);
+ constexpr glm::ivec2 B = A >> 1;
+ static_assert(B == glm::ivec2(1), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == A >> C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec2 A(~0);
+ constexpr glm::ivec2 B = ~A;
+ static_assert(A == ~B, "GLM: Failed constexpr");
+ }
+
+ return Error;
+}
+
+static int test_vec3()
+{
+ int Error = 0;
+
+ {
+ constexpr glm::bvec3 B(true);
+ constexpr bool A = glm::all(B);
+ static_assert(A, "GLM: Failed constexpr");
+
+ constexpr glm::bvec3 D(true, false, true);
+ constexpr bool C = glm::any(D);
+ static_assert(C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec3 C(true);
+ constexpr glm::bvec3 B(true, false, true);
+ static_assert(glm::any(glm::equal(C, B)), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 O(glm::ivec1(1));
+ static_assert(glm::ivec3(1) == O, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 A(1);
+ static_assert(glm::ivec3(1) == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 B(glm::ivec2(1, 2), 3);
+ static_assert(glm::ivec3(1, 2, 3) == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 C(1, glm::ivec2(2, 3));
+ static_assert(glm::ivec3(1, 2, 3) == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 D(glm::ivec1(1), glm::ivec2(2, 3));
+ static_assert(glm::ivec3(1, 2, 3) == D, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 E(glm::ivec2(1, 2), glm::ivec1(3));
+ static_assert(glm::ivec3(1, 2, 3) == E, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 F(glm::ivec1(1), glm::ivec1(2), glm::ivec1(3));
+ static_assert(glm::ivec3(1, 2, 3) == F, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 G(1, glm::ivec1(2), glm::ivec1(3));
+ static_assert(glm::ivec3(1, 2, 3) == G, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 H(glm::ivec1(1), 2, glm::ivec1(3));
+ static_assert(glm::ivec3(1, 2, 3) == H, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 I(1, 2, glm::ivec1(3));
+ static_assert(glm::ivec3(1, 2, 3) == I, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 J(glm::ivec1(1), glm::ivec1(2), 3);
+ static_assert(glm::ivec3(1, 2, 3) == J, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 K(1, glm::ivec1(2), 3);
+ static_assert(glm::ivec3(1, 2, 3) == K, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 L(glm::ivec1(1), 2, 3);
+ static_assert(glm::ivec3(1, 2, 3) == L, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 M(1, 2, 3);
+ static_assert(glm::ivec3(1, 2, 3) == M, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 N(glm::ivec4(1, 2, 3, 4));
+ static_assert(glm::ivec3(1, 2, 3) == N, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 const A(1);
+ static_assert(A[0] == 1, "GLM: Failed constexpr");
+ static_assert(glm::vec3(1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec3(1.0f, -1.0f, -1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec3(1.0f, -1.0f, -1.0f).y < 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec3::length() == 3, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec3 A1(true);
+ constexpr glm::bvec3 A2(true);
+ constexpr glm::bvec3 B1(false);
+ constexpr glm::bvec3 B2(false);
+ static_assert(A1 == A2 && B1 == B2, "GLM: Failed constexpr");
+ static_assert(A1 == A2 || B1 == B2, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(1);
+ constexpr glm::ivec3 B = A + 1;
+ constexpr glm::ivec3 C(3);
+ static_assert(A + B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 D = +A;
+ static_assert(D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(3);
+ constexpr glm::ivec3 B = A - 1;
+ constexpr glm::ivec3 C(1);
+ static_assert(A - B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 D = -A;
+ static_assert(-D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(3);
+ constexpr glm::ivec3 B = A * 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 C(1);
+ static_assert(B * C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(3);
+ constexpr glm::ivec3 B = A / 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec3 C(1);
+ static_assert(B / C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(3);
+ constexpr glm::ivec3 B = A % 2;
+ constexpr glm::ivec3 C(1);
+ static_assert(B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 D(2);
+ static_assert(A % D == C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(1);
+ constexpr glm::ivec3 B = A & 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A & C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(1);
+ constexpr glm::ivec3 B = A | 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A | C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(1);
+ constexpr glm::ivec3 B = A ^ 0;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(0);
+ static_assert(A == (A ^ C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(1);
+ constexpr glm::ivec3 B = A << 1;
+ static_assert(B == glm::ivec3(2), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == (A << C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(2);
+ constexpr glm::ivec3 B = A >> 1;
+ static_assert(B == glm::ivec3(1), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == A >> C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec3 A(~0);
+ constexpr glm::ivec3 B = ~A;
+ static_assert(A == ~B, "GLM: Failed constexpr");
+ }
+
+ return Error;
+}
+
+static int test_vec4()
+{
+ int Error = 0;
+
+ {
+ constexpr glm::bvec4 B(true);
+ constexpr bool A = glm::all(B);
+ static_assert(A, "GLM: Failed constexpr");
+
+ constexpr glm::bvec4 D(true, false, true, false);
+ constexpr bool C = glm::any(D);
+ static_assert(C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec4 C(true);
+ constexpr glm::bvec4 B(true, false, true, false);
+ static_assert(glm::any(glm::equal(C, B)), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 O(glm::ivec4(1));
+ static_assert(glm::ivec4(1) == O, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 A(1);
+ static_assert(glm::ivec4(1) == A, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 N(glm::ivec4(1, 2, 3, 4));
+ static_assert(glm::ivec4(1, 2, 3, 4) == N, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(glm::ivec3(1, 2, 3), 4);
+ static_assert(glm::ivec4(1, 2, 3, 4) == A, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 B(glm::ivec2(1, 2), glm::ivec2(3, 4));
+ static_assert(glm::ivec4(1, 2, 3, 4) == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 C(1, glm::ivec3(2, 3, 4));
+ static_assert(glm::ivec4(1, 2, 3, 4) == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 D(glm::ivec1(1), glm::ivec2(2, 3), glm::ivec1(4));
+ static_assert(glm::ivec4(1, 2, 3, 4) == D, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 E(glm::ivec2(1, 2), glm::ivec1(3), glm::ivec1(4));
+ static_assert(glm::ivec4(1, 2, 3, 4) == E, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 F(glm::ivec1(1), glm::ivec1(2), glm::ivec2(3, 4));
+ static_assert(glm::ivec4(1, 2, 3, 4) == F, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(1);
+ static_assert(A[0] == 1, "GLM: Failed constexpr");
+ static_assert(glm::ivec4(1).x > 0, "GLM: Failed constexpr");
+ static_assert(glm::ivec4(1.0f, -1.0f, -1.0f, 1.0f).x > 0, "GLM: Failed constexpr");
+ static_assert(glm::ivec4(1.0f, -1.0f, -1.0f, 1.0f).y < 0, "GLM: Failed constexpr");
+ static_assert(glm::ivec4::length() == 4, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::bvec4 A1(true);
+ constexpr glm::bvec4 A2(true);
+ constexpr glm::bvec4 B1(false);
+ constexpr glm::bvec4 B2(false);
+ static_assert(A1 == A2 && B1 == B2, "GLM: Failed constexpr");
+ static_assert(A1 == A2 || B1 == B2, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(1);
+ constexpr glm::ivec4 B = A + 1;
+ constexpr glm::ivec4 C(3);
+ static_assert(A + B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 D = +A;
+ static_assert(D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(3);
+ constexpr glm::ivec4 B = A - 1;
+ constexpr glm::ivec4 C(1);
+ static_assert(A - B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 D = -A;
+ static_assert(-D == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(3);
+ constexpr glm::ivec4 B = A * 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 C(1);
+ static_assert(B * C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(3);
+ constexpr glm::ivec4 B = A / 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec4 C(1);
+ static_assert(B / C == A, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(3);
+ constexpr glm::ivec4 B = A % 2;
+ constexpr glm::ivec4 C(1);
+ static_assert(B == C, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 D(2);
+ static_assert(A % D == C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(1);
+ constexpr glm::ivec4 B = A & 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A & C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(1);
+ constexpr glm::ivec4 B = A | 1;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(A == (A | C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(1);
+ constexpr glm::ivec4 B = A ^ 0;
+ static_assert(A == B, "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(0);
+ static_assert(A == (A ^ C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(1);
+ constexpr glm::ivec4 B = A << 1;
+ static_assert(B == glm::ivec4(2), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == (A << C), "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(2);
+ constexpr glm::ivec4 B = A >> 1;
+ static_assert(B == glm::ivec4(1), "GLM: Failed constexpr");
+
+ constexpr glm::ivec1 C(1);
+ static_assert(B == A >> C, "GLM: Failed constexpr");
+ }
+
+ {
+ constexpr glm::ivec4 A(~0);
+ constexpr glm::ivec4 B = ~A;
+ static_assert(A == ~B, "GLM: Failed constexpr");
+ }
+
+ return Error;
+}
+
+static int test_quat()
+{
+ int Error = 0;
+
+ {
+ static_assert(glm::quat::length() == 4, "GLM: Failed constexpr");
+ static_assert(glm::quat(1.0f, glm::vec3(0.0f)).w > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::quat(1.0f, 0.0f, 0.0f, 0.0f).w > 0.0f, "GLM: Failed constexpr");
+
+ glm::quat constexpr Q = glm::identity<glm::quat>();
+ static_assert(Q.x - glm::quat(1.0f, glm::vec3(0.0f)).x <= glm::epsilon<float>(), "GLM: Failed constexpr");
+ }
+
+ return Error;
+}
+
+static int test_mat2x2()
+{
+ int Error = 0;
+
+ static_assert(glm::mat2x2::length() == 2, "GLM: Failed constexpr");
+
+ return Error;
+}
+
+#endif//GLM_CONFIG_CONSTEXP == GLM_ENABLE
+
+int main()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_CONSTEXP == GLM_ENABLE
+ Error += test_vec1();
+ Error += test_vec2();
+ Error += test_vec3();
+ Error += test_vec4();
+ Error += test_quat();
+ Error += test_mat2x2();
+# endif//GLM_CONFIG_CONSTEXP == GLM_ENABLE
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_cpp_defaulted_ctor.cpp b/3rdparty/glm/source/test/core/core_cpp_defaulted_ctor.cpp
new file mode 100644
index 0000000..07afd9c
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_cpp_defaulted_ctor.cpp
@@ -0,0 +1,145 @@
+#include <glm/glm.hpp>
+
+#if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <cstring>
+
+static int test_vec_memcpy()
+{
+ int Error = 0;
+
+ {
+ glm::ivec1 const A = glm::ivec1(76);
+ glm::ivec1 B;
+ std::memcpy(&B, &A, sizeof(glm::ivec1));
+ Error += B == A ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 const A = glm::ivec2(76);
+ glm::ivec2 B;
+ std::memcpy(&B, &A, sizeof(glm::ivec2));
+ Error += B == A ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 const A = glm::ivec3(76);
+ glm::ivec3 B;
+ std::memcpy(&B, &A, sizeof(glm::ivec3));
+ Error += B == A ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 const A = glm::ivec4(76);
+ glm::ivec4 B;
+ std::memcpy(&B, &A, sizeof(glm::ivec4));
+ Error += B == A ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_mat_memcpy()
+{
+ int Error = 0;
+
+ {
+ glm::mat2x2 const A = glm::mat2x2(76);
+ glm::mat2x2 B;
+ std::memcpy(&B, &A, sizeof(glm::mat2x2));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat2x3 const A = glm::mat2x3(76);
+ glm::mat2x3 B;
+ std::memcpy(&B, &A, sizeof(glm::mat2x3));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat2x4 const A = glm::mat2x4(76);
+ glm::mat2x4 B;
+ std::memcpy(&B, &A, sizeof(glm::mat2x4));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x2 const A = glm::mat3x2(76);
+ glm::mat3x2 B;
+ std::memcpy(&B, &A, sizeof(glm::mat3x2));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x3 const A = glm::mat3x3(76);
+ glm::mat3x3 B;
+ std::memcpy(&B, &A, sizeof(glm::mat3x3));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x4 const A = glm::mat3x4(76);
+ glm::mat3x4 B;
+ std::memcpy(&B, &A, sizeof(glm::mat3x4));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x2 const A = glm::mat4x2(76);
+ glm::mat4x2 B;
+ std::memcpy(&B, &A, sizeof(glm::mat4x2));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x3 const A = glm::mat4x3(76);
+ glm::mat4x3 B;
+ std::memcpy(&B, &A, sizeof(glm::mat4x3));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x4 const A = glm::mat4x4(76);
+ glm::mat4x4 B;
+ std::memcpy(&B, &A, sizeof(glm::mat4x4));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_quat_memcpy()
+{
+ int Error = 0;
+
+ {
+ glm::quat const A = glm::quat(1, 0, 0, 0);
+ glm::quat B;
+ std::memcpy(&B, &A, sizeof(glm::quat));
+ Error += glm::all(glm::equal(B, A, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+#endif//GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+
+int main()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+ Error += test_vec_memcpy();
+ Error += test_mat_memcpy();
+ Error += test_quat_memcpy();
+# endif//GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_force_aligned_gentypes.cpp b/3rdparty/glm/source/test/core/core_force_aligned_gentypes.cpp
new file mode 100644
index 0000000..70713c4
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_aligned_gentypes.cpp
@@ -0,0 +1,10 @@
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with this configuration.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_arch_unknown.cpp b/3rdparty/glm/source/test/core/core_force_arch_unknown.cpp
new file mode 100644
index 0000000..45b51bf
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_arch_unknown.cpp
@@ -0,0 +1,14 @@
+#ifndef GLM_FORCE_ARCH_UNKNOWN
+# define GLM_FORCE_ARCH_UNKNOWN
+#endif
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_ARCH_UNKNOWN.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_compiler_unknown.cpp b/3rdparty/glm/source/test/core/core_force_compiler_unknown.cpp
new file mode 100644
index 0000000..44d7fc3
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_compiler_unknown.cpp
@@ -0,0 +1,14 @@
+#ifndef GLM_FORCE_COMPILER_UNKNOWN
+# define GLM_FORCE_COMPILER_UNKNOWN
+#endif
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_COMPILER_UNKNOWN.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_ctor_init.cpp b/3rdparty/glm/source/test/core/core_force_ctor_init.cpp
new file mode 100644
index 0000000..298b7ed
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_ctor_init.cpp
@@ -0,0 +1,139 @@
+#define GLM_FORCE_CTOR_INIT
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+static int test_vec()
+{
+ int Error = 0;
+
+ glm::vec1 V1;
+ Error += glm::all(glm::equal(V1, glm::vec1(0), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dvec1 U1;
+ Error += glm::all(glm::equal(U1, glm::dvec1(0), glm::epsilon<double>())) ? 0 : 1;
+
+ glm::vec2 V2;
+ Error += glm::all(glm::equal(V2, glm::vec2(0, 0), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dvec2 U2;
+ Error += glm::all(glm::equal(U2, glm::dvec2(0, 0), glm::epsilon<double>())) ? 0 : 1;
+
+ glm::vec3 V3;
+ Error += glm::all(glm::equal(V3, glm::vec3(0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dvec3 U3;
+ Error += glm::all(glm::equal(U3, glm::dvec3(0, 0, 0), glm::epsilon<double>())) ? 0 : 1;
+
+ glm::vec4 V4;
+ Error += glm::all(glm::equal(V4, glm::vec4(0, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dvec4 U4;
+ Error += glm::all(glm::equal(U4, glm::dvec4(0, 0, 0, 0), glm::epsilon<double>())) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mat()
+{
+ int Error = 0;
+
+ {
+ glm::mat2x2 F;
+ Error += glm::all(glm::equal(F, glm::mat2x2(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat2x2 D;
+ Error += glm::all(glm::equal(D, glm::dmat2x2(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat2x3 F;
+ Error += glm::all(glm::equal(F, glm::mat2x3(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat2x3 D;
+ Error += glm::all(glm::equal(D, glm::dmat2x3(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat2x4 F;
+ Error += glm::all(glm::equal(F, glm::mat2x4(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat2x4 D;
+ Error += glm::all(glm::equal(D, glm::dmat2x4(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x2 F;
+ Error += glm::all(glm::equal(F, glm::mat3x2(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat3x2 D;
+ Error += glm::all(glm::equal(D, glm::dmat3x2(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x3 F;
+ Error += glm::all(glm::equal(F, glm::mat3x3(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat3x3 D;
+ Error += glm::all(glm::equal(D, glm::dmat3x3(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x4 F;
+ Error += glm::all(glm::equal(F, glm::mat3x4(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat3x4 D;
+ Error += glm::all(glm::equal(D, glm::dmat3x4(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x2 F;
+ Error += glm::all(glm::equal(F, glm::mat4x2(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat4x2 D;
+ Error += glm::all(glm::equal(D, glm::dmat4x2(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x3 F;
+ Error += glm::all(glm::equal(F, glm::mat4x3(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat4x3 D;
+ Error += glm::all(glm::equal(D, glm::dmat4x3(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x4 F;
+ Error += glm::all(glm::equal(F, glm::mat4x4(1), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dmat4x4 D;
+ Error += glm::all(glm::equal(D, glm::dmat4x4(1), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_qua()
+{
+ int Error = 0;
+
+ glm::quat F;
+ Error += glm::all(glm::equal(F, glm::quat(1, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::dquat D;
+ Error += glm::all(glm::equal(D, glm::dquat(1, 0, 0, 0), glm::epsilon<double>())) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_vec();
+ Error += test_mat();
+ Error += test_qua();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_force_cxx03.cpp b/3rdparty/glm/source/test/core/core_force_cxx03.cpp
new file mode 100644
index 0000000..fc6e9c5
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_cxx03.cpp
@@ -0,0 +1,14 @@
+#ifndef GLM_FORCE_CXX03
+# define GLM_FORCE_CXX03
+#endif
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_CXX03.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_cxx98.cpp b/3rdparty/glm/source/test/core/core_force_cxx98.cpp
new file mode 100644
index 0000000..42a5c25
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_cxx98.cpp
@@ -0,0 +1,14 @@
+#ifndef GLM_FORCE_CXX98
+# define GLM_FORCE_CXX98
+#endif
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_CXX98.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_cxx_unknown.cpp b/3rdparty/glm/source/test/core/core_force_cxx_unknown.cpp
new file mode 100644
index 0000000..62299d6
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_cxx_unknown.cpp
@@ -0,0 +1,14 @@
+#ifndef GLM_FORCE_CXX_UNKNOWN
+# define GLM_FORCE_CXX_UNKNOWN
+#endif
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_CXX_UNKNOWN.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_depth_zero_to_one.cpp b/3rdparty/glm/source/test/core/core_force_depth_zero_to_one.cpp
new file mode 100644
index 0000000..23b3615
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_depth_zero_to_one.cpp
@@ -0,0 +1,12 @@
+#define GLM_FORCE_DEPTH_ZERO_TO_ONE
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_DEPTH_ZERO_TO_ONE.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_explicit_ctor.cpp b/3rdparty/glm/source/test/core/core_force_explicit_ctor.cpp
new file mode 100644
index 0000000..7af5b79
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_explicit_ctor.cpp
@@ -0,0 +1,17 @@
+#define GLM_FORCE_EXPLICIT_CTOR
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+int main()
+{
+ int Error = 0;
+
+ glm::ivec4 B(1);
+ Error += B == glm::ivec4(1) ? 0 : 1;
+
+ //glm::vec4 A = B;
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_force_inline.cpp b/3rdparty/glm/source/test/core/core_force_inline.cpp
new file mode 100644
index 0000000..cd23fd9
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_inline.cpp
@@ -0,0 +1,12 @@
+#define GLM_FORCE_INLINE
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_INLINE.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_left_handed.cpp b/3rdparty/glm/source/test/core/core_force_left_handed.cpp
new file mode 100644
index 0000000..b7ec31b
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_left_handed.cpp
@@ -0,0 +1,12 @@
+#define GLM_FORCE_LEFT_HANDED
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_LEFT_HANDED.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_platform_unknown.cpp b/3rdparty/glm/source/test/core/core_force_platform_unknown.cpp
new file mode 100644
index 0000000..fb7fa75
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_platform_unknown.cpp
@@ -0,0 +1,14 @@
+#ifndef GLM_FORCE_PLATFORM_UNKNOWN
+# define GLM_FORCE_PLATFORM_UNKNOWN
+#endif
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_PLATFORM_UNKNOWN.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_pure.cpp b/3rdparty/glm/source/test/core/core_force_pure.cpp
new file mode 100644
index 0000000..a32a4ed
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_pure.cpp
@@ -0,0 +1,434 @@
+#ifndef GLM_FORCE_PURE
+# define GLM_FORCE_PURE
+#endif//GLM_FORCE_PURE
+#define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+#define GLM_FORCE_SWIZZLE
+#include <glm/ext/vector_relational.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <ctime>
+#include <vector>
+
+static int test_vec4_ctor()
+{
+ int Error = 0;
+
+ {
+ glm::ivec4 A(1, 2, 3, 4);
+ glm::ivec4 B(A);
+ Error += glm::all(glm::equal(A, B)) ? 0 : 1;
+ }
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::vec4>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::vec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::vec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dvec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ivec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::uvec4>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::vec4>::value ? 0 : 1;
+# endif
+
+#if GLM_HAS_INITIALIZER_LISTS
+ {
+ glm::vec4 a{ 0, 1, 2, 3 };
+ std::vector<glm::vec4> v = {
+ {0, 1, 2, 3},
+ {4, 5, 6, 7},
+ {8, 9, 0, 1}};
+ }
+
+ {
+ glm::dvec4 a{ 0, 1, 2, 3 };
+ std::vector<glm::dvec4> v = {
+ {0, 1, 2, 3},
+ {4, 5, 6, 7},
+ {8, 9, 0, 1}};
+ }
+#endif
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec4 A = glm::vec4(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A.xyzw;
+ glm::ivec4 C(A.xyzw);
+ glm::ivec4 D(A.xyzw());
+ glm::ivec4 E(A.x, A.yzw);
+ glm::ivec4 F(A.x, A.yzw());
+ glm::ivec4 G(A.xyz, A.w);
+ glm::ivec4 H(A.xyz(), A.w);
+ glm::ivec4 I(A.xy, A.zw);
+ glm::ivec4 J(A.xy(), A.zw());
+ glm::ivec4 K(A.x, A.y, A.zw);
+ glm::ivec4 L(A.x, A.yz, A.w);
+ glm::ivec4 M(A.xy, A.z, A.w);
+
+ Error += glm::all(glm::equal(A, B)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, E)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, F)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, G)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, H)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, I)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, J)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, K)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, L)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, M)) ? 0 : 1;
+ }
+# endif
+
+# if GLM_CONFIG_SWIZZLE
+ {
+ glm::ivec4 A = glm::vec4(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A.xyzw();
+ glm::ivec4 C(A.xyzw());
+ glm::ivec4 D(A.xyzw());
+ glm::ivec4 E(A.x, A.yzw());
+ glm::ivec4 F(A.x, A.yzw());
+ glm::ivec4 G(A.xyz(), A.w);
+ glm::ivec4 H(A.xyz(), A.w);
+ glm::ivec4 I(A.xy(), A.zw());
+ glm::ivec4 J(A.xy(), A.zw());
+ glm::ivec4 K(A.x, A.y, A.zw());
+ glm::ivec4 L(A.x, A.yz(), A.w);
+ glm::ivec4 M(A.xy(), A.z, A.w);
+
+ Error += glm::all(glm::equal(A, B)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, E)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, F)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, G)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, H)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, I)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, J)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, K)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, L)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, M)) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE
+
+ {
+ glm::ivec4 A(1);
+ glm::ivec4 B(1, 1, 1, 1);
+
+ Error += A == B ? 0 : 1;
+ }
+
+ {
+ std::vector<glm::ivec4> Tests;
+ Tests.push_back(glm::ivec4(glm::ivec2(1, 2), 3, 4));
+ Tests.push_back(glm::ivec4(1, glm::ivec2(2, 3), 4));
+ Tests.push_back(glm::ivec4(1, 2, glm::ivec2(3, 4)));
+ Tests.push_back(glm::ivec4(glm::ivec3(1, 2, 3), 4));
+ Tests.push_back(glm::ivec4(1, glm::ivec3(2, 3, 4)));
+ Tests.push_back(glm::ivec4(glm::ivec2(1, 2), glm::ivec2(3, 4)));
+ Tests.push_back(glm::ivec4(1, 2, 3, 4));
+ Tests.push_back(glm::ivec4(glm::ivec4(1, 2, 3, 4)));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ Error += Tests[i] == glm::ivec4(1, 2, 3, 4) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_bvec4_ctor()
+{
+ int Error = 0;
+
+ glm::bvec4 const A(true);
+ glm::bvec4 const B(true);
+ glm::bvec4 const C(false);
+ glm::bvec4 const D = A && B;
+ glm::bvec4 const E = A && C;
+ glm::bvec4 const F = A || C;
+
+ Error += D == glm::bvec4(true) ? 0 : 1;
+ Error += E == glm::bvec4(false) ? 0 : 1;
+ Error += F == glm::bvec4(true) ? 0 : 1;
+
+ bool const G = A == C;
+ bool const H = A != C;
+
+ Error += !G ? 0 : 1;
+ Error += H ? 0 : 1;
+
+ return Error;
+}
+
+static int test_vec4_operators()
+{
+ int Error = 0;
+
+ {
+ glm::ivec4 A(1);
+ glm::ivec4 B(1);
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::vec4 const B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ glm::vec4 const C = A + B;
+ Error += glm::all(glm::equal(C, glm::vec4(5, 7, 9, 11), 0.001f)) ? 0 : 1;
+
+ glm::vec4 D = B - A;
+ Error += glm::all(glm::equal(D, glm::vec4(3, 3, 3, 3), 0.001f)) ? 0 : 1;
+
+ glm::vec4 E = A * B;
+ Error += glm::all(glm::equal(E, glm::vec4(4, 10, 18, 28), 0.001f)) ? 0 : 1;
+
+ glm::vec4 F = B / A;
+ Error += glm::all(glm::equal(F, glm::vec4(4, 2.5, 2, 7.0f / 4.0f), 0.001f)) ? 0 : 1;
+
+ glm::vec4 G = A + 1.0f;
+ Error += glm::all(glm::equal(G, glm::vec4(2, 3, 4, 5), 0.001f)) ? 0 : 1;
+
+ glm::vec4 H = B - 1.0f;
+ Error += glm::all(glm::equal(H, glm::vec4(3, 4, 5, 6), 0.001f)) ? 0 : 1;
+
+ glm::vec4 I = A * 2.0f;
+ Error += glm::all(glm::equal(I, glm::vec4(2, 4, 6, 8), 0.001f)) ? 0 : 1;
+
+ glm::vec4 J = B / 2.0f;
+ Error += glm::all(glm::equal(J, glm::vec4(2, 2.5, 3, 3.5), 0.001f)) ? 0 : 1;
+
+ glm::vec4 K = 1.0f + A;
+ Error += glm::all(glm::equal(K, glm::vec4(2, 3, 4, 5), 0.001f)) ? 0 : 1;
+
+ glm::vec4 L = 1.0f - B;
+ Error += glm::all(glm::equal(L, glm::vec4(-3, -4, -5, -6), 0.001f)) ? 0 : 1;
+
+ glm::vec4 M = 2.0f * A;
+ Error += glm::all(glm::equal(M, glm::vec4(2, 4, 6, 8), 0.001f)) ? 0 : 1;
+
+ glm::vec4 const N = 2.0f / B;
+ Error += glm::all(glm::equal(N, glm::vec4(0.5, 2.0 / 5.0, 2.0 / 6.0, 2.0 / 7.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ A += B;
+ Error += A == glm::ivec4(5, 7, 9, 11) ? 0 : 1;
+
+ A += 1;
+ Error += A == glm::ivec4(6, 8, 10, 12) ? 0 : 1;
+ }
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ B -= A;
+ Error += B == glm::ivec4(3, 3, 3, 3) ? 0 : 1;
+
+ B -= 1;
+ Error += B == glm::ivec4(2, 2, 2, 2) ? 0 : 1;
+ }
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ A *= B;
+ Error += A == glm::ivec4(4, 10, 18, 28) ? 0 : 1;
+
+ A *= 2;
+ Error += A == glm::ivec4(8, 20, 36, 56) ? 0 : 1;
+ }
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 4.0f, 6.0f, 8.0f);
+
+ B /= A;
+ Error += B == glm::ivec4(4, 2, 2, 2) ? 0 : 1;
+
+ B /= 2;
+ Error += B == glm::ivec4(2, 1, 1, 1) ? 0 : 1;
+ }
+ {
+ glm::ivec4 B(2);
+
+ B /= B.y;
+ Error += B == glm::ivec4(1) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = -A;
+ Error += B == glm::ivec4(-1.0f, -2.0f, -3.0f, -4.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = --A;
+ Error += B == glm::ivec4(0.0f, 1.0f, 2.0f, 3.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A--;
+ Error += B == glm::ivec4(1.0f, 2.0f, 3.0f, 4.0f) ? 0 : 1;
+ Error += A == glm::ivec4(0.0f, 1.0f, 2.0f, 3.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = ++A;
+ Error += B == glm::ivec4(2.0f, 3.0f, 4.0f, 5.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A++;
+ Error += B == glm::ivec4(1.0f, 2.0f, 3.0f, 4.0f) ? 0 : 1;
+ Error += A == glm::ivec4(2.0f, 3.0f, 4.0f, 5.0f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_vec4_equal()
+{
+ int Error = 0;
+
+ {
+ glm::uvec4 const A(1, 2, 3, 4);
+ glm::uvec4 const B(1, 2, 3, 4);
+ Error += A == B ? 0 : 1;
+ Error += A != B ? 1 : 0;
+ }
+
+ {
+ glm::ivec4 const A(1, 2, 3, 4);
+ glm::ivec4 const B(1, 2, 3, 4);
+ Error += A == B ? 0 : 1;
+ Error += A != B ? 1 : 0;
+ }
+
+ return Error;
+}
+
+static int test_vec4_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec4) == sizeof(glm::lowp_vec4) ? 0 : 1;
+ Error += sizeof(glm::vec4) == sizeof(glm::mediump_vec4) ? 0 : 1;
+ Error += sizeof(glm::vec4) == sizeof(glm::highp_vec4) ? 0 : 1;
+ Error += 16 == sizeof(glm::mediump_vec4) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::lowp_dvec4) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::mediump_dvec4) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::highp_dvec4) ? 0 : 1;
+ Error += 32 == sizeof(glm::highp_dvec4) ? 0 : 1;
+ Error += glm::vec4().length() == 4 ? 0 : 1;
+ Error += glm::dvec4().length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_vec4_swizzle_partial()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ glm::ivec4 A(1, 2, 3, 4);
+
+ {
+ glm::ivec4 B(A.xy, A.zw);
+ Error += A == B ? 0 : 1;
+ }
+ {
+ glm::ivec4 B(A.xy, 3, 4);
+ Error += A == B ? 0 : 1;
+ }
+ {
+ glm::ivec4 B(1, A.yz, 4);
+ Error += A == B ? 0 : 1;
+ }
+ {
+ glm::ivec4 B(1, 2, A.zw);
+ Error += A == B ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 B(A.xyz, 4);
+ Error += A == B ? 0 : 1;
+ }
+ {
+ glm::ivec4 B(1, A.yzw);
+ Error += A == B ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+static int test_operator_increment()
+{
+ int Error(0);
+
+ glm::ivec4 v0(1);
+ glm::ivec4 v1(v0);
+ glm::ivec4 v2(v0);
+ glm::ivec4 v3 = ++v1;
+ glm::ivec4 v4 = v2++;
+
+ Error += glm::all(glm::equal(v0, v4)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v2)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v3)) ? 0 : 1;
+
+ int i0(1);
+ int i1(i0);
+ int i2(i0);
+ int i3 = ++i1;
+ int i4 = i2++;
+
+ Error += i0 == i4 ? 0 : 1;
+ Error += i1 == i2 ? 0 : 1;
+ Error += i1 == i3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_vec4_simd()
+{
+ int Error = 0;
+
+ glm::vec4 const a(std::clock(), std::clock(), std::clock(), std::clock());
+ glm::vec4 const b(std::clock(), std::clock(), std::clock(), std::clock());
+
+ glm::vec4 const c(b * a);
+ glm::vec4 const d(a + c);
+
+ Error += glm::all(glm::greaterThanEqual(d, glm::vec4(0))) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_vec4_ctor();
+ Error += test_bvec4_ctor();
+ Error += test_vec4_size();
+ Error += test_vec4_operators();
+ Error += test_vec4_equal();
+ Error += test_vec4_swizzle_partial();
+ Error += test_vec4_simd();
+ Error += test_operator_increment();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_force_quat_xyzw.cpp b/3rdparty/glm/source/test/core/core_force_quat_xyzw.cpp
new file mode 100644
index 0000000..7d5281c
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_quat_xyzw.cpp
@@ -0,0 +1,13 @@
+#define GLM_FORCE_QUAT_DATA_XYZW
+#define GLM_FORCE_INLINE
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_QUAT_DATA_XYZW.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_size_t_length.cpp b/3rdparty/glm/source/test/core/core_force_size_t_length.cpp
new file mode 100644
index 0000000..19dac89
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_size_t_length.cpp
@@ -0,0 +1,12 @@
+#define GLM_FORCE_SIZE_T_LENGTH
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_SIZE_T_LENGTH.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_unrestricted_gentype.cpp b/3rdparty/glm/source/test/core/core_force_unrestricted_gentype.cpp
new file mode 100644
index 0000000..21d6e52
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_unrestricted_gentype.cpp
@@ -0,0 +1,12 @@
+#define GLM_FORCE_UNRESTRICTED_GENTYPE
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
// Build-only test: success is simply that GLM compiles with GLM_FORCE_UNRESTRICTED_GENTYPE.
int main()
{
	return 0;
}
+
diff --git a/3rdparty/glm/source/test/core/core_force_xyzw_only.cpp b/3rdparty/glm/source/test/core/core_force_xyzw_only.cpp
new file mode 100644
index 0000000..d19509d
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_force_xyzw_only.cpp
@@ -0,0 +1,58 @@
+#define GLM_FORCE_XYZW_ONLY
+
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+
+static int test_comp()
+{
+ int Error = 0;
+
+ {
+ glm::ivec1 const A(1);
+ Error += A.x == 1 ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 const A(1, 2);
+ Error += A.x == 1 ? 0 : 1;
+ Error += A.y == 2 ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 const A(1, 2, 3);
+ Error += A.x == 1 ? 0 : 1;
+ Error += A.y == 2 ? 0 : 1;
+ Error += A.z == 3 ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 const A(1, 2, 3, 4);
+ Error += A.x == 1 ? 0 : 1;
+ Error += A.y == 2 ? 0 : 1;
+ Error += A.z == 3 ? 0 : 1;
+ Error += A.w == 4 ? 0 : 1;
+ }
+
+ return Error;
+}
+
// Placeholder: no constexpr checks are exercised under GLM_FORCE_XYZW_ONLY yet.
static int test_constexpr()
{
	return 0;
}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_func_common.cpp b/3rdparty/glm/source/test/core/core_func_common.cpp
new file mode 100644
index 0000000..b8640de
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_common.cpp
@@ -0,0 +1,1349 @@
+#define GLM_FORCE_EXPLICIT_CTOR
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/random.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/common.hpp>
+#include <glm/vec4.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec2.hpp>
+#include <vector>
+#include <cstdio>
+#include <cmath>
+#include <ctime>
+
+// This file has divisions by zero to test isnan
+#if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(disable : 4723)
+#endif
+
+namespace floor_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ {
+ float A = 1.1f;
+ float B = glm::floor(A);
+ Error += glm::equal(B, 1.f, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ double A = 1.1;
+ double B = glm::floor(A);
+ Error += glm::equal(B, 1.0, 0.0001) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 A(1.1f);
+ glm::vec1 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::vec1(1.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec1 A(1.1);
+ glm::dvec1 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::dvec1(1.0), 0.0001)) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.1f);
+ glm::vec2 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::vec2(1.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec2 A(1.1);
+ glm::dvec2 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::dvec2(1.0), 0.0001)) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 A(1.1f);
+ glm::vec3 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::vec3(1.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec3 A(1.1);
+ glm::dvec3 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::dvec3(1.0), 0.0001)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A(1.1f);
+ glm::vec4 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::vec4(1.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec4 A(1.1);
+ glm::dvec4 B = glm::floor(A);
+
+ Error += glm::all(glm::equal(B, glm::dvec4(1.0), 0.0001)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace floor
+
+namespace modf_
+{
+ static int test()
+ {
+ int Error(0);
+
+ {
+ float X(1.5f);
+ float I(0.0f);
+ float A = glm::modf(X, I);
+
+ Error += glm::equal(I, 1.0f, 0.0001f) ? 0 : 1;
+ Error += glm::equal(A, 0.5f, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 X(1.1f, 1.2f, 1.5f, 1.7f);
+ glm::vec4 I(0.0f);
+ glm::vec4 A = glm::modf(X, I);
+
+ Error += glm::ivec4(I) == glm::ivec4(1) ? 0 : 1;
+ Error += glm::all(glm::equal(A, glm::vec4(0.1f, 0.2f, 0.5f, 0.7f), 0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec4 X(1.1, 1.2, 1.5, 1.7);
+ glm::dvec4 I(0.0);
+ glm::dvec4 A = glm::modf(X, I);
+
+ Error += glm::ivec4(I) == glm::ivec4(1) ? 0 : 1;
+ Error += glm::all(glm::equal(A, glm::dvec4(0.1, 0.2, 0.5, 0.7), 0.000000001)) ? 0 : 1;
+ }
+
+ {
+ double X(1.5);
+ double I(0.0);
+ double A = glm::modf(X, I);
+
+ Error += glm::equal(I, 1.0, 0.0001) ? 0 : 1;
+ Error += glm::equal(A, 0.5, 0.0001) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace modf
+
+namespace mod_
+{
+ static int test()
+ {
+ int Error(0);
+
+ {
+ float A(1.5f);
+ float B(1.0f);
+ float C = glm::mod(A, B);
+
+ Error += glm::equal(C, 0.5f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ float A(-0.2f);
+ float B(1.0f);
+ float C = glm::mod(A, B);
+
+ Error += glm::equal(C, 0.8f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ float A(3.0);
+ float B(2.0f);
+ float C = glm::mod(A, B);
+
+ Error += glm::equal(C, 1.0f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A(3.0);
+ float B(2.0f);
+ glm::vec4 C = glm::mod(A, B);
+
+ Error += glm::all(glm::equal(C, glm::vec4(1.0f), 0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A(3.0);
+ glm::vec4 B(2.0f);
+ glm::vec4 C = glm::mod(A, B);
+
+ Error += glm::all(glm::equal(C, glm::vec4(1.0f), 0.00001f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace mod_
+
+namespace floatBitsToInt
+{
+ static int test()
+ {
+ int Error = 0;
+
+ {
+ float A = 1.0f;
+ int B = glm::floatBitsToInt(A);
+ float C = glm::intBitsToFloat(B);
+ Error += glm::equal(A, C, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f, 2.0f);
+ glm::ivec2 B = glm::floatBitsToInt(A);
+ glm::vec2 C = glm::intBitsToFloat(B);
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B = glm::floatBitsToInt(A);
+ glm::vec3 C = glm::intBitsToFloat(B);
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = glm::floatBitsToInt(A);
+ glm::vec4 C = glm::intBitsToFloat(B);
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace floatBitsToInt
+
+namespace floatBitsToUint
+{
+ static int test()
+ {
+ int Error = 0;
+
+ {
+ float A = 1.0f;
+ glm::uint B = glm::floatBitsToUint(A);
+ float C = glm::uintBitsToFloat(B);
+ Error += glm::equal(A, C, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f, 2.0f);
+ glm::uvec2 B = glm::floatBitsToUint(A);
+ glm::vec2 C = glm::uintBitsToFloat(B);
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 A(1.0f, 2.0f, 3.0f);
+ glm::uvec3 B = glm::floatBitsToUint(A);
+ glm::vec3 C = glm::uintBitsToFloat(B);
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::uvec4 B = glm::floatBitsToUint(A);
+ glm::vec4 C = glm::uintBitsToFloat(B);
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace floatBitsToUint
+
+namespace min_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ glm::vec1 A0 = glm::min(glm::vec1(1), glm::vec1(1));
+ bool A1 = glm::all(glm::equal(A0, glm::vec1(1), glm::epsilon<float>()));
+ Error += A1 ? 0 : 1;
+
+ glm::vec2 B0 = glm::min(glm::vec2(1), glm::vec2(1));
+ glm::vec2 B1 = glm::min(glm::vec2(1), 1.0f);
+ bool B2 = glm::all(glm::equal(B0, B1, glm::epsilon<float>()));
+ Error += B2 ? 0 : 1;
+
+ glm::vec3 C0 = glm::min(glm::vec3(1), glm::vec3(1));
+ glm::vec3 C1 = glm::min(glm::vec3(1), 1.0f);
+ bool C2 = glm::all(glm::equal(C0, C1, glm::epsilon<float>()));
+ Error += C2 ? 0 : 1;
+
+ glm::vec4 D0 = glm::min(glm::vec4(1), glm::vec4(1));
+ glm::vec4 D1 = glm::min(glm::vec4(1), 1.0f);
+ bool D2 = glm::all(glm::equal(D0, D1, glm::epsilon<float>()));
+ Error += D2 ? 0 : 1;
+
+ return Error;
+ }
+
+ int min_tern(int a, int b)
+ {
+ return a < b ? a : b;
+ }
+
+ int min_int(int x, int y)
+ {
+ return y ^ ((x ^ y) & -(x < y));
+ }
+
+ static int perf(std::size_t Count)
+ {
+ std::vector<int> A(Count);
+ std::vector<int> B(Count);
+
+ std::size_t const InternalCount = 200000;
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ A[i] = glm::linearRand(-1000, 1000);
+ B[i] = glm::linearRand(-1000, 1000);
+ }
+
+ int Error = 0;
+
+ glm::int32 SumA = 0;
+ {
+ std::clock_t Timestamp0 = std::clock();
+
+ for (std::size_t j = 0; j < InternalCount; ++j)
+ for (std::size_t i = 0; i < Count; ++i)
+ SumA += min_tern(A[i], B[i]);
+
+ std::clock_t Timestamp1 = std::clock();
+
+ std::printf("min_tern Time %d clocks\n", static_cast<int>(Timestamp1 - Timestamp0));
+ }
+
+ glm::int32 SumB = 0;
+ {
+ std::clock_t Timestamp0 = std::clock();
+
+ for (std::size_t j = 0; j < InternalCount; ++j)
+ for (std::size_t i = 0; i < Count; ++i)
+ SumB += min_int(A[i], B[i]);
+
+ std::clock_t Timestamp1 = std::clock();
+
+ std::printf("min_int Time %d clocks\n", static_cast<int>(Timestamp1 - Timestamp0));
+ }
+
+ Error += SumA == SumB ? 0 : 1;
+
+ return Error;
+ }
+}//namespace min_
+
+namespace max_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ glm::vec1 A0 = glm::max(glm::vec1(1), glm::vec1(1));
+ bool A1 = glm::all(glm::equal(A0, glm::vec1(1), glm::epsilon<float>()));
+ Error += A1 ? 0 : 1;
+
+
+ glm::vec2 B0 = glm::max(glm::vec2(1), glm::vec2(1));
+ glm::vec2 B1 = glm::max(glm::vec2(1), 1.0f);
+ bool B2 = glm::all(glm::equal(B0, B1, glm::epsilon<float>()));
+ Error += B2 ? 0 : 1;
+
+ glm::vec3 C0 = glm::max(glm::vec3(1), glm::vec3(1));
+ glm::vec3 C1 = glm::max(glm::vec3(1), 1.0f);
+ bool C2 = glm::all(glm::equal(C0, C1, glm::epsilon<float>()));
+ Error += C2 ? 0 : 1;
+
+ glm::vec4 D0 = glm::max(glm::vec4(1), glm::vec4(1));
+ glm::vec4 D1 = glm::max(glm::vec4(1), 1.0f);
+ bool D2 = glm::all(glm::equal(D0, D1, glm::epsilon<float>()));
+ Error += D2 ? 0 : 1;
+
+ return Error;
+ }
+}//namespace max_
+
+namespace clamp_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ return Error;
+ }
+}//namespace clamp_
+
+namespace mix_
+{
+ template<typename T, typename B>
+ struct entry
+ {
+ T x;
+ T y;
+ B a;
+ T Result;
+ };
+
+ entry<float, bool> const TestBool[] =
+ {
+ {0.0f, 1.0f, false, 0.0f},
+ {0.0f, 1.0f, true, 1.0f},
+ {-1.0f, 1.0f, false, -1.0f},
+ {-1.0f, 1.0f, true, 1.0f}
+ };
+
+ entry<float, float> const TestFloat[] =
+ {
+ {0.0f, 1.0f, 0.0f, 0.0f},
+ {0.0f, 1.0f, 1.0f, 1.0f},
+ {-1.0f, 1.0f, 0.0f, -1.0f},
+ {-1.0f, 1.0f, 1.0f, 1.0f}
+ };
+
+ entry<glm::vec2, bool> const TestVec2Bool[] =
+ {
+ {glm::vec2(0.0f), glm::vec2(1.0f), false, glm::vec2(0.0f)},
+ {glm::vec2(0.0f), glm::vec2(1.0f), true, glm::vec2(1.0f)},
+ {glm::vec2(-1.0f), glm::vec2(1.0f), false, glm::vec2(-1.0f)},
+ {glm::vec2(-1.0f), glm::vec2(1.0f), true, glm::vec2(1.0f)}
+ };
+
+ entry<glm::vec2, glm::bvec2> const TestBVec2[] =
+ {
+ {glm::vec2(0.0f), glm::vec2(1.0f), glm::bvec2(false), glm::vec2(0.0f)},
+ {glm::vec2(0.0f), glm::vec2(1.0f), glm::bvec2(true), glm::vec2(1.0f)},
+ {glm::vec2(-1.0f), glm::vec2(1.0f), glm::bvec2(false), glm::vec2(-1.0f)},
+ {glm::vec2(-1.0f), glm::vec2(1.0f), glm::bvec2(true), glm::vec2(1.0f)},
+ {glm::vec2(-1.0f), glm::vec2(1.0f), glm::bvec2(true, false), glm::vec2(1.0f, -1.0f)}
+ };
+
+ entry<glm::vec3, bool> const TestVec3Bool[] =
+ {
+ {glm::vec3(0.0f), glm::vec3(1.0f), false, glm::vec3(0.0f)},
+ {glm::vec3(0.0f), glm::vec3(1.0f), true, glm::vec3(1.0f)},
+ {glm::vec3(-1.0f), glm::vec3(1.0f), false, glm::vec3(-1.0f)},
+ {glm::vec3(-1.0f), glm::vec3(1.0f), true, glm::vec3(1.0f)}
+ };
+
+ entry<glm::vec3, glm::bvec3> const TestBVec3[] =
+ {
+ {glm::vec3(0.0f), glm::vec3(1.0f), glm::bvec3(false), glm::vec3(0.0f)},
+ {glm::vec3(0.0f), glm::vec3(1.0f), glm::bvec3(true), glm::vec3(1.0f)},
+ {glm::vec3(-1.0f), glm::vec3(1.0f), glm::bvec3(false), glm::vec3(-1.0f)},
+ {glm::vec3(-1.0f), glm::vec3(1.0f), glm::bvec3(true), glm::vec3(1.0f)},
+ {glm::vec3(1.0f, 2.0f, 3.0f), glm::vec3(4.0f, 5.0f, 6.0f), glm::bvec3(true, false, true), glm::vec3(4.0f, 2.0f, 6.0f)}
+ };
+
+ entry<glm::vec4, bool> const TestVec4Bool[] =
+ {
+ {glm::vec4(0.0f), glm::vec4(1.0f), false, glm::vec4(0.0f)},
+ {glm::vec4(0.0f), glm::vec4(1.0f), true, glm::vec4(1.0f)},
+ {glm::vec4(-1.0f), glm::vec4(1.0f), false, glm::vec4(-1.0f)},
+ {glm::vec4(-1.0f), glm::vec4(1.0f), true, glm::vec4(1.0f)}
+ };
+
+ entry<glm::vec4, glm::bvec4> const TestBVec4[] =
+ {
+ {glm::vec4(0.0f, 0.0f, 1.0f, 1.0f), glm::vec4(2.0f, 2.0f, 3.0f, 3.0f), glm::bvec4(false, true, false, true), glm::vec4(0.0f, 2.0f, 1.0f, 3.0f)},
+ {glm::vec4(0.0f), glm::vec4(1.0f), glm::bvec4(true), glm::vec4(1.0f)},
+ {glm::vec4(-1.0f), glm::vec4(1.0f), glm::bvec4(false), glm::vec4(-1.0f)},
+ {glm::vec4(-1.0f), glm::vec4(1.0f), glm::bvec4(true), glm::vec4(1.0f)},
+ {glm::vec4(1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(5.0f, 6.0f, 7.0f, 8.0f), glm::bvec4(true, false, true, false), glm::vec4(5.0f, 2.0f, 7.0f, 4.0f)}
+ };
+
+ static int test()
+ {
+ int Error = 0;
+
+ // Float with bool
+ {
+ for(std::size_t i = 0; i < sizeof(TestBool) / sizeof(entry<float, bool>); ++i)
+ {
+ float Result = glm::mix(TestBool[i].x, TestBool[i].y, TestBool[i].a);
+ Error += glm::equal(Result, TestBool[i].Result, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // Float with float
+ {
+ for(std::size_t i = 0; i < sizeof(TestFloat) / sizeof(entry<float, float>); ++i)
+ {
+ float Result = glm::mix(TestFloat[i].x, TestFloat[i].y, TestFloat[i].a);
+ Error += glm::equal(Result, TestFloat[i].Result, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // vec2 with bool
+ {
+ for(std::size_t i = 0; i < sizeof(TestVec2Bool) / sizeof(entry<glm::vec2, bool>); ++i)
+ {
+ glm::vec2 Result = glm::mix(TestVec2Bool[i].x, TestVec2Bool[i].y, TestVec2Bool[i].a);
+ Error += glm::equal(Result.x, TestVec2Bool[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.y, TestVec2Bool[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // vec2 with bvec2
+ {
+ for(std::size_t i = 0; i < sizeof(TestBVec2) / sizeof(entry<glm::vec2, glm::bvec2>); ++i)
+ {
+ glm::vec2 Result = glm::mix(TestBVec2[i].x, TestBVec2[i].y, TestBVec2[i].a);
+ Error += glm::equal(Result.x, TestBVec2[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.y, TestBVec2[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // vec3 with bool
+ {
+ for(std::size_t i = 0; i < sizeof(TestVec3Bool) / sizeof(entry<glm::vec3, bool>); ++i)
+ {
+ glm::vec3 Result = glm::mix(TestVec3Bool[i].x, TestVec3Bool[i].y, TestVec3Bool[i].a);
+ Error += glm::equal(Result.x, TestVec3Bool[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.y, TestVec3Bool[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.z, TestVec3Bool[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // vec3 with bvec3
+ {
+ for(std::size_t i = 0; i < sizeof(TestBVec3) / sizeof(entry<glm::vec3, glm::bvec3>); ++i)
+ {
+ glm::vec3 Result = glm::mix(TestBVec3[i].x, TestBVec3[i].y, TestBVec3[i].a);
+ Error += glm::equal(Result.x, TestBVec3[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.y, TestBVec3[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.z, TestBVec3[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // vec4 with bool
+ {
+ for(std::size_t i = 0; i < sizeof(TestVec4Bool) / sizeof(entry<glm::vec4, bool>); ++i)
+ {
+ glm::vec4 Result = glm::mix(TestVec4Bool[i].x, TestVec4Bool[i].y, TestVec4Bool[i].a);
+ Error += glm::equal(Result.x, TestVec4Bool[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.y, TestVec4Bool[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.z, TestVec4Bool[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.w, TestVec4Bool[i].Result.w, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ // vec4 with bvec4
+ {
+ for(std::size_t i = 0; i < sizeof(TestBVec4) / sizeof(entry<glm::vec4, glm::bvec4>); ++i)
+ {
+ glm::vec4 Result = glm::mix(TestBVec4[i].x, TestBVec4[i].y, TestBVec4[i].a);
+ Error += glm::equal(Result.x, TestBVec4[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.y, TestBVec4[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.z, TestBVec4[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(Result.w, TestBVec4[i].Result.w, glm::epsilon<float>()) ? 0 : 1;
+ }
+ }
+
+ return Error;
+ }
+}//namespace mix_
+
+namespace step_
+{
+ template<typename EDGE, typename VEC>
+ struct entry
+ {
+ EDGE edge;
+ VEC x;
+ VEC result;
+ };
+
+ entry<float, glm::vec4> TestVec4Scalar [] =
+ {
+ { 1.0f, glm::vec4(1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(1.0f) },
+ { 0.0f, glm::vec4(1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(1.0f) },
+ { 0.0f, glm::vec4(-1.0f, -2.0f, -3.0f, -4.0f), glm::vec4(0.0f) }
+ };
+
+ entry<glm::vec4, glm::vec4> TestVec4Vector [] =
+ {
+ { glm::vec4(-1.0f, -2.0f, -3.0f, -4.0f), glm::vec4(-2.0f, -3.0f, -4.0f, -5.0f), glm::vec4(0.0f) },
+ { glm::vec4( 0.0f, 1.0f, 2.0f, 3.0f), glm::vec4( 1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(1.0f) },
+ { glm::vec4( 2.0f, 3.0f, 4.0f, 5.0f), glm::vec4( 1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(0.0f) },
+ { glm::vec4( 0.0f, 1.0f, 2.0f, 3.0f), glm::vec4(-1.0f,-2.0f,-3.0f,-4.0f), glm::vec4(0.0f) }
+ };
+
+ static int test()
+ {
+ int Error = 0;
+
+ // scalar
+ {
+ float const Edge = 2.0f;
+
+ float const A = glm::step(Edge, 1.0f);
+ Error += glm::equal(A, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+
+ float const B = glm::step(Edge, 3.0f);
+ Error += glm::equal(B, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+
+ float const C = glm::step(Edge, 2.0f);
+ Error += glm::equal(C, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ // vec4 and float
+ {
+ for (std::size_t i = 0; i < sizeof(TestVec4Scalar) / sizeof(entry<float, glm::vec4>); ++i)
+ {
+ glm::vec4 Result = glm::step(TestVec4Scalar[i].edge, TestVec4Scalar[i].x);
+ Error += glm::all(glm::equal(Result, TestVec4Scalar[i].result, glm::epsilon<float>())) ? 0 : 1;
+ }
+ }
+
+ // vec4 and vec4
+ {
+ for (std::size_t i = 0; i < sizeof(TestVec4Vector) / sizeof(entry<glm::vec4, glm::vec4>); ++i)
+ {
+ glm::vec4 Result = glm::step(TestVec4Vector[i].edge, TestVec4Vector[i].x);
+ Error += glm::all(glm::equal(Result, TestVec4Vector[i].result, glm::epsilon<float>())) ? 0 : 1;
+ }
+ }
+
+ return Error;
+ }
+}//namespace step_
+
+namespace round_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ {
+ float A = glm::round(0.0f);
+ Error += glm::equal(A, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float B = glm::round(0.5f);
+ Error += glm::equal(B, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float C = glm::round(1.0f);
+ Error += glm::equal(C, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float D = glm::round(0.1f);
+ Error += glm::equal(D, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float E = glm::round(0.9f);
+ Error += glm::equal(E, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float F = glm::round(1.5f);
+ Error += glm::equal(F, 2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float G = glm::round(1.9f);
+ Error += glm::equal(G, 2.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ float A = glm::round(-0.0f);
+ Error += glm::equal(A, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float B = glm::round(-0.5f);
+ Error += glm::equal(B, -1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float C = glm::round(-1.0f);
+ Error += glm::equal(C, -1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float D = glm::round(-0.1f);
+ Error += glm::equal(D, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float E = glm::round(-0.9f);
+ Error += glm::equal(E, -1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float F = glm::round(-1.5f);
+ Error += glm::equal(F, -2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float G = glm::round(-1.9f);
+ Error += glm::equal(G, -2.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace round_
+
+namespace roundEven
+{
+ static int test()
+ {
+ int Error = 0;
+
+ {
+ float A1 = glm::roundEven(-1.5f);
+ Error += glm::equal(A1, -2.0f, 0.0001f) ? 0 : 1;
+
+ float A2 = glm::roundEven(1.5f);
+ Error += glm::equal(A2, 2.0f, 0.0001f) ? 0 : 1;
+
+ float A5 = glm::roundEven(-2.5f);
+ Error += glm::equal(A5, -2.0f, 0.0001f) ? 0 : 1;
+
+ float A6 = glm::roundEven(2.5f);
+ Error += glm::equal(A6, 2.0f, 0.0001f) ? 0 : 1;
+
+ float A3 = glm::roundEven(-3.5f);
+ Error += glm::equal(A3, -4.0f, 0.0001f) ? 0 : 1;
+
+ float A4 = glm::roundEven(3.5f);
+ Error += glm::equal(A4, 4.0f, 0.0001f) ? 0 : 1;
+
+ float C7 = glm::roundEven(-4.5f);
+ Error += glm::equal(C7, -4.0f, 0.0001f) ? 0 : 1;
+
+ float C8 = glm::roundEven(4.5f);
+ Error += glm::equal(C8, 4.0f, 0.0001f) ? 0 : 1;
+
+ float C1 = glm::roundEven(-5.5f);
+ Error += glm::equal(C1, -6.0f, 0.0001f) ? 0 : 1;
+
+ float C2 = glm::roundEven(5.5f);
+ Error += glm::equal(C2, 6.0f, 0.0001f) ? 0 : 1;
+
+ float C3 = glm::roundEven(-6.5f);
+ Error += glm::equal(C3, -6.0f, 0.0001f) ? 0 : 1;
+
+ float C4 = glm::roundEven(6.5f);
+ Error += glm::equal(C4, 6.0f, 0.0001f) ? 0 : 1;
+
+ float C5 = glm::roundEven(-7.5f);
+ Error += glm::equal(C5, -8.0f, 0.0001f) ? 0 : 1;
+
+ float C6 = glm::roundEven(7.5f);
+ Error += glm::equal(C6, 8.0f, 0.0001f) ? 0 : 1;
+
+ Error += 0;
+ }
+
+ {
+ float A7 = glm::roundEven(-2.4f);
+ Error += glm::equal(A7, -2.0f, 0.0001f) ? 0 : 1;
+
+ float A8 = glm::roundEven(2.4f);
+ Error += glm::equal(A8, 2.0f, 0.0001f) ? 0 : 1;
+
+ float B1 = glm::roundEven(-2.6f);
+ Error += glm::equal(B1, -3.0f, 0.0001f) ? 0 : 1;
+
+ float B2 = glm::roundEven(2.6f);
+ Error += glm::equal(B2, 3.0f, 0.0001f) ? 0 : 1;
+
+ float B3 = glm::roundEven(-2.0f);
+ Error += glm::equal(B3, -2.0f, 0.0001f) ? 0 : 1;
+
+ float B4 = glm::roundEven(2.0f);
+ Error += glm::equal(B4, 2.0f, 0.0001f) ? 0 : 1;
+
+ Error += 0;
+ }
+
+ {
+ float A = glm::roundEven(0.0f);
+ Error += glm::equal(A, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float B = glm::roundEven(0.5f);
+ Error += glm::equal(B, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float C = glm::roundEven(1.0f);
+ Error += glm::equal(C, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float D = glm::roundEven(0.1f);
+ Error += glm::equal(D, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float E = glm::roundEven(0.9f);
+ Error += glm::equal(E, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float F = glm::roundEven(1.5f);
+ Error += glm::equal(F, 2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float G = glm::roundEven(1.9f);
+ Error += glm::equal(G, 2.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ float A = glm::roundEven(-0.0f);
+ Error += glm::equal(A, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float B = glm::roundEven(-0.5f);
+ Error += glm::equal(B, -0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float C = glm::roundEven(-1.0f);
+ Error += glm::equal(C, -1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float D = glm::roundEven(-0.1f);
+ Error += glm::equal(D, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ float E = glm::roundEven(-0.9f);
+ Error += glm::equal(E, -1.0f, glm::epsilon<float>()) ? 0 : 1;
+ float F = glm::roundEven(-1.5f);
+ Error += glm::equal(F, -2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float G = glm::roundEven(-1.9f);
+ Error += glm::equal(G, -2.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ float A = glm::roundEven(1.5f);
+ Error += glm::equal(A, 2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float B = glm::roundEven(2.5f);
+ Error += glm::equal(B, 2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float C = glm::roundEven(3.5f);
+ Error += glm::equal(C, 4.0f, glm::epsilon<float>()) ? 0 : 1;
+ float D = glm::roundEven(4.5f);
+ Error += glm::equal(D, 4.0f, glm::epsilon<float>()) ? 0 : 1;
+ float E = glm::roundEven(5.5f);
+ Error += glm::equal(E, 6.0f, glm::epsilon<float>()) ? 0 : 1;
+ float F = glm::roundEven(6.5f);
+ Error += glm::equal(F, 6.0f, glm::epsilon<float>()) ? 0 : 1;
+ float G = glm::roundEven(7.5f);
+ Error += glm::equal(G, 8.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ float A = glm::roundEven(-1.5f);
+ Error += glm::equal(A, -2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float B = glm::roundEven(-2.5f);
+ Error += glm::equal(B, -2.0f, glm::epsilon<float>()) ? 0 : 1;
+ float C = glm::roundEven(-3.5f);
+ Error += glm::equal(C, -4.0f, glm::epsilon<float>()) ? 0 : 1;
+ float D = glm::roundEven(-4.5f);
+ Error += glm::equal(D, -4.0f, glm::epsilon<float>()) ? 0 : 1;
+ float E = glm::roundEven(-5.5f);
+ Error += glm::equal(E, -6.0f, glm::epsilon<float>()) ? 0 : 1;
+ float F = glm::roundEven(-6.5f);
+ Error += glm::equal(F, -6.0f, glm::epsilon<float>()) ? 0 : 1;
+ float G = glm::roundEven(-7.5f);
+ Error += glm::equal(G, -8.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace roundEven
+
+namespace isnan_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ float Zero_f = 0.0;
+ double Zero_d = 0.0;
+
+ {
+ Error += true == glm::isnan(0.0/Zero_d) ? 0 : 1;
+ Error += true == glm::any(glm::isnan(glm::dvec2(0.0 / Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isnan(glm::dvec3(0.0 / Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isnan(glm::dvec4(0.0 / Zero_d))) ? 0 : 1;
+ }
+
+ {
+ Error += true == glm::isnan(0.0f/Zero_f) ? 0 : 1;
+ Error += true == glm::any(glm::isnan(glm::vec2(0.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isnan(glm::vec3(0.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isnan(glm::vec4(0.0f/Zero_f))) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace isnan_
+
+namespace isinf_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ float Zero_f = 0.0;
+ double Zero_d = 0.0;
+
+ {
+ Error += true == glm::isinf( 1.0/Zero_d) ? 0 : 1;
+ Error += true == glm::isinf(-1.0/Zero_d) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::dvec2( 1.0/Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::dvec2(-1.0/Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::dvec3( 1.0/Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::dvec3(-1.0/Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::dvec4( 1.0/Zero_d))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::dvec4(-1.0/Zero_d))) ? 0 : 1;
+ }
+
+ {
+ Error += true == glm::isinf( 1.0f/Zero_f) ? 0 : 1;
+ Error += true == glm::isinf(-1.0f/Zero_f) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::vec2( 1.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::vec2(-1.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::vec3( 1.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::vec3(-1.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::vec4( 1.0f/Zero_f))) ? 0 : 1;
+ Error += true == glm::any(glm::isinf(glm::vec4(-1.0f/Zero_f))) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace isinf_
+
+namespace sign
+{
+ template<typename genFIType>
+ GLM_FUNC_QUALIFIER genFIType sign_if(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<genFIType>::is_iec559 ||
+ (std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer), "'sign' only accept signed inputs");
+
+ genFIType result;
+ if(x > genFIType(0))
+ result = genFIType(1);
+ else if(x < genFIType(0))
+ result = genFIType(-1);
+ else
+ result = genFIType(0);
+ return result;
+ }
+
+ template<typename genFIType>
+ GLM_FUNC_QUALIFIER genFIType sign_alu1(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer,
+ "'sign' only accept integer inputs");
+
+ return (x >> 31) | (static_cast<unsigned>(-x) >> 31);
+ }
+
+ GLM_FUNC_QUALIFIER int sign_alu2(int x)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<int>::is_signed && std::numeric_limits<int>::is_integer, "'sign' only accept integer inputs");
+
+# if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(push)
+# pragma warning(disable : 4146) //cast truncates constant value
+# endif
+
+ return -(static_cast<unsigned>(x) >> 31) | (-static_cast<unsigned>(x) >> 31);
+
+# if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(pop)
+# endif
+ }
+
+ template<typename genFIType>
+ GLM_FUNC_QUALIFIER genFIType sign_sub(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer,
+ "'sign' only accept integer inputs");
+
+ return (static_cast<unsigned>(-x) >> 31) - (static_cast<unsigned>(x) >> 31);
+ }
+
+ template<typename genFIType>
+ GLM_FUNC_QUALIFIER genFIType sign_cmp(genFIType x)
+ {
+ GLM_STATIC_ASSERT(
+ std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer,
+ "'sign' only accept integer inputs");
+
+ return (x > 0) - (x < 0);
+ }
+
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ genType Return;
+ };
+
+ int test_int32()
+ {
+ type<glm::int32> const Data[] =
+ {
+ { std::numeric_limits<glm::int32>::max(), 1},
+ { std::numeric_limits<glm::int32>::min(), -1},
+ { 0, 0},
+ { 1, 1},
+ { 2, 1},
+ { 3, 1},
+ {-1,-1},
+ {-2,-1},
+ {-3,-1}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::int32>); ++i)
+ {
+ glm::int32 Result = glm::sign(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::int32>); ++i)
+ {
+ glm::int32 Result = sign_cmp(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::int32>); ++i)
+ {
+ glm::int32 Result = sign_if(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::int32>); ++i)
+ {
+ glm::int32 Result = sign_alu1(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::int32>); ++i)
+ {
+ glm::int32 Result = sign_alu2(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_i32vec4()
+ {
+ type<glm::ivec4> const Data[] =
+ {
+ {glm::ivec4( 1), glm::ivec4( 1)},
+ {glm::ivec4( 0), glm::ivec4( 0)},
+ {glm::ivec4( 2), glm::ivec4( 1)},
+ {glm::ivec4( 3), glm::ivec4( 1)},
+ {glm::ivec4(-1), glm::ivec4(-1)},
+ {glm::ivec4(-2), glm::ivec4(-1)},
+ {glm::ivec4(-3), glm::ivec4(-1)}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::ivec4>); ++i)
+ {
+ glm::ivec4 Result = glm::sign(Data[i].Value);
+ Error += glm::all(glm::equal(Data[i].Return, Result)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_f32vec4()
+ {
+ type<glm::vec4> const Data[] =
+ {
+ {glm::vec4( 1), glm::vec4( 1)},
+ {glm::vec4( 0), glm::vec4( 0)},
+ {glm::vec4( 2), glm::vec4( 1)},
+ {glm::vec4( 3), glm::vec4( 1)},
+ {glm::vec4(-1), glm::vec4(-1)},
+ {glm::vec4(-2), glm::vec4(-1)},
+ {glm::vec4(-3), glm::vec4(-1)}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::vec4>); ++i)
+ {
+ glm::vec4 Result = glm::sign(Data[i].Value);
+ Error += glm::all(glm::equal(Data[i].Return, Result, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ static int test()
+ {
+ int Error = 0;
+
+ Error += test_int32();
+ Error += test_i32vec4();
+ Error += test_f32vec4();
+
+ return Error;
+ }
+
+ int perf_rand(std::size_t Samples)
+ {
+ int Error = 0;
+
+ std::size_t const Count = Samples;
+ std::vector<glm::int32> Input, Output;
+ Input.resize(Count);
+ Output.resize(Count);
+ for(std::size_t i = 0; i < Count; ++i)
+ Input[i] = static_cast<glm::int32>(glm::linearRand(-65536.f, 65536.f));
+
+ std::clock_t Timestamp0 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_cmp(Input[i]);
+
+ std::clock_t Timestamp1 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_if(Input[i]);
+
+ std::clock_t Timestamp2 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_alu1(Input[i]);
+
+ std::clock_t Timestamp3 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_alu2(Input[i]);
+
+ std::clock_t Timestamp4 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_sub(Input[i]);
+
+ std::clock_t Timestamp5 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = glm::sign(Input[i]);
+
+ std::clock_t Timestamp6 = std::clock();
+
+ std::printf("sign_cmp(rand) Time %d clocks\n", static_cast<int>(Timestamp1 - Timestamp0));
+ std::printf("sign_if(rand) Time %d clocks\n", static_cast<int>(Timestamp2 - Timestamp1));
+ std::printf("sign_alu1(rand) Time %d clocks\n", static_cast<int>(Timestamp3 - Timestamp2));
+ std::printf("sign_alu2(rand) Time %d clocks\n", static_cast<int>(Timestamp4 - Timestamp3));
+ std::printf("sign_sub(rand) Time %d clocks\n", static_cast<int>(Timestamp5 - Timestamp4));
+ std::printf("glm::sign(rand) Time %d clocks\n", static_cast<int>(Timestamp6 - Timestamp5));
+
+ return Error;
+ }
+
+ int perf_linear(std::size_t Samples)
+ {
+ int Error = 0;
+
+ std::size_t const Count = Samples;
+ std::vector<glm::int32> Input, Output;
+ Input.resize(Count);
+ Output.resize(Count);
+ for(std::size_t i = 0; i < Count; ++i)
+ Input[i] = static_cast<glm::int32>(i);
+
+ std::clock_t Timestamp0 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_cmp(Input[i]);
+
+ std::clock_t Timestamp1 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_if(Input[i]);
+
+ std::clock_t Timestamp2 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_alu1(Input[i]);
+
+ std::clock_t Timestamp3 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_alu2(Input[i]);
+
+ std::clock_t Timestamp4 = std::clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Output[i] = sign_sub(Input[i]);
+
+ std::clock_t Timestamp5 = std::clock();
+
+ std::printf("sign_cmp(linear) Time %d clocks\n", static_cast<int>(Timestamp1 - Timestamp0));
+ std::printf("sign_if(linear) Time %d clocks\n", static_cast<int>(Timestamp2 - Timestamp1));
+ std::printf("sign_alu1(linear) Time %d clocks\n", static_cast<int>(Timestamp3 - Timestamp2));
+ std::printf("sign_alu2(linear) Time %d clocks\n", static_cast<int>(Timestamp4 - Timestamp3));
+ std::printf("sign_sub(linear) Time %d clocks\n", static_cast<int>(Timestamp5 - Timestamp4));
+
+ return Error;
+ }
+
+ int perf_linear_cal(std::size_t Samples)
+ {
+ int Error = 0;
+
+ glm::int32 const Count = static_cast<glm::int32>(Samples);
+
+ std::clock_t Timestamp0 = std::clock();
+ glm::int32 Sum = 0;
+
+ for(glm::int32 i = 1; i < Count; ++i)
+ Sum += sign_cmp(i);
+
+ std::clock_t Timestamp1 = std::clock();
+
+ for(glm::int32 i = 1; i < Count; ++i)
+ Sum += sign_if(i);
+
+ std::clock_t Timestamp2 = std::clock();
+
+ for(glm::int32 i = 1; i < Count; ++i)
+ Sum += sign_alu1(i);
+
+ std::clock_t Timestamp3 = std::clock();
+
+ for(glm::int32 i = 1; i < Count; ++i)
+ Sum += sign_alu2(i);
+
+ std::clock_t Timestamp4 = std::clock();
+
+ for(glm::int32 i = 1; i < Count; ++i)
+ Sum += sign_sub(i);
+
+ std::clock_t Timestamp5 = std::clock();
+
+ std::printf("Sum %d\n", static_cast<int>(Sum));
+
+ std::printf("sign_cmp(linear_cal) Time %d clocks\n", static_cast<int>(Timestamp1 - Timestamp0));
+ std::printf("sign_if(linear_cal) Time %d clocks\n", static_cast<int>(Timestamp2 - Timestamp1));
+ std::printf("sign_alu1(linear_cal) Time %d clocks\n", static_cast<int>(Timestamp3 - Timestamp2));
+ std::printf("sign_alu2(linear_cal) Time %d clocks\n", static_cast<int>(Timestamp4 - Timestamp3));
+ std::printf("sign_sub(linear_cal) Time %d clocks\n", static_cast<int>(Timestamp5 - Timestamp4));
+
+ return Error;
+ }
+
+ static int perf(std::size_t Samples)
+ {
+ int Error(0);
+
+ Error += perf_linear_cal(Samples);
+ Error += perf_linear(Samples);
+ Error += perf_rand(Samples);
+
+ return Error;
+ }
+}//namespace sign
+
+namespace frexp_
+{
+ static int test()
+ {
+ int Error = 0;
+
+ {
+ glm::vec1 const x(1024);
+ glm::ivec1 exp;
+ glm::vec1 A = glm::frexp(x, exp);
+ Error += glm::all(glm::equal(A, glm::vec1(0.5), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(exp, glm::ivec1(11))) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 const x(1024, 0.24);
+ glm::ivec2 exp;
+ glm::vec2 A = glm::frexp(x, exp);
+ Error += glm::all(glm::equal(A, glm::vec2(0.5, 0.96), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(exp, glm::ivec2(11, -2))) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 const x(1024, 0.24, 0);
+ glm::ivec3 exp;
+ glm::vec3 A = glm::frexp(x, exp);
+ Error += glm::all(glm::equal(A, glm::vec3(0.5, 0.96, 0.0), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(exp, glm::ivec3(11, -2, 0))) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const x(1024, 0.24, 0, -1.33);
+ glm::ivec4 exp;
+ glm::vec4 A = glm::frexp(x, exp);
+ Error += glm::all(glm::equal(A, glm::vec4(0.5, 0.96, 0.0, -0.665), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(exp, glm::ivec4(11, -2, 0, 1))) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace frexp_
+
+namespace ldexp_
+{
+ static int test()
+ {
+ int Error(0);
+
+ {
+ glm::vec1 A = glm::vec1(0.5);
+ glm::ivec1 exp = glm::ivec1(11);
+ glm::vec1 x = glm::ldexp(A, exp);
+ Error += glm::all(glm::equal(x, glm::vec1(1024),0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A = glm::vec2(0.5, 0.96);
+ glm::ivec2 exp = glm::ivec2(11, -2);
+ glm::vec2 x = glm::ldexp(A, exp);
+ Error += glm::all(glm::equal(x, glm::vec2(1024, .24),0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 A = glm::vec3(0.5, 0.96, 0.0);
+ glm::ivec3 exp = glm::ivec3(11, -2, 0);
+ glm::vec3 x = glm::ldexp(A, exp);
+ Error += glm::all(glm::equal(x, glm::vec3(1024, .24, 0),0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A = glm::vec4(0.5, 0.96, 0.0, -0.665);
+ glm::ivec4 exp = glm::ivec4(11, -2, 0, 1);
+ glm::vec4 x = glm::ldexp(A, exp);
+ Error += glm::all(glm::equal(x, glm::vec4(1024, .24, 0, -1.33),0.00001f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace ldexp_
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::abs(1.0f) > 0.0f, "GLM: Failed constexpr");
+ constexpr glm::vec1 const A = glm::abs(glm::vec1(1.0f));
+ constexpr glm::vec2 const B = glm::abs(glm::vec2(1.0f));
+ constexpr glm::vec3 const C = glm::abs(glm::vec3(1.0f));
+ constexpr glm::vec4 const D = glm::abs(glm::vec4(1.0f));
+#endif // GLM_HAS_CONSTEXPR
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_constexpr();
+ Error += sign::test();
+ Error += floor_::test();
+ Error += mod_::test();
+ Error += modf_::test();
+ Error += floatBitsToInt::test();
+ Error += floatBitsToUint::test();
+ Error += mix_::test();
+ Error += step_::test();
+ Error += max_::test();
+ Error += min_::test();
+ Error += clamp_::test();
+ Error += round_::test();
+ Error += roundEven::test();
+ Error += isnan_::test();
+ Error += isinf_::test();
+ Error += frexp_::test();
+ Error += ldexp_::test();
+
+# ifdef NDEBUG
+ std::size_t Samples = 1000;
+# else
+ std::size_t Samples = 1;
+# endif
+ Error += sign::perf(Samples);
+
+ Error += min_::perf(Samples);
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_exponential.cpp b/3rdparty/glm/source/test/core/core_func_exponential.cpp
new file mode 100644
index 0000000..380cdfb
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_exponential.cpp
@@ -0,0 +1,185 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/common.hpp>
+#include <glm/exponential.hpp>
+
+static int test_pow()
+{
+ int Error(0);
+
+ float A = glm::pow(2.f, 2.f);
+ Error += glm::equal(A, 4.f, 0.01f) ? 0 : 1;
+
+ glm::vec1 B = glm::pow(glm::vec1(2.f), glm::vec1(2.f));
+ Error += glm::all(glm::equal(B, glm::vec1(4.f), 0.01f)) ? 0 : 1;
+
+ glm::vec2 C = glm::pow(glm::vec2(2.f), glm::vec2(2.f));
+ Error += glm::all(glm::equal(C, glm::vec2(4.f), 0.01f)) ? 0 : 1;
+
+ glm::vec3 D = glm::pow(glm::vec3(2.f), glm::vec3(2.f));
+ Error += glm::all(glm::equal(D, glm::vec3(4.f), 0.01f)) ? 0 : 1;
+
+ glm::vec4 E = glm::pow(glm::vec4(2.f), glm::vec4(2.f));
+ Error += glm::all(glm::equal(E, glm::vec4(4.f), 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_sqrt()
+{
+ int Error = 0;
+
+ float A = glm::sqrt(4.f);
+ Error += glm::equal(A, 2.f, 0.01f) ? 0 : 1;
+
+ glm::vec1 B = glm::sqrt(glm::vec1(4.f));
+ Error += glm::all(glm::equal(B, glm::vec1(2.f), 0.01f)) ? 0 : 1;
+
+ glm::vec2 C = glm::sqrt(glm::vec2(4.f));
+ Error += glm::all(glm::equal(C, glm::vec2(2.f), 0.01f)) ? 0 : 1;
+
+ glm::vec3 D = glm::sqrt(glm::vec3(4.f));
+ Error += glm::all(glm::equal(D, glm::vec3(2.f), 0.01f)) ? 0 : 1;
+
+ glm::vec4 E = glm::sqrt(glm::vec4(4.f));
+ Error += glm::all(glm::equal(E, glm::vec4(2.f), 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_exp()
+{
+ int Error = 0;
+
+ float A = glm::exp(1.f);
+ Error += glm::equal(A, glm::e<float>(), 0.01f) ? 0 : 1;
+
+ glm::vec1 B = glm::exp(glm::vec1(1.f));
+ Error += glm::all(glm::equal(B, glm::vec1(glm::e<float>()), 0.01f)) ? 0 : 1;
+
+ glm::vec2 C = glm::exp(glm::vec2(1.f));
+ Error += glm::all(glm::equal(C, glm::vec2(glm::e<float>()), 0.01f)) ? 0 : 1;
+
+ glm::vec3 D = glm::exp(glm::vec3(1.f));
+ Error += glm::all(glm::equal(D, glm::vec3(glm::e<float>()), 0.01f)) ? 0 : 1;
+
+ glm::vec4 E = glm::exp(glm::vec4(1.f));
+ Error += glm::all(glm::equal(E, glm::vec4(glm::e<float>()), 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_log()
+{
+ int Error = 0;
+
+ float const A = glm::log(glm::e<float>());
+ Error += glm::equal(A, 1.f, 0.01f) ? 0 : 1;
+
+ glm::vec1 const B = glm::log(glm::vec1(glm::e<float>()));
+ Error += glm::all(glm::equal(B, glm::vec1(1.f), 0.01f)) ? 0 : 1;
+
+ glm::vec2 const C = glm::log(glm::vec2(glm::e<float>()));
+ Error += glm::all(glm::equal(C, glm::vec2(1.f), 0.01f)) ? 0 : 1;
+
+ glm::vec3 const D = glm::log(glm::vec3(glm::e<float>()));
+ Error += glm::all(glm::equal(D, glm::vec3(1.f), 0.01f)) ? 0 : 1;
+
+ glm::vec4 const E = glm::log(glm::vec4(glm::e<float>()));
+ Error += glm::all(glm::equal(E, glm::vec4(1.f), 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_exp2()
+{
+ int Error = 0;
+
+ float A = glm::exp2(4.f);
+ Error += glm::equal(A, 16.f, 0.01f) ? 0 : 1;
+
+ glm::vec1 B = glm::exp2(glm::vec1(4.f));
+ Error += glm::all(glm::equal(B, glm::vec1(16.f), 0.01f)) ? 0 : 1;
+
+ glm::vec2 C = glm::exp2(glm::vec2(4.f, 3.f));
+ Error += glm::all(glm::equal(C, glm::vec2(16.f, 8.f), 0.01f)) ? 0 : 1;
+
+ glm::vec3 D = glm::exp2(glm::vec3(4.f, 3.f, 2.f));
+ Error += glm::all(glm::equal(D, glm::vec3(16.f, 8.f, 4.f), 0.01f)) ? 0 : 1;
+
+ glm::vec4 E = glm::exp2(glm::vec4(4.f, 3.f, 2.f, 1.f));
+ Error += glm::all(glm::equal(E, glm::vec4(16.f, 8.f, 4.f, 2.f), 0.01f)) ? 0 : 1;
+
+# if GLM_HAS_CXX11_STL
+ //large exponent
+ float F = glm::exp2(23.f);
+ Error += glm::equal(F, 8388608.f, 0.01f) ? 0 : 1;
+# endif
+
+ return Error;
+}
+
+static int test_log2()
+{
+ int Error = 0;
+
+ float A = glm::log2(16.f);
+ Error += glm::equal(A, 4.f, 0.01f) ? 0 : 1;
+
+ glm::vec1 B = glm::log2(glm::vec1(16.f));
+ Error += glm::all(glm::equal(B, glm::vec1(4.f), 0.01f)) ? 0 : 1;
+
+ glm::vec2 C = glm::log2(glm::vec2(16.f, 8.f));
+ Error += glm::all(glm::equal(C, glm::vec2(4.f, 3.f), 0.01f)) ? 0 : 1;
+
+ glm::vec3 D = glm::log2(glm::vec3(16.f, 8.f, 4.f));
+ Error += glm::all(glm::equal(D, glm::vec3(4.f, 3.f, 2.f), 0.01f)) ? 0 : 1;
+
+ glm::vec4 E = glm::log2(glm::vec4(16.f, 8.f, 4.f, 2.f));
+ Error += glm::all(glm::equal(E, glm::vec4(4.f, 3.f, 2.f, 1.f), 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_inversesqrt()
+{
+ int Error = 0;
+
+ float A = glm::inversesqrt(16.f) * glm::sqrt(16.f);
+ Error += glm::equal(A, 1.f, 0.01f) ? 0 : 1;
+
+ glm::vec1 B = glm::inversesqrt(glm::vec1(16.f)) * glm::sqrt(16.f);
+ Error += glm::all(glm::equal(B, glm::vec1(1.f), 0.01f)) ? 0 : 1;
+
+ glm::vec2 C = glm::inversesqrt(glm::vec2(16.f)) * glm::sqrt(16.f);
+ Error += glm::all(glm::equal(C, glm::vec2(1.f), 0.01f)) ? 0 : 1;
+
+ glm::vec3 D = glm::inversesqrt(glm::vec3(16.f)) * glm::sqrt(16.f);
+ Error += glm::all(glm::equal(D, glm::vec3(1.f), 0.01f)) ? 0 : 1;
+
+ glm::vec4 E = glm::inversesqrt(glm::vec4(16.f)) * glm::sqrt(16.f);
+ Error += glm::all(glm::equal(E, glm::vec4(1.f), 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_pow();
+ Error += test_sqrt();
+ Error += test_exp();
+ Error += test_log();
+ Error += test_exp2();
+ Error += test_log2();
+ Error += test_inversesqrt();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_geometric.cpp b/3rdparty/glm/source/test/core/core_func_geometric.cpp
new file mode 100644
index 0000000..7ef9c68
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_geometric.cpp
@@ -0,0 +1,200 @@
+#include <glm/geometric.hpp>
+#include <glm/trigonometric.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_double2.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double4.hpp>
+#include <limits>
+
+namespace length
+{
+ int test()
+ {
+ float Length1 = glm::length(glm::vec1(1));
+ float Length2 = glm::length(glm::vec2(1, 0));
+ float Length3 = glm::length(glm::vec3(1, 0, 0));
+ float Length4 = glm::length(glm::vec4(1, 0, 0, 0));
+
+ int Error = 0;
+
+ Error += glm::abs(Length1 - 1.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Length2 - 1.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Length3 - 1.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Length4 - 1.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+
+ return Error;
+ }
+}//namespace length
+
+namespace distance
+{
+ int test()
+ {
+ float Distance1 = glm::distance(glm::vec1(1), glm::vec1(1));
+ float Distance2 = glm::distance(glm::vec2(1, 0), glm::vec2(1, 0));
+ float Distance3 = glm::distance(glm::vec3(1, 0, 0), glm::vec3(1, 0, 0));
+ float Distance4 = glm::distance(glm::vec4(1, 0, 0, 0), glm::vec4(1, 0, 0, 0));
+
+ int Error = 0;
+
+ Error += glm::abs(Distance1) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Distance2) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Distance3) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Distance4) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+
+ return Error;
+ }
+}//namespace distance
+
+namespace dot
+{
+ int test()
+ {
+ float Dot1 = glm::dot(glm::vec1(1), glm::vec1(1));
+ float Dot2 = glm::dot(glm::vec2(1), glm::vec2(1));
+ float Dot3 = glm::dot(glm::vec3(1), glm::vec3(1));
+ float Dot4 = glm::dot(glm::vec4(1), glm::vec4(1));
+
+ int Error = 0;
+
+ Error += glm::abs(Dot1 - 1.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Dot2 - 2.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Dot3 - 3.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += glm::abs(Dot4 - 4.0f) < std::numeric_limits<float>::epsilon() ? 0 : 1;
+
+ return Error;
+ }
+}//namespace dot
+
+namespace cross
+{
+ int test()
+ {
+ glm::vec3 Cross1 = glm::cross(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0));
+ glm::vec3 Cross2 = glm::cross(glm::vec3(0, 1, 0), glm::vec3(1, 0, 0));
+
+ int Error = 0;
+
+ Error += glm::all(glm::lessThan(glm::abs(Cross1 - glm::vec3(0, 0, 1)), glm::vec3(std::numeric_limits<float>::epsilon()))) ? 0 : 1;
+ Error += glm::all(glm::lessThan(glm::abs(Cross2 - glm::vec3(0, 0,-1)), glm::vec3(std::numeric_limits<float>::epsilon()))) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace cross
+
+namespace normalize
+{
+ int test()
+ {
+ glm::vec3 Normalize1 = glm::normalize(glm::vec3(1, 0, 0));
+ glm::vec3 Normalize2 = glm::normalize(glm::vec3(2, 0, 0));
+
+ glm::vec3 Normalize3 = glm::normalize(glm::vec3(-0.6, 0.7, -0.5));
+
+ glm::vec3 ro = glm::vec3(glm::cos(5.f) * 3.f, 2.f, glm::sin(5.f) * 3.f);
+ glm::vec3 w = glm::normalize(glm::vec3(0, -0.2f, 0) - ro);
+ glm::vec3 u = glm::normalize(glm::cross(w, glm::vec3(0, 1, 0)));
+ glm::vec3 v = glm::cross(u, w);
+
+ int Error = 0;
+
+ Error += glm::all(glm::lessThan(glm::abs(Normalize1 - glm::vec3(1, 0, 0)), glm::vec3(std::numeric_limits<float>::epsilon()))) ? 0 : 1;
+ Error += glm::all(glm::lessThan(glm::abs(Normalize2 - glm::vec3(1, 0, 0)), glm::vec3(std::numeric_limits<float>::epsilon()))) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace normalize
+
+namespace faceforward
+{
+ int test()
+ {
+ int Error = 0;
+
+ {
+ glm::vec3 N(0.0f, 0.0f, 1.0f);
+ glm::vec3 I(1.0f, 0.0f, 1.0f);
+ glm::vec3 Nref(0.0f, 0.0f, 1.0f);
+ glm::vec3 F = glm::faceforward(N, I, Nref);
+ }
+
+ return Error;
+ }
+}//namespace faceforward
+
+namespace reflect
+{
+ int test()
+ {
+ int Error = 0;
+
+ {
+ glm::vec2 A(1.0f,-1.0f);
+ glm::vec2 B(0.0f, 1.0f);
+ glm::vec2 C = glm::reflect(A, B);
+ Error += glm::all(glm::equal(C, glm::vec2(1.0, 1.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec2 A(1.0f,-1.0f);
+ glm::dvec2 B(0.0f, 1.0f);
+ glm::dvec2 C = glm::reflect(A, B);
+ Error += glm::all(glm::equal(C, glm::dvec2(1.0, 1.0), 0.0001)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace reflect
+
+namespace refract
+{
+ int test()
+ {
+ int Error = 0;
+
+ {
+ float A(-1.0f);
+ float B(1.0f);
+ float C = glm::refract(A, B, 0.5f);
+ Error += glm::equal(C, -1.0f, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(0.0f,-1.0f);
+ glm::vec2 B(0.0f, 1.0f);
+ glm::vec2 C = glm::refract(A, B, 0.5f);
+ Error += glm::all(glm::equal(C, glm::vec2(0.0, -1.0), 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dvec2 A(0.0f,-1.0f);
+ glm::dvec2 B(0.0f, 1.0f);
+ glm::dvec2 C = glm::refract(A, B, 0.5);
+ Error += glm::all(glm::equal(C, glm::dvec2(0.0, -1.0), 0.0001)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace refract
+
+int main()
+{
+ int Error(0);
+
+ Error += length::test();
+ Error += distance::test();
+ Error += dot::test();
+ Error += cross::test();
+ Error += normalize::test();
+ Error += faceforward::test();
+ Error += reflect::test();
+ Error += refract::test();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_integer.cpp b/3rdparty/glm/source/test/core/core_func_integer.cpp
new file mode 100644
index 0000000..95d650c
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_integer.cpp
@@ -0,0 +1,1556 @@
+#include <glm/integer.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/ext/vector_int1.hpp>
+#include <glm/ext/vector_int2.hpp>
+#include <glm/ext/vector_int3.hpp>
+#include <glm/ext/vector_int4.hpp>
+#include <glm/ext/vector_uint1.hpp>
+#include <glm/ext/vector_uint2.hpp>
+#include <glm/ext/vector_uint3.hpp>
+#include <glm/ext/vector_uint4.hpp>
+#include <glm/ext/scalar_int_sized.hpp>
+#include <glm/ext/scalar_uint_sized.hpp>
+#include <vector>
+#include <ctime>
+#include <cstdio>
+
+enum result
+{
+ SUCCESS,
+ FAIL,
+ ASSERT,
+ STATIC_ASSERT
+};
+
+namespace bitfieldInsert
+{
+ template<typename genType>
+ struct type
+ {
+ genType Base;
+ genType Insert;
+ int Offset;
+ int Bits;
+ genType Return;
+ };
+
+ typedef type<glm::uint> typeU32;
+
+ typeU32 const Data32[] =
+ {
+ {0x00000000, 0xffffffff, 0, 32, 0xffffffff},
+ {0x00000000, 0xffffffff, 0, 31, 0x7fffffff},
+ {0x00000000, 0xffffffff, 0, 0, 0x00000000},
+ {0xff000000, 0x000000ff, 8, 8, 0xff00ff00},
+ {0xffff0000, 0xffff0000, 16, 16, 0x00000000},
+ {0x0000ffff, 0x0000ffff, 16, 16, 0xffffffff}
+ };
+
+ static int test()
+ {
+ int Error = 0;
+ glm::uint count = sizeof(Data32) / sizeof(typeU32);
+
+ for(glm::uint i = 0; i < count; ++i)
+ {
+ glm::uint Return = glm::bitfieldInsert(
+ Data32[i].Base,
+ Data32[i].Insert,
+ Data32[i].Offset,
+ Data32[i].Bits);
+
+ Error += Data32[i].Return == Return ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//bitfieldInsert
+
+namespace bitfieldExtract
+{
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ int Offset;
+ int Bits;
+ genType Return;
+ result Result;
+ };
+
+ typedef type<glm::uint> typeU32;
+
+ typeU32 const Data32[] =
+ {
+ {0xffffffff, 0,32, 0xffffffff, SUCCESS},
+ {0xffffffff, 8, 0, 0x00000000, SUCCESS},
+ {0x00000000, 0,32, 0x00000000, SUCCESS},
+ {0x0f0f0f0f, 0,32, 0x0f0f0f0f, SUCCESS},
+ {0x00000000, 8, 0, 0x00000000, SUCCESS},
+ {0x80000000,31, 1, 0x00000001, SUCCESS},
+ {0x7fffffff,31, 1, 0x00000000, SUCCESS},
+ {0x00000300, 8, 8, 0x00000003, SUCCESS},
+ {0x0000ff00, 8, 8, 0x000000ff, SUCCESS},
+ {0xfffffff0, 0, 5, 0x00000010, SUCCESS},
+ {0x000000ff, 1, 3, 0x00000007, SUCCESS},
+ {0x000000ff, 0, 3, 0x00000007, SUCCESS},
+ {0x00000000, 0, 2, 0x00000000, SUCCESS},
+ {0xffffffff, 0, 8, 0x000000ff, SUCCESS},
+ {0xffff0000,16,16, 0x0000ffff, SUCCESS},
+ {0xfffffff0, 0, 8, 0x00000000, FAIL},
+ {0xffffffff,16,16, 0x00000000, FAIL},
+ //{0xffffffff,32, 1, 0x00000000, ASSERT}, // Throw an assert
+ //{0xffffffff, 0,33, 0x00000000, ASSERT}, // Throw an assert
+ //{0xffffffff,16,16, 0x00000000, ASSERT}, // Throw an assert
+ };
+
+ static int test()
+ {
+ int Error = 0;
+
+ glm::uint count = sizeof(Data32) / sizeof(typeU32);
+
+ for(glm::uint i = 0; i < count; ++i)
+ {
+ glm::uint Return = glm::bitfieldExtract(
+ Data32[i].Value,
+ Data32[i].Offset,
+ Data32[i].Bits);
+
+ bool Compare = Data32[i].Return == Return;
+
+ if(Data32[i].Result == SUCCESS && Compare)
+ continue;
+ else if(Data32[i].Result == FAIL && !Compare)
+ continue;
+
+ Error += 1;
+ }
+
+ return Error;
+ }
+}//extractField
+
+namespace bitfieldReverse
+{
+/*
+ GLM_FUNC_QUALIFIER unsigned int bitfieldReverseLoop(unsigned int v)
+ {
+ unsigned int Result(0);
+ unsigned int const BitSize = static_cast<unsigned int>(sizeof(unsigned int) * 8);
+ for(unsigned int i = 0; i < BitSize; ++i)
+ {
+ unsigned int const BitSet(v & (static_cast<unsigned int>(1) << i));
+ unsigned int const BitFirst(BitSet >> i);
+ Result |= BitFirst << (BitSize - 1 - i);
+ }
+ return Result;
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint64_t bitfieldReverseLoop(glm::uint64_t v)
+ {
+ glm::uint64_t Result(0);
+ glm::uint64_t const BitSize = static_cast<glm::uint64_t>(sizeof(unsigned int) * 8);
+ for(glm::uint64_t i = 0; i < BitSize; ++i)
+ {
+ glm::uint64_t const BitSet(v & (static_cast<glm::uint64_t>(1) << i));
+ glm::uint64_t const BitFirst(BitSet >> i);
+ Result |= BitFirst << (BitSize - 1 - i);
+ }
+ return Result;
+ }
+*/
+ template<glm::length_t L, typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER glm::vec<L, T, Q> bitfieldReverseLoop(glm::vec<L, T, Q> const& v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldReverse' only accept integer values");
+
+ glm::vec<L, T, Q> Result(0);
+ T const BitSize = static_cast<T>(sizeof(T) * 8);
+ for(T i = 0; i < BitSize; ++i)
+ {
+ glm::vec<L, T, Q> const BitSet(v & (static_cast<T>(1) << i));
+ glm::vec<L, T, Q> const BitFirst(BitSet >> i);
+ Result |= BitFirst << (BitSize - 1 - i);
+ }
+ return Result;
+ }
+
+ template<typename T>
+ GLM_FUNC_QUALIFIER T bitfieldReverseLoop(T v)
+ {
+ return bitfieldReverseLoop(glm::vec<1, T>(v)).x;
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldReverseUint32(glm::uint32 x)
+ {
+ x = (x & 0x55555555) << 1 | (x & 0xAAAAAAAA) >> 1;
+ x = (x & 0x33333333) << 2 | (x & 0xCCCCCCCC) >> 2;
+ x = (x & 0x0F0F0F0F) << 4 | (x & 0xF0F0F0F0) >> 4;
+ x = (x & 0x00FF00FF) << 8 | (x & 0xFF00FF00) >> 8;
+ x = (x & 0x0000FFFF) << 16 | (x & 0xFFFF0000) >> 16;
+ return x;
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint64 bitfieldReverseUint64(glm::uint64 x)
+ {
+ x = (x & 0x5555555555555555) << 1 | (x & 0xAAAAAAAAAAAAAAAA) >> 1;
+ x = (x & 0x3333333333333333) << 2 | (x & 0xCCCCCCCCCCCCCCCC) >> 2;
+ x = (x & 0x0F0F0F0F0F0F0F0F) << 4 | (x & 0xF0F0F0F0F0F0F0F0) >> 4;
+ x = (x & 0x00FF00FF00FF00FF) << 8 | (x & 0xFF00FF00FF00FF00) >> 8;
+ x = (x & 0x0000FFFF0000FFFF) << 16 | (x & 0xFFFF0000FFFF0000) >> 16;
+ x = (x & 0x00000000FFFFFFFF) << 32 | (x & 0xFFFFFFFF00000000) >> 32;
+ return x;
+ }
+
+ template<bool EXEC = false>
+ struct compute_bitfieldReverseStep
+ {
+ template<glm::length_t L, typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER static glm::vec<L, T, Q> call(glm::vec<L, T, Q> const& v, T, T)
+ {
+ return v;
+ }
+ };
+
+ template<>
+ struct compute_bitfieldReverseStep<true>
+ {
+ template<glm::length_t L, typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER static glm::vec<L, T, Q> call(glm::vec<L, T, Q> const& v, T Mask, T Shift)
+ {
+ return (v & Mask) << Shift | (v & (~Mask)) >> Shift;
+ }
+ };
+
+ template<glm::length_t L, typename T, glm::qualifier Q>
+ GLM_FUNC_QUALIFIER glm::vec<L, T, Q> bitfieldReverseOps(glm::vec<L, T, Q> const& v)
+ {
+ glm::vec<L, T, Q> x(v);
+ x = compute_bitfieldReverseStep<sizeof(T) * 8 >= 2>::call(x, static_cast<T>(0x5555555555555555ull), static_cast<T>( 1));
+ x = compute_bitfieldReverseStep<sizeof(T) * 8 >= 4>::call(x, static_cast<T>(0x3333333333333333ull), static_cast<T>( 2));
+ x = compute_bitfieldReverseStep<sizeof(T) * 8 >= 8>::call(x, static_cast<T>(0x0F0F0F0F0F0F0F0Full), static_cast<T>( 4));
+ x = compute_bitfieldReverseStep<sizeof(T) * 8 >= 16>::call(x, static_cast<T>(0x00FF00FF00FF00FFull), static_cast<T>( 8));
+ x = compute_bitfieldReverseStep<sizeof(T) * 8 >= 32>::call(x, static_cast<T>(0x0000FFFF0000FFFFull), static_cast<T>(16));
+ x = compute_bitfieldReverseStep<sizeof(T) * 8 >= 64>::call(x, static_cast<T>(0x00000000FFFFFFFFull), static_cast<T>(32));
+ return x;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType bitfieldReverseOps(genType x)
+ {
+ return bitfieldReverseOps(glm::vec<1, genType, glm::defaultp>(x)).x;
+ }
+
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ genType Return;
+ result Result;
+ };
+
+ typedef type<glm::uint> typeU32;
+
+ typeU32 const Data32[] =
+ {
+ {0x00000001, 0x80000000, SUCCESS},
+ {0x0000000f, 0xf0000000, SUCCESS},
+ {0x000000ff, 0xff000000, SUCCESS},
+ {0xf0000000, 0x0000000f, SUCCESS},
+ {0xff000000, 0x000000ff, SUCCESS},
+ {0xffffffff, 0xffffffff, SUCCESS},
+ {0x00000000, 0x00000000, SUCCESS}
+ };
+
+ typedef type<glm::uint64> typeU64;
+
+ typeU64 const Data64[] =
+ {
+ {0x00000000000000ff, 0xff00000000000000, SUCCESS},
+ {0x000000000000000f, 0xf000000000000000, SUCCESS},
+ {0xf000000000000000, 0x000000000000000f, SUCCESS},
+ {0xffffffffffffffff, 0xffffffffffffffff, SUCCESS},
+ {0x0000000000000000, 0x0000000000000000, SUCCESS}
+ };
+
+ static int test32_bitfieldReverse()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data32) / sizeof(typeU32);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint Return = glm::bitfieldReverse(Data32[i].Value);
+
+ bool Compare = Data32[i].Return == Return;
+
+ if(Data32[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+ static int test32_bitfieldReverseLoop()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data32) / sizeof(typeU32);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint Return = bitfieldReverseLoop(Data32[i].Value);
+
+ bool Compare = Data32[i].Return == Return;
+
+ if(Data32[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+ static int test32_bitfieldReverseUint32()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data32) / sizeof(typeU32);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint Return = bitfieldReverseUint32(Data32[i].Value);
+
+ bool Compare = Data32[i].Return == Return;
+
+ if(Data32[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+ static int test32_bitfieldReverseOps()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data32) / sizeof(typeU32);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint Return = bitfieldReverseOps(Data32[i].Value);
+
+ bool Compare = Data32[i].Return == Return;
+
+ if(Data32[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+ static int test64_bitfieldReverse()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data64) / sizeof(typeU64);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint64 Return = glm::bitfieldReverse(Data64[i].Value);
+
+ bool Compare = Data64[i].Return == Return;
+
+ if(Data64[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+	static int test64_bitfieldReverseLoop() // validates the 64-bit reference loop implementation against Data64
+	{
+		int Error = 0;
+		std::size_t const Count = sizeof(Data64) / sizeof(typeU64);
+
+		for(std::size_t i = 0; i < Count; ++i)
+		{
+			glm::uint64 Return = bitfieldReverseLoop(Data64[i].Value);
+
+			bool Compare = Data64[i].Return == Return;
+
+			if(Data64[i].Result == SUCCESS) // fixed: was Data32[i].Result, a copy-paste from test32_bitfieldReverseLoop reading the wrong table
+				Error += Compare ? 0 : 1;
+			else
+				Error += Compare ? 1 : 0;
+		}
+
+		return Error;
+	}
+
+ static int test64_bitfieldReverseUint64()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data64) / sizeof(typeU64);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint64 Return = bitfieldReverseUint64(Data64[i].Value);
+
+ bool Compare = Data64[i].Return == Return;
+
+ if(Data64[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+ static int test64_bitfieldReverseOps()
+ {
+ int Error = 0;
+ std::size_t const Count = sizeof(Data64) / sizeof(typeU64);
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::uint64 Return = bitfieldReverseOps(Data64[i].Value);
+
+ bool Compare = Data64[i].Return == Return;
+
+ if(Data64[i].Result == SUCCESS)
+ Error += Compare ? 0 : 1;
+ else
+ Error += Compare ? 1 : 0;
+ }
+
+ return Error;
+ }
+
+ static int test()
+ {
+ int Error = 0;
+
+ Error += test32_bitfieldReverse();
+ Error += test32_bitfieldReverseLoop();
+ Error += test32_bitfieldReverseUint32();
+ Error += test32_bitfieldReverseOps();
+
+ Error += test64_bitfieldReverse();
+ Error += test64_bitfieldReverseLoop();
+ Error += test64_bitfieldReverseUint64();
+ Error += test64_bitfieldReverseOps();
+
+ return Error;
+ }
+
+ static int perf32(glm::uint32 Count)
+ {
+ int Error = 0;
+
+ std::vector<glm::uint32> Data;
+ Data.resize(static_cast<std::size_t>(Count));
+
+ std::clock_t Timestamps0 = std::clock();
+
+ for(glm::uint32 k = 0; k < Count; ++k)
+ Data[k] = glm::bitfieldReverse(k);
+
+ std::clock_t Timestamps1 = std::clock();
+
+ for(glm::uint32 k = 0; k < Count; ++k)
+ Data[k] = bitfieldReverseLoop(k);
+
+ std::clock_t Timestamps2 = std::clock();
+
+ for(glm::uint32 k = 0; k < Count; ++k)
+ Data[k] = bitfieldReverseUint32(k);
+
+ std::clock_t Timestamps3 = std::clock();
+
+ for(glm::uint32 k = 0; k < Count; ++k)
+ Data[k] = bitfieldReverseOps(k);
+
+ std::clock_t Timestamps4 = std::clock();
+
+ std::printf("glm::bitfieldReverse: %d clocks\n", static_cast<int>(Timestamps1 - Timestamps0));
+ std::printf("bitfieldReverseLoop: %d clocks\n", static_cast<int>(Timestamps2 - Timestamps1));
+ std::printf("bitfieldReverseUint32: %d clocks\n", static_cast<int>(Timestamps3 - Timestamps2));
+ std::printf("bitfieldReverseOps: %d clocks\n", static_cast<int>(Timestamps4 - Timestamps3));
+
+ return Error;
+ }
+
+ static int perf64(glm::uint64 Count)
+ {
+ int Error = 0;
+
+ std::vector<glm::uint64> Data;
+ Data.resize(static_cast<std::size_t>(Count));
+
+ std::clock_t Timestamps0 = std::clock();
+
+ for(glm::uint64 k = 0; k < Count; ++k)
+ Data[static_cast<std::size_t>(k)] = glm::bitfieldReverse(k);
+
+ std::clock_t Timestamps1 = std::clock();
+
+ for(glm::uint64 k = 0; k < Count; ++k)
+ Data[static_cast<std::size_t>(k)] = bitfieldReverseLoop<glm::uint64>(k);
+
+ std::clock_t Timestamps2 = std::clock();
+
+ for(glm::uint64 k = 0; k < Count; ++k)
+ Data[static_cast<std::size_t>(k)] = bitfieldReverseUint64(k);
+
+ std::clock_t Timestamps3 = std::clock();
+
+ for(glm::uint64 k = 0; k < Count; ++k)
+ Data[static_cast<std::size_t>(k)] = bitfieldReverseOps(k);
+
+ std::clock_t Timestamps4 = std::clock();
+
+ std::printf("glm::bitfieldReverse - 64: %d clocks\n", static_cast<int>(Timestamps1 - Timestamps0));
+ std::printf("bitfieldReverseLoop - 64: %d clocks\n", static_cast<int>(Timestamps2 - Timestamps1));
+ std::printf("bitfieldReverseUint - 64: %d clocks\n", static_cast<int>(Timestamps3 - Timestamps2));
+ std::printf("bitfieldReverseOps - 64: %d clocks\n", static_cast<int>(Timestamps4 - Timestamps3));
+
+ return Error;
+ }
+
+ static int perf(std::size_t Samples)
+ {
+ int Error = 0;
+
+ Error += perf32(static_cast<glm::uint32>(Samples));
+ Error += perf64(static_cast<glm::uint64>(Samples));
+
+ return Error;
+ }
+}//bitfieldReverse
+
+namespace findMSB
+{
+ template<typename genType, typename retType>
+ struct type
+ {
+ genType Value;
+ retType Return;
+ };
+
+# if GLM_HAS_BITSCAN_WINDOWS
+ template<typename genIUType>
+ static int findMSB_intrinsic(genIUType Value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");
+
+ if(Value == 0)
+ return -1;
+
+ unsigned long Result(0);
+ _BitScanReverse(&Result, Value);
+ return int(Result);
+ }
+# endif//GLM_HAS_BITSCAN_WINDOWS
+
+# if GLM_ARCH & GLM_ARCH_AVX && GLM_COMPILER & GLM_COMPILER_VC
+	template<typename genIUType>
+	static int findMSB_avx(genIUType Value) // intrinsic-based findMSB candidate used for perf comparison; returns -1 for 0
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");
+
+		if(Value == 0)
+			return -1;
+
+		return int(_tzcnt_u32(Value)); // NOTE(review): _tzcnt_u32 counts *trailing* zeros; the MSB index is 31 - _lzcnt_u32. These agree only for powers of two — TODO confirm intent
+	}
+# endif//GLM_ARCH & GLM_ARCH_AVX && GLM_PLATFORM & GLM_PLATFORM_WINDOWS
+
+ template<typename genIUType>
+ static int findMSB_095(genIUType Value)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");
+
+ if(Value == genIUType(0) || Value == genIUType(-1))
+ return -1;
+ else if(Value > 0)
+ {
+ genIUType Bit = genIUType(-1);
+ for(genIUType tmp = Value; tmp > 0; tmp >>= 1, ++Bit){}
+ return static_cast<int>(Bit);
+ }
+ else //if(Value < 0)
+ {
+ int const BitCount(sizeof(genIUType) * 8);
+ int MostSignificantBit(-1);
+ for(int BitIndex(0); BitIndex < BitCount; ++BitIndex)
+ MostSignificantBit = (Value & (1 << BitIndex)) ? MostSignificantBit : BitIndex;
+ assert(MostSignificantBit >= 0);
+ return MostSignificantBit;
+ }
+ }
+
	// findMSB via the nlz1 leading-zero count from Hacker's Delight:
	// binary-search the highest set bit by shifting it toward bit 31.
	// NOTE(review): the masks and the final '31 - n' hard-code a 32-bit width,
	// so this is only meaningful for 32-bit genIUType despite the template.
	template<typename genIUType>
	static int findMSB_nlz1(genIUType x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");

		if (x == 0)
			return -1;

		int n = 0;	// accumulates the leading-zero count
		if (x <= 0x0000FFFF) {n = n +16; x = x <<16;}
		if (x <= 0x00FFFFFF) {n = n + 8; x = x << 8;}
		if (x <= 0x0FFFFFFF) {n = n + 4; x = x << 4;}
		if (x <= 0x3FFFFFFF) {n = n + 2; x = x << 2;}
		if (x <= 0x7FFFFFFF) {n = n + 1;}
		return 31 - n;
	}
+
+ static int findMSB_nlz2(unsigned int x)
+ {
+ unsigned int y;
+ int n = 32;
+
+ y = x >>16; if (y != 0) {n = n -16; x = y;}
+ y = x >> 8; if (y != 0) {n = n - 8; x = y;}
+ y = x >> 4; if (y != 0) {n = n - 4; x = y;}
+ y = x >> 2; if (y != 0) {n = n - 2; x = y;}
+ y = x >> 1; if (y != 0) return n - 2;
+ return 32 - (n - static_cast<int>(x));
+ }
+
+ static int findMSB_pop(unsigned int x)
+ {
+ x = x | (x >> 1);
+ x = x | (x >> 2);
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >>16);
+ return 31 - glm::bitCount(~x);
+ }
+
	// Micro-benchmark comparing glm::findMSB against the alternative
	// implementations above. Every loop also checks its results against the
	// expected table, so the return value doubles as an error count.
	static int perf_int(std::size_t Count)
	{
		// Expected findMSB results for representative 32-bit inputs.
		type<int, int> const Data[] =
		{
			{0x00000000, -1},
			{0x00000001, 0},
			{0x00000002, 1},
			{0x00000003, 1},
			{0x00000004, 2},
			{0x00000005, 2},
			{0x00000007, 2},
			{0x00000008, 3},
			{0x00000010, 4},
			{0x00000020, 5},
			{0x00000040, 6},
			{0x00000080, 7},
			{0x00000100, 8},
			{0x00000200, 9},
			{0x00000400, 10},
			{0x00000800, 11},
			{0x00001000, 12},
			{0x00002000, 13},
			{0x00004000, 14},
			{0x00008000, 15},
			{0x00010000, 16},
			{0x00020000, 17},
			{0x00040000, 18},
			{0x00080000, 19},
			{0x00100000, 20},
			{0x00200000, 21},
			{0x00400000, 22},
			{0x00800000, 23},
			{0x01000000, 24},
			{0x02000000, 25},
			{0x04000000, 26},
			{0x08000000, 27},
			{0x10000000, 28},
			{0x20000000, 29},
			{0x40000000, 30}
		};

		int Error(0);

		std::clock_t Timestamps0 = std::clock();

		// glm::findMSB (the implementation under test)
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = glm::findMSB(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps1 = std::clock();

		// Hacker's Delight nlz1 variant
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = findMSB_nlz1(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps2 = std::clock();

		// Hacker's Delight nlz2 variant
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = findMSB_nlz2(static_cast<unsigned int>(Data[i].Value));
			Error += Data[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps3 = std::clock();

		// GLM 0.9.5 reference implementation
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = findMSB_095(static_cast<unsigned int>(Data[i].Value));
			Error += Data[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps4 = std::clock();

#		if GLM_HAS_BITSCAN_WINDOWS
		// _BitScanReverse intrinsic (Windows only)
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = findMSB_intrinsic(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}
#		endif//GLM_HAS_BITSCAN_WINDOWS

		std::clock_t Timestamps5 = std::clock();

		// population-count variant
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = findMSB_pop(static_cast<unsigned int>(Data[i].Value));
			Error += Data[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps6 = std::clock();

#		if GLM_ARCH & GLM_ARCH_AVX && GLM_COMPILER & GLM_COMPILER_VC
		// AVX/BMI variant (MSVC only)
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int, int>); ++i)
		{
			int Result = findMSB_avx(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps7 = std::clock();
#		endif

		std::printf("glm::findMSB: %d clocks\n", static_cast<int>(Timestamps1 - Timestamps0));
		std::printf("findMSB - nlz1: %d clocks\n", static_cast<int>(Timestamps2 - Timestamps1));
		std::printf("findMSB - nlz2: %d clocks\n", static_cast<int>(Timestamps3 - Timestamps2));
		std::printf("findMSB - 0.9.5: %d clocks\n", static_cast<int>(Timestamps4 - Timestamps3));

#		if GLM_HAS_BITSCAN_WINDOWS
		std::printf("findMSB - intrinsics: %d clocks\n", static_cast<int>(Timestamps5 - Timestamps4));
#		endif//GLM_HAS_BITSCAN_WINDOWS
		std::printf("findMSB - pop: %d clocks\n", static_cast<int>(Timestamps6 - Timestamps5));

#		if GLM_ARCH & GLM_ARCH_AVX && GLM_COMPILER & GLM_COMPILER_VC
		std::printf("findMSB - avx tzcnt: %d clocks\n", static_cast<int>(Timestamps7 - Timestamps6));
#		endif//GLM_ARCH & GLM_ARCH_AVX && GLM_PLATFORM & GLM_PLATFORM_WINDOWS

		return Error;
	}
+
	// Validates glm::findMSB on ivec4 inputs: each component of the result
	// must equal the expected scalar bit index broadcast across the vector.
	static int test_ivec4()
	{
		type<glm::ivec4, glm::ivec4> const Data[] =
		{
			{glm::ivec4(0x00000000), glm::ivec4(-1)},
			{glm::ivec4(0x00000001), glm::ivec4( 0)},
			{glm::ivec4(0x00000002), glm::ivec4( 1)},
			{glm::ivec4(0x00000003), glm::ivec4( 1)},
			{glm::ivec4(0x00000004), glm::ivec4( 2)},
			{glm::ivec4(0x00000005), glm::ivec4( 2)},
			{glm::ivec4(0x00000007), glm::ivec4( 2)},
			{glm::ivec4(0x00000008), glm::ivec4( 3)},
			{glm::ivec4(0x00000010), glm::ivec4( 4)},
			{glm::ivec4(0x00000020), glm::ivec4( 5)},
			{glm::ivec4(0x00000040), glm::ivec4( 6)},
			{glm::ivec4(0x00000080), glm::ivec4( 7)},
			{glm::ivec4(0x00000100), glm::ivec4( 8)},
			{glm::ivec4(0x00000200), glm::ivec4( 9)},
			{glm::ivec4(0x00000400), glm::ivec4(10)},
			{glm::ivec4(0x00000800), glm::ivec4(11)},
			{glm::ivec4(0x00001000), glm::ivec4(12)},
			{glm::ivec4(0x00002000), glm::ivec4(13)},
			{glm::ivec4(0x00004000), glm::ivec4(14)},
			{glm::ivec4(0x00008000), glm::ivec4(15)},
			{glm::ivec4(0x00010000), glm::ivec4(16)},
			{glm::ivec4(0x00020000), glm::ivec4(17)},
			{glm::ivec4(0x00040000), glm::ivec4(18)},
			{glm::ivec4(0x00080000), glm::ivec4(19)},
			{glm::ivec4(0x00100000), glm::ivec4(20)},
			{glm::ivec4(0x00200000), glm::ivec4(21)},
			{glm::ivec4(0x00400000), glm::ivec4(22)},
			{glm::ivec4(0x00800000), glm::ivec4(23)},
			{glm::ivec4(0x01000000), glm::ivec4(24)},
			{glm::ivec4(0x02000000), glm::ivec4(25)},
			{glm::ivec4(0x04000000), glm::ivec4(26)},
			{glm::ivec4(0x08000000), glm::ivec4(27)},
			{glm::ivec4(0x10000000), glm::ivec4(28)},
			{glm::ivec4(0x20000000), glm::ivec4(29)},
			{glm::ivec4(0x40000000), glm::ivec4(30)}
		};

		int Error(0);

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::ivec4, glm::ivec4>); ++i)
		{
			glm::ivec4 Result0 = glm::findMSB(Data[i].Value);
			Error += glm::all(glm::equal(Data[i].Return, Result0)) ? 0 : 1;
		}

		return Error;
	}
+
	// Validates glm::findMSB and the reference implementations against the
	// same scalar table (unsigned inputs, int results).
	static int test_int()
	{
		typedef type<glm::uint, int> entry;

		entry const Data[] =
		{
			{0x00000000, -1},
			{0x00000001, 0},
			{0x00000002, 1},
			{0x00000003, 1},
			{0x00000004, 2},
			{0x00000005, 2},
			{0x00000007, 2},
			{0x00000008, 3},
			{0x00000010, 4},
			{0x00000020, 5},
			{0x00000040, 6},
			{0x00000080, 7},
			{0x00000100, 8},
			{0x00000200, 9},
			{0x00000400, 10},
			{0x00000800, 11},
			{0x00001000, 12},
			{0x00002000, 13},
			{0x00004000, 14},
			{0x00008000, 15},
			{0x00010000, 16},
			{0x00020000, 17},
			{0x00040000, 18},
			{0x00080000, 19},
			{0x00100000, 20},
			{0x00200000, 21},
			{0x00400000, 22},
			{0x00800000, 23},
			{0x01000000, 24},
			{0x02000000, 25},
			{0x04000000, 26},
			{0x08000000, 27},
			{0x10000000, 28},
			{0x20000000, 29},
			{0x40000000, 30}
		};

		int Error(0);

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(entry); ++i)
		{
			int Result0 = glm::findMSB(Data[i].Value);
			Error += Data[i].Return == Result0 ? 0 : 1;
		}

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(entry); ++i)
		{
			int Result0 = findMSB_nlz1(Data[i].Value);
			Error += Data[i].Return == Result0 ? 0 : 1;
		}
/*
	// findMSB_nlz2 is deliberately skipped here -- presumably because it
	// disagrees with the expected table (see the error counts in perf_int).
	for(std::size_t i = 0; i < sizeof(Data) / sizeof(entry); ++i)
	{
		int Result0 = findMSB_nlz2(Data[i].Value);
		Error += Data[i].Return == Result0 ? 0 : 1;
	}
*/
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(entry); ++i)
		{
			int Result0 = findMSB_095(Data[i].Value);
			Error += Data[i].Return == Result0 ? 0 : 1;
		}

#		if GLM_HAS_BITSCAN_WINDOWS
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(entry); ++i)
		{
			int Result0 = findMSB_intrinsic(Data[i].Value);
			Error += Data[i].Return == Result0 ? 0 : 1;
		}
#		endif//GLM_HAS_BITSCAN_WINDOWS

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(entry); ++i)
		{
			int Result0 = findMSB_pop(Data[i].Value);
			Error += Data[i].Return == Result0 ? 0 : 1;
		}

		return Error;
	}
+
+ static int test()
+ {
+ int Error(0);
+
+ Error += test_ivec4();
+ Error += test_int();
+
+ return Error;
+ }
+
+ static int perf(std::size_t Samples)
+ {
+ int Error(0);
+
+ Error += perf_int(Samples);
+
+ return Error;
+ }
+}//findMSB
+
namespace findLSB
{
	// Pairs an input value with the expected findLSB() result.
	template<typename genType, typename retType>
	struct type
	{
		genType Value;	// input passed to findLSB
		retType Return;	// expected bit index (-1 when no bit is set)
	};

	typedef type<int, int> entry;

	// Shared test table: expected lowest-set-bit index per input.
	entry const DataI32[] =
	{
		{0x00000001, 0},
		{0x00000003, 0},
		{0x00000002, 1},
		// {0x80000000, 31}, // Clang generates an error with this
		{0x00010000, 16},
		{0x7FFF0000, 16},
		{0x7F000000, 24},
		{0x7F00FF00, 8},
		{0x00000000, -1}
	};
+
#	if GLM_HAS_BITSCAN_WINDOWS
	// findLSB using the MSVC _BitScanForward intrinsic, which stores the index
	// of the lowest set bit; 0 is special-cased to -1 to match GLSL semantics.
	template<typename genIUType>
	static int findLSB_intrinsic(genIUType Value)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");

		if(Value == 0)
			return -1;

		unsigned long Result(0);
		_BitScanForward(&Result, Value);
		return int(Result);
	}
#	endif
+
	// findLSB as implemented in GLM 0.9.5: linear scan upward until the first
	// set bit is found.
	// NOTE(review): '1 << Bit' shifts a plain int, so bit 31 (and above for
	// 64-bit genIUType) relies on UB -- the test table avoids 0x80000000.
	template<typename genIUType>
	static int findLSB_095(genIUType Value)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");
		if(Value == 0)
			return -1;

		genIUType Bit;
		for(Bit = genIUType(0); !(Value & (1 << Bit)); ++Bit){}
		return Bit;	// implicitly converted to int
	}
+
+ template<typename genIUType>
+ static int findLSB_ntz2(genIUType x)
+ {
+ if(x == 0)
+ return -1;
+
+ return glm::bitCount(~x & (x - static_cast<genIUType>(1)));
+ }
+
	// Branch-free findLSB: computes the trailing-zero count unconditionally,
	// then uses the Keep/Discard multipliers (0 or 1) to select between that
	// count and -1 for the x == 0 case without an if.
	template<typename genIUType>
	static int findLSB_branchfree(genIUType x)
	{
		bool IsNull(x == 0);
		int const Keep(!IsNull);	// 1 when x != 0
		int const Discard(IsNull);	// 1 when x == 0

		return static_cast<int>(glm::bitCount(~x & (x - static_cast<genIUType>(1)))) * Keep + Discard * -1;
	}
+
	// Validates glm::findLSB and every reference implementation against the
	// shared DataI32 table.
	static int test_int()
	{
		int Error(0);

		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = glm::findLSB(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_095(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

#		if GLM_HAS_BITSCAN_WINDOWS
		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_intrinsic(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}
#		endif

		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_ntz2(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_branchfree(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		return Error;
	}
+
+ static int test()
+ {
+ int Error(0);
+
+ Error += test_int();
+
+ return Error;
+ }
+
	// Micro-benchmark comparing glm::findLSB against the reference
	// implementations; every loop also validates results, so the return
	// value doubles as an error count.
	static int perf_int(std::size_t Count)
	{
		int Error(0);

		std::clock_t Timestamps0 = std::clock();

		// glm::findLSB (the implementation under test)
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = glm::findLSB(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps1 = std::clock();

		// GLM 0.9.5 reference implementation
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_095(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps2 = std::clock();

#		if GLM_HAS_BITSCAN_WINDOWS
		// _BitScanForward intrinsic (Windows only)
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_intrinsic(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}
#		endif

		std::clock_t Timestamps3 = std::clock();

		// Hacker's Delight ntz2 variant
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_ntz2(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps4 = std::clock();

		// branch-free variant
		for(std::size_t k = 0; k < Count; ++k)
		for(std::size_t i = 0; i < sizeof(DataI32) / sizeof(entry); ++i)
		{
			int Result = findLSB_branchfree(DataI32[i].Value);
			Error += DataI32[i].Return == Result ? 0 : 1;
		}

		std::clock_t Timestamps5 = std::clock();

		std::printf("glm::findLSB: %d clocks\n", static_cast<int>(Timestamps1 - Timestamps0));
		std::printf("findLSB - 0.9.5: %d clocks\n", static_cast<int>(Timestamps2 - Timestamps1));

#		if GLM_HAS_BITSCAN_WINDOWS
		std::printf("findLSB - intrinsics: %d clocks\n", static_cast<int>(Timestamps3 - Timestamps2));
#		endif

		std::printf("findLSB - ntz2: %d clocks\n", static_cast<int>(Timestamps4 - Timestamps3));
		std::printf("findLSB - branchfree: %d clocks\n", static_cast<int>(Timestamps5 - Timestamps4));

		return Error;
	}
+
+ static int perf(std::size_t Samples)
+ {
+ int Error(0);
+
+ Error += perf_int(Samples);
+
+ return Error;
+ }
+}//findLSB
+
namespace uaddCarry
{
	// Exercises glm::uaddCarry for scalars and uvec1: adding 0 to UINT_MAX
	// must not carry; adding 1 must wrap to 0 and set Carry to 1.
	static int test()
	{
		int Error(0);

		{
			// scalar, no overflow
			glm::uint x = std::numeric_limits<glm::uint>::max();
			glm::uint y = 0;
			glm::uint Carry = 0;
			glm::uint Result = glm::uaddCarry(x, y, Carry);

			Error += Carry == 0 ? 0 : 1;
			Error += Result == std::numeric_limits<glm::uint>::max() ? 0 : 1;
		}

		{
			// scalar, overflow wraps to 0 with carry
			glm::uint x = std::numeric_limits<glm::uint>::max();
			glm::uint y = 1;
			glm::uint Carry = 0;
			glm::uint Result = glm::uaddCarry(x, y, Carry);

			Error += Carry == 1 ? 0 : 1;
			Error += Result == 0 ? 0 : 1;
		}

		{
			// uvec1, no overflow
			glm::uvec1 x(std::numeric_limits<glm::uint>::max());
			glm::uvec1 y(0);
			glm::uvec1 Carry(0);
			glm::uvec1 Result(glm::uaddCarry(x, y, Carry));

			Error += glm::all(glm::equal(Carry, glm::uvec1(0))) ? 0 : 1;
			Error += glm::all(glm::equal(Result, glm::uvec1(std::numeric_limits<glm::uint>::max()))) ? 0 : 1;
		}

		{
			// uvec1, overflow wraps to 0 with carry
			glm::uvec1 x(std::numeric_limits<glm::uint>::max());
			glm::uvec1 y(1);
			glm::uvec1 Carry(0);
			glm::uvec1 Result(glm::uaddCarry(x, y, Carry));

			Error += glm::all(glm::equal(Carry, glm::uvec1(1))) ? 0 : 1;
			Error += glm::all(glm::equal(Result, glm::uvec1(0))) ? 0 : 1;
		}

		return Error;
	}
}//namespace uaddCarry
+
namespace usubBorrow
{
	// Exercises glm::usubBorrow for scalar and every uvec width with x < y,
	// which must set Borrow.
	// NOTE(review): GLSL defines usubBorrow as (x - y) mod 2^32; the expected
	// Result of 1 for 16 - 17 matches glm's implementation (y - x) rather
	// than the modular difference -- verify against the current glm sources.
	static int test()
	{
		int Error(0);

		{
			glm::uint x = 16;
			glm::uint y = 17;
			glm::uint Borrow = 0;
			glm::uint Result = glm::usubBorrow(x, y, Borrow);

			Error += Borrow == 1 ? 0 : 1;
			Error += Result == 1 ? 0 : 1;
		}

		{
			glm::uvec1 x(16);
			glm::uvec1 y(17);
			glm::uvec1 Borrow(0);
			glm::uvec1 Result(glm::usubBorrow(x, y, Borrow));

			Error += glm::all(glm::equal(Borrow, glm::uvec1(1))) ? 0 : 1;
			Error += glm::all(glm::equal(Result, glm::uvec1(1))) ? 0 : 1;
		}

		{
			glm::uvec2 x(16);
			glm::uvec2 y(17);
			glm::uvec2 Borrow(0);
			glm::uvec2 Result(glm::usubBorrow(x, y, Borrow));

			Error += glm::all(glm::equal(Borrow, glm::uvec2(1))) ? 0 : 1;
			Error += glm::all(glm::equal(Result, glm::uvec2(1))) ? 0 : 1;
		}

		{
			glm::uvec3 x(16);
			glm::uvec3 y(17);
			glm::uvec3 Borrow(0);
			glm::uvec3 Result(glm::usubBorrow(x, y, Borrow));

			Error += glm::all(glm::equal(Borrow, glm::uvec3(1))) ? 0 : 1;
			Error += glm::all(glm::equal(Result, glm::uvec3(1))) ? 0 : 1;
		}

		{
			glm::uvec4 x(16);
			glm::uvec4 y(17);
			glm::uvec4 Borrow(0);
			glm::uvec4 Result(glm::usubBorrow(x, y, Borrow));

			Error += glm::all(glm::equal(Borrow, glm::uvec4(1))) ? 0 : 1;
			Error += glm::all(glm::equal(Result, glm::uvec4(1))) ? 0 : 1;
		}

		return Error;
	}
}//namespace usubBorrow
+
namespace umulExtended
{
	// Exercises glm::umulExtended for scalar and every uvec width:
	// 2 * 3 fits in 32 bits, so msb must be 0 and lsb must be 6.
	static int test()
	{
		int Error(0);

		{
			glm::uint x = 2;
			glm::uint y = 3;
			glm::uint msb = 0;
			glm::uint lsb = 0;
			glm::umulExtended(x, y, msb, lsb);

			Error += msb == 0 ? 0 : 1;
			Error += lsb == 6 ? 0 : 1;
		}

		{
			glm::uvec1 x(2);
			glm::uvec1 y(3);
			glm::uvec1 msb(0);
			glm::uvec1 lsb(0);
			glm::umulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::uvec1(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::uvec1(6))) ? 0 : 1;
		}

		{
			glm::uvec2 x(2);
			glm::uvec2 y(3);
			glm::uvec2 msb(0);
			glm::uvec2 lsb(0);
			glm::umulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::uvec2(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::uvec2(6))) ? 0 : 1;
		}

		{
			glm::uvec3 x(2);
			glm::uvec3 y(3);
			glm::uvec3 msb(0);
			glm::uvec3 lsb(0);
			glm::umulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::uvec3(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::uvec3(6))) ? 0 : 1;
		}

		{
			glm::uvec4 x(2);
			glm::uvec4 y(3);
			glm::uvec4 msb(0);
			glm::uvec4 lsb(0);
			glm::umulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::uvec4(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::uvec4(6))) ? 0 : 1;
		}

		return Error;
	}
}//namespace umulExtended
+
namespace imulExtended
{
	// Exercises glm::imulExtended for scalar and every ivec width:
	// 2 * 3 fits in 32 bits, so msb must be 0 and lsb must be 6.
	static int test()
	{
		int Error(0);

		{
			int x = 2;
			int y = 3;
			int msb = 0;
			int lsb = 0;
			glm::imulExtended(x, y, msb, lsb);

			Error += msb == 0 ? 0 : 1;
			Error += lsb == 6 ? 0 : 1;
		}

		{
			glm::ivec1 x(2);
			glm::ivec1 y(3);
			glm::ivec1 msb(0);
			glm::ivec1 lsb(0);
			glm::imulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::ivec1(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::ivec1(6))) ? 0 : 1;
		}

		{
			glm::ivec2 x(2);
			glm::ivec2 y(3);
			glm::ivec2 msb(0);
			glm::ivec2 lsb(0);
			glm::imulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::ivec2(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::ivec2(6))) ? 0 : 1;
		}

		{
			glm::ivec3 x(2);
			glm::ivec3 y(3);
			glm::ivec3 msb(0);
			glm::ivec3 lsb(0);
			glm::imulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::ivec3(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::ivec3(6))) ? 0 : 1;
		}

		{
			glm::ivec4 x(2);
			glm::ivec4 y(3);
			glm::ivec4 msb(0);
			glm::ivec4 lsb(0);
			glm::imulExtended(x, y, msb, lsb);

			Error += glm::all(glm::equal(msb, glm::ivec4(0))) ? 0 : 1;
			Error += glm::all(glm::equal(lsb, glm::ivec4(6))) ? 0 : 1;
		}

		return Error;
	}
}//namespace imulExtended
+
namespace bitCount
{
	// Pairs an input value with its expected population count.
	template<typename genType>
	struct type
	{
		genType Value;	// input passed to bitCount
		genType Return;	// expected number of set bits
	};

	// Shared test table for the bitCount variants.
	type<int> const DataI32[] =
	{
		{0x00000001, 1},
		{0x00000003, 2},
		{0x00000002, 1},
		{0x7fffffff, 31},
		{0x00000000, 0}
	};
+
+ template<typename T>
+ inline int bitCount_if(T v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitCount' only accept integer values");
+
+ int Count(0);
+ for(T i = 0, n = static_cast<T>(sizeof(T) * 8); i < n; ++i)
+ {
+ if(v & static_cast<T>(1 << i))
+ ++Count;
+ }
+ return Count;
+ }
+
+ template<typename T>
+ inline int bitCount_vec(T v)
+ {
+ GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitCount' only accept integer values");
+
+ int Count(0);
+ for(T i = 0, n = static_cast<T>(sizeof(T) * 8); i < n; ++i)
+ {
+ Count += static_cast<int>((v >> i) & static_cast<T>(1));
+ }
+ return Count;
+ }
+
	// Compile-time-selected step of the SWAR popcount below: the primary
	// template (EXEC == false) is an identity used when T is too narrow for
	// the step; the specialization performs one masked pairwise-add pass.
	template<bool EXEC = false>
	struct compute_bitfieldBitCountStep
	{
		template<glm::length_t L, typename T, glm::qualifier Q>
		GLM_FUNC_QUALIFIER static glm::vec<L, T, Q> call(glm::vec<L, T, Q> const& v, T, T)
		{
			return v;
		}
	};

	template<>
	struct compute_bitfieldBitCountStep<true>
	{
		template<glm::length_t L, typename T, glm::qualifier Q>
		GLM_FUNC_QUALIFIER static glm::vec<L, T, Q> call(glm::vec<L, T, Q> const& v, T Mask, T Shift)
		{
			// Sum adjacent fields: each Mask-wide field accumulates the counts
			// of the two fields it replaces.
			return (v & Mask) + ((v >> Shift) & Mask);
		}
	};
+
	// Component-wise SWAR population count: successive pairwise-add steps on
	// 1-, 2-, 4-, ... bit fields; steps wider than T compile to identities.
	// NOTE(review): reinterpreting the vector as its unsigned counterpart via
	// reinterpret_cast violates strict aliasing; a memcpy-based conversion
	// would be safer -- confirm glm's vec layout guarantees before relying
	// on this.
	template<glm::length_t L, typename T, glm::qualifier Q>
	static glm::vec<L, int, Q> bitCount_bitfield(glm::vec<L, T, Q> const& v)
	{
		glm::vec<L, typename glm::detail::make_unsigned<T>::type, Q> x(*reinterpret_cast<glm::vec<L, typename glm::detail::make_unsigned<T>::type, Q> const *>(&v));
		x = compute_bitfieldBitCountStep<sizeof(T) * 8 >= 2>::call(x, static_cast<typename glm::detail::make_unsigned<T>::type>(0x5555555555555555ull), static_cast<typename glm::detail::make_unsigned<T>::type>( 1));
		x = compute_bitfieldBitCountStep<sizeof(T) * 8 >= 4>::call(x, static_cast<typename glm::detail::make_unsigned<T>::type>(0x3333333333333333ull), static_cast<typename glm::detail::make_unsigned<T>::type>( 2));
		x = compute_bitfieldBitCountStep<sizeof(T) * 8 >= 8>::call(x, static_cast<typename glm::detail::make_unsigned<T>::type>(0x0F0F0F0F0F0F0F0Full), static_cast<typename glm::detail::make_unsigned<T>::type>( 4));
		x = compute_bitfieldBitCountStep<sizeof(T) * 8 >= 16>::call(x, static_cast<typename glm::detail::make_unsigned<T>::type>(0x00FF00FF00FF00FFull), static_cast<typename glm::detail::make_unsigned<T>::type>( 8));
		x = compute_bitfieldBitCountStep<sizeof(T) * 8 >= 32>::call(x, static_cast<typename glm::detail::make_unsigned<T>::type>(0x0000FFFF0000FFFFull), static_cast<typename glm::detail::make_unsigned<T>::type>(16));
		x = compute_bitfieldBitCountStep<sizeof(T) * 8 >= 64>::call(x, static_cast<typename glm::detail::make_unsigned<T>::type>(0x00000000FFFFFFFFull), static_cast<typename glm::detail::make_unsigned<T>::type>(32));
		return glm::vec<L, int, Q>(x);
	}
+
	// Scalar convenience wrapper: wrap the value in a 1-component vector and
	// reuse the vector implementation.
	template<typename genType>
	static int bitCount_bitfield(genType x)
	{
		return bitCount_bitfield(glm::vec<1, genType, glm::defaultp>(x)).x;
	}
+
	// Micro-benchmark of the bitCount variants over the values 0..Size-1.
	// Results are stored into v/w so the loops cannot be optimized away.
	// NOTE(review): the TimeVec and TimeDefault loops pass std::size_t 'i'
	// directly while the other loops cast to int -- on 64-bit builds the
	// operand widths differ between loops, which looks unintentional; verify.
	static int perf(std::size_t Size)
	{
		int Error(0);

		std::vector<int> v;
		v.resize(Size);

		std::vector<glm::ivec4> w;
		w.resize(Size);


		std::clock_t TimestampsA = std::clock();

		// bitCount - TimeIf
		{
			for(std::size_t i = 0, n = v.size(); i < n; ++i)
				v[i] = bitCount_if(static_cast<int>(i));
		}

		std::clock_t TimestampsB = std::clock();

		// bitCount - TimeVec
		{
			for(std::size_t i = 0, n = v.size(); i < n; ++i)
				v[i] = bitCount_vec(i);
		}

		std::clock_t TimestampsC = std::clock();

		// bitCount - TimeDefault
		{
			for(std::size_t i = 0, n = v.size(); i < n; ++i)
				v[i] = glm::bitCount(i);
		}

		std::clock_t TimestampsD = std::clock();

		// bitCount - TimeVec4
		{
			for(std::size_t i = 0, n = v.size(); i < n; ++i)
				w[i] = glm::bitCount(glm::ivec4(static_cast<int>(i)));
		}

		std::clock_t TimestampsE = std::clock();

		// bitCount - bitfield (SWAR implementation above)
		{
			for(std::size_t i = 0, n = v.size(); i < n; ++i)
				v[i] = bitCount_bitfield(static_cast<int>(i));
		}

		std::clock_t TimestampsF = std::clock();

		std::printf("bitCount - TimeIf %d\n", static_cast<int>(TimestampsB - TimestampsA));
		std::printf("bitCount - TimeVec %d\n", static_cast<int>(TimestampsC - TimestampsB));
		std::printf("bitCount - TimeDefault %d\n", static_cast<int>(TimestampsD - TimestampsC));
		std::printf("bitCount - TimeVec4 %d\n", static_cast<int>(TimestampsE - TimestampsD));
		std::printf("bitCount - bitfield %d\n", static_cast<int>(TimestampsF - TimestampsE));

		return Error;
	}
+
	// Validates glm::bitCount and the reference implementations against the
	// shared DataI32 table. The assert stops debug builds at the first
	// mismatch; release builds accumulate and return the error count.
	static int test()
	{
		int Error(0);

		for(std::size_t i = 0, n = sizeof(DataI32) / sizeof(type<int>); i < n; ++i)
		{
			int ResultA = glm::bitCount(DataI32[i].Value);
			int ResultB = bitCount_if(DataI32[i].Value);
			int ResultC = bitCount_vec(DataI32[i].Value);
			int ResultE = bitCount_bitfield(DataI32[i].Value);

			Error += DataI32[i].Return == ResultA ? 0 : 1;
			Error += DataI32[i].Return == ResultB ? 0 : 1;
			Error += DataI32[i].Return == ResultC ? 0 : 1;
			Error += DataI32[i].Return == ResultE ? 0 : 1;

			assert(!Error);
		}

		return Error;
	}
+}//bitCount
+
// Entry point: run every correctness test, then the benchmarks.
// Only the correctness tests drive the exit status; benchmark return values
// are intentionally ignored.
int main()
{
	int Error = 0;

	Error += ::bitCount::test();
	Error += ::bitfieldReverse::test();
	Error += ::findMSB::test();
	Error += ::findLSB::test();
	Error += ::umulExtended::test();
	Error += ::imulExtended::test();
	Error += ::uaddCarry::test();
	Error += ::usubBorrow::test();
	Error += ::bitfieldInsert::test();
	Error += ::bitfieldExtract::test();

	// Keep debug runs fast; only release builds measure meaningful timings.
#	ifdef NDEBUG
	std::size_t const Samples = 1000;
#	else
	std::size_t const Samples = 1;
#	endif

	::bitCount::perf(Samples);
	::bitfieldReverse::perf(Samples);
	::findMSB::perf(Samples);
	::findLSB::perf(Samples);

	return Error;
}
diff --git a/3rdparty/glm/source/test/core/core_func_integer_bit_count.cpp b/3rdparty/glm/source/test/core/core_func_integer_bit_count.cpp
new file mode 100644
index 0000000..0fa11fb
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_integer_bit_count.cpp
@@ -0,0 +1,291 @@
+// This has the programs for computing the number of 1-bits
+// in a word, or byte, etc.
+// Max line length is 57, to fit in hacker.book.
+#include <cstdio>
+#include <cstdlib> //To define "exit", req'd by XLC.
+#include <ctime>
+
// Rotate a 32-bit word left by n bits. Keeps the Hacker's Delight range check,
// but fixes the undefined 'x >> 32' the original performed when n == 0 (or 32):
// shifting a 32-bit value by its full width is UB in C/C++.
unsigned rotatel(unsigned x, int n)
{
	if (static_cast<unsigned>(n) > 63) { std::printf("rotatel, n out of range.\n"); std::exit(1);}
	n &= 31;	// rotation is periodic in 32
	if (n == 0)
		return x;
	return (x << n) | (x >> (32 - n));
}
+
// Population count: pairwise sums of 1-, 2-, 4-, 8-, then 16-bit fields.
int pop0(unsigned x)
{
	unsigned sum = x;
	sum = (sum & 0x55555555) + ((sum >> 1) & 0x55555555);
	sum = (sum & 0x33333333) + ((sum >> 2) & 0x33333333);
	sum = (sum & 0x0F0F0F0F) + ((sum >> 4) & 0x0F0F0F0F);
	sum = (sum & 0x00FF00FF) + ((sum >> 8) & 0x00FF00FF);
	sum = (sum & 0x0000FFFF) + ((sum >>16) & 0x0000FFFF);
	return static_cast<int>(sum);
}
+
// Population count, optimized variant of pop0: the first subtract computes
// the 2-bit field sums directly, and the last steps tolerate garbage in the
// upper bits because the final mask keeps only the 6-bit total.
int pop1(unsigned x)
{
	x = x - ((x >> 1) & 0x55555555);
	x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
	x = (x + (x >> 4)) & 0x0F0F0F0F;
	x = x + (x >> 8);
	x = x + (x >> 16);
	return x & 0x0000003F;
}
+/* Note: an alternative to the last three executable lines above is:
+ return x*0x01010101 >> 24;
+if your machine has a fast multiplier (suggested by Jari Kirma). */
+
// Population count using octal (3-bit) fields, then 6-bit sums folded with a
// modulo-63 "casting out" step.
int pop2(unsigned x)
{
	unsigned n;

	n = (x >> 1) & 033333333333;	// Count bits in
	x = x - n;			// each 3-bit
	n = (n >> 1) & 033333333333;	// field.
	x = x - n;
	x = (x + (x >> 3)) & 030707070707;	// 6-bit sums.
	return x%63;	// Add 6-bit sums.
}
+
+/* An alternative to the "return" statement above is:
+ return ((x * 0404040404) >> 26) + // Add 6-bit sums.
+ (x >> 30);
+which runs faster on most machines (suggested by Norbert Juffa). */
+
// Population count using 4-bit fields (three subtract steps), byte sums, and
// a multiply to accumulate the bytes into the top byte.
int pop3(unsigned x)
{
	unsigned n;

	n = (x >> 1) & 0x77777777;	// Count bits in
	x = x - n;			// each 4-bit
	n = (n >> 1) & 0x77777777;	// field.
	x = x - n;
	n = (n >> 1) & 0x77777777;
	x = x - n;
	x = (x + (x >> 4)) & 0x0F0F0F0F;	// Get byte sums.
	x = x*0x01010101;	// Add the bytes.
	return x >> 24;
}
+
// Population count by repeatedly clearing the lowest set bit (Wegner's
// method); runs in O(number of set bits).
int pop4(unsigned x)
{
	int count = 0;
	while (x != 0) {
		x &= x - 1;	// clear the lowest set bit
		++count;
	}
	return count;
}
+
// Population count by rotate-and-sum: after 32 rotations every bit has been
// added into every position, so -sum equals the bit count. The right-hand
// comments show the equivalent shift-right-and-subtract method (pop5a).
int pop5(unsigned x)
{
	int i, sum;

	// Rotate and sum method			// Shift right & subtract

	sum = x;					// sum = x;
	for (i = 1; i <= 31; i++) {			// while (x != 0) {
		x = rotatel(x, 1);			//    x = x >> 1;
		sum = sum + x;				//    sum = sum - x;
	}						// }
	return -sum;					// return sum;
}
+
// Population count by shift-right-and-subtract: x - x/2 - x/4 - ... leaves
// exactly the number of set bits.
int pop5a(unsigned x)
{
	int total = static_cast<int>(x);
	for (unsigned shifted = x >> 1; ; shifted >>= 1) {
		total -= static_cast<int>(shifted);
		if (shifted == 0)
			break;
	}
	return total;
}
+
// Population count by byte-wise table lookup: table[b] holds the bit count of
// byte value b; sum the four byte lookups.
int pop6(unsigned x)
{ // Table lookup.
	static char table[256] = {
		0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,

		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,

		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,

		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
		4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};

	return table[x & 0xFF] +
		table[(x >> 8) & 0xFF] +
		table[(x >> 16) & 0xFF] +
		table[(x >> 24)];
}
+
// The following works only for 8-bit quantities.
// Multiply trick: replicate the byte, align each input bit to a multiple of
// 4, then use a second multiply to sum the digits into the top nibble.
int pop7(unsigned x)
{
	x = x*0x08040201;	// Make 4 copies.
	x = x >> 3;		// So next step hits proper bits.
	x = x & 0x11111111;	// Every 4th bit.
	x = x*0x11111111;	// Sum the digits (each 0 or 1).
	x = x >> 28;		// Position the result.
	return x;
}
+
// The following works only for 7-bit quantities.
// Same multiply trick as pop7, but the copies are made left-adjusted so no
// intermediate shift is needed.
int pop8(unsigned x)
{
	x = x*0x02040810;	// Make 4 copies, left-adjusted.
	x = x & 0x11111111;	// Every 4th bit.
	x = x*0x11111111;	// Sum the digits (each 0 or 1).
	x = x >> 28;		// Position the result.
	return x;
}
+
// The following works only for 15-bit quantities.
int pop9(unsigned x)
{
	// Spread four copies of x across a 64-bit word so every 4th bit carries
	// one input bit, then sum the nibbles with a second multiply and read the
	// total from the top nibble.
	unsigned long long spread = x * 0x0002000400080010ULL;
	spread &= 0x1111111111111111ULL;
	spread *= 0x1111111111111111ULL;
	return static_cast<int>(spread >> 60);
}
+
// Global failure counter incremented by error(); checked at the end of main.
int errors;
// Report a mismatch for input x that produced result y.
void error(int x, int y)
{
	errors = errors + 1;
	std::printf("Error for x = %08x, got %08x\n", x, y);
}
+
// Benchmarks and validates every pop* variant against a table of
// (value, expected-count) pairs. Only runs in release (NDEBUG) builds so
// debug test runs stay fast.
int main()
{
#	ifdef NDEBUG

	int i, n;
	// Flat array of (input, expected popcount) pairs.
	static unsigned test[] = {0,0, 1,1, 2,1, 3,2, 4,1, 5,2, 6,2, 7,3,
		8,1, 9,2, 10,2, 11,3, 12,2, 13,3, 14,3, 15,4, 16,1, 17,2,
		0x3F,6, 0x40,1, 0x41,2, 0x7f,7, 0x80,1, 0x81,2, 0xfe,7, 0xff,8,
		0x4000,1, 0x4001,2, 0x7000,3, 0x7fff,15,
		0x55555555,16, 0xAAAAAAAA, 16, 0xFF000000,8, 0xC0C0C0C0,8,
		0x0FFFFFF0,24, 0x80000000,1, 0xFFFFFFFF,32};

	std::size_t const Count = 1000000;

	n = sizeof(test)/4;	// number of unsigned entries (pairs * 2)

	std::clock_t TimestampBeg = 0;
	std::clock_t TimestampEnd = 0;

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop0(test[i]) != test[i+1]) error(test[i], pop0(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop0: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop1(test[i]) != test[i+1]) error(test[i], pop1(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop1: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop2(test[i]) != test[i+1]) error(test[i], pop2(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop2: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop3(test[i]) != test[i+1]) error(test[i], pop3(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop3: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop4(test[i]) != test[i+1]) error(test[i], pop4(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop4: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop5(test[i]) != test[i+1]) error(test[i], pop5(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop5: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop5a(test[i]) != test[i+1]) error(test[i], pop5a(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop5a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if (pop6(test[i]) != test[i+1]) error(test[i], pop6(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop6: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	// pop7/pop8/pop9 only handle narrow inputs, so out-of-range table
	// entries are filtered before checking.
	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if ((test[i] & 0xffffff00) == 0)
		if (pop7(test[i]) != test[i+1]) error(test[i], pop7(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop7: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if ((test[i] & 0xffffff80) == 0)
		if (pop8(test[i]) != test[i+1]) error(test[i], pop8(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop8: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	TimestampBeg = std::clock();
	for (std::size_t k = 0; k < Count; ++k)
	for (i = 0; i < n; i += 2) {
		if ((test[i] & 0xffff8000) == 0)
		if (pop9(test[i]) != test[i+1]) error(test[i], pop9(test[i]));}
	TimestampEnd = std::clock();

	std::printf("pop9: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));

	if (errors == 0)
		std::printf("Passed all %d cases.\n", static_cast<int>(sizeof(test)/8));

#	endif//NDEBUG
}
diff --git a/3rdparty/glm/source/test/core/core_func_integer_find_lsb.cpp b/3rdparty/glm/source/test/core/core_func_integer_find_lsb.cpp
new file mode 100644
index 0000000..7b42d33
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_integer_find_lsb.cpp
@@ -0,0 +1,416 @@
+#include <glm/glm.hpp>
+#include <cstdio>
+#include <cstdlib> //To define "exit", req'd by XLC.
+#include <ctime>
+
+/* Number of leading zeros: smears the highest set bit into all lower
+   positions, then counts the remaining zero bits with pop(~x).
+   (Hacker's Delight, fig. 5-xx style; local declaration of pop avoids
+   depending on definition order.) */
+int nlz(unsigned x)
+{
+ int pop(unsigned x);
+
+ x = x | (x >> 1);
+ x = x | (x >> 2);
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >>16);
+ return pop(~x);
+}
+
+/* Population count (number of 1-bits) via the classic divide-and-conquer
+   bit trick; the final two adds accumulate all byte counts into the top
+   byte, extracted by the >> 24. */
+int pop(unsigned x)
+{
+ x = x - ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x = x + (x << 8);
+ x = x + (x << 16);
+ return x >> 24;
+}
+
+/* Number of trailing zeros via nlz: ~x & (x-1) sets exactly the bits
+   below the lowest set bit of x. Returns 32 for x == 0. */
+int ntz1(unsigned x)
+{
+ return 32 - nlz(~x & (x-1));
+}
+
+/* Trailing zeros as the popcount of the mask of bits below the lowest
+   set bit. Returns 32 for x == 0. */
+int ntz2(unsigned x)
+{
+ return pop(~x & (x - 1));
+}
+
+/* Trailing zeros by binary search on the low halves; the final
+   "n - (x & 1)" corrects the off-by-one from starting n at 1. */
+int ntz3(unsigned x)
+{
+ int n;
+
+ if (x == 0) return(32);
+ n = 1;
+ if ((x & 0x0000FFFF) == 0) {n = n +16; x = x >>16;}
+ if ((x & 0x000000FF) == 0) {n = n + 8; x = x >> 8;}
+ if ((x & 0x0000000F) == 0) {n = n + 4; x = x >> 4;}
+ if ((x & 0x00000003) == 0) {n = n + 2; x = x >> 2;}
+ return n - (x & 1);
+}
+
+/* Trailing zeros by binary search using left shifts: each step that
+   keeps a nonzero value moves the lowest set bit toward bit 31,
+   reducing n accordingly. */
+int ntz4(unsigned x)
+{
+ unsigned y;
+ int n;
+
+ if (x == 0) return 32;
+ n = 31;
+ y = x <<16; if (y != 0) {n = n -16; x = y;}
+ y = x << 8; if (y != 0) {n = n - 8; x = y;}
+ y = x << 4; if (y != 0) {n = n - 4; x = y;}
+ y = x << 2; if (y != 0) {n = n - 2; x = y;}
+ y = x << 1; if (y != 0) {n = n - 1;}
+ return n;
+}
+
+/* Variant of ntz4: the last branch is replaced by extracting bit 30
+   of x as (x << 1) >> 31, saving a conditional. */
+int ntz4a(unsigned x)
+{
+ unsigned y;
+ int n;
+
+ if (x == 0) return 32;
+ n = 31;
+ y = x <<16; if (y != 0) {n = n -16; x = y;}
+ y = x << 8; if (y != 0) {n = n - 8; x = y;}
+ y = x << 4; if (y != 0) {n = n - 4; x = y;}
+ y = x << 2; if (y != 0) {n = n - 2; x = y;}
+ n = n - ((x << 1) >> 31);
+ return n;
+}
+
+/* 8-bit trailing-zero count via a branch tree; result is clamped to 8
+   for a zero byte (the test driver clamps expected values likewise). */
+int ntz5(char x)
+{
+ if (x & 15) {
+ if (x & 3) {
+ if (x & 1) return 0;
+ else return 1;
+ }
+ else if (x & 4) return 2;
+ else return 3;
+ }
+ else if (x & 0x30) {
+ if (x & 0x10) return 4;
+ else return 5;
+ }
+ else if (x & 0x40) return 6;
+ else if (x) return 7;
+ else return 8;
+}
+
+/* Loop version: counts the bits below the lowest set bit one at a time.
+   The column comments show the equivalent count-down formulation used
+   by ntz6a. */
+int ntz6(unsigned x)
+{
+ int n;
+
+ x = ~x & (x - 1);
+ n = 0; // n = 32;
+ while(x != 0)
+ { // while (x != 0) {
+ n = n + 1; // n = n - 1;
+ x = x >> 1; // x = x + x;
+ } // }
+ return n; // return n;
+}
+
+/* Count-down loop variant of ntz6: shifts x left until it becomes zero;
+   each doubling drops one from the trailing-zero count starting at 32. */
+int ntz6a(unsigned x)
+{
+ int n = 32;
+
+ while (x != 0) {
+ n = n - 1;
+ x = x + x;
+ }
+ return n;
+}
+
+/* Dean Gaudet's algorithm. To be most useful there must be a good way
+to evaluate the C "conditional expression" (a?b:c construction) without
+branching. The result of a?b:c is b if a is true (nonzero), and c if a
+is false (0).
+ For example, a compare to zero op that sets a target GPR to 1 if the
+operand is 0, and to 0 if the operand is nonzero, will do it. With this
+instruction, the algorithm is entirely branch-free. But the most
+interesting thing about it is the high degree of parallelism. All six
+lines with conditional expressions can be executed in parallel (on a
+machine with sufficient computational units).
+ Although the instruction count is 30 measured statically, it could
+execute in only 10 cycles on a machine with sufficient parallelism.
+ The first two uses of y can instead be x, which would increase the
+useful parallelism on most machines (the assignments to y, bz, and b4
+could then all run in parallel). */
+
+/* Dean Gaudet's algorithm (see comment block above): six independent
+   conditional expressions whose contributions sum to the bit index. */
+int ntz7(unsigned x)
+{
+ unsigned y, bz, b4, b3, b2, b1, b0;
+
+ y = x & -x; // Isolate rightmost 1-bit.
+ bz = y ? 0 : 1; // 1 if y = 0.
+ b4 = (y & 0x0000FFFF) ? 0 : 16;
+ b3 = (y & 0x00FF00FF) ? 0 : 8;
+ b2 = (y & 0x0F0F0F0F) ? 0 : 4;
+ b1 = (y & 0x33333333) ? 0 : 2;
+ b0 = (y & 0x55555555) ? 0 : 1;
+ return bz + b4 + b3 + b2 + b1 + b0;
+}
+
+// This file has divisions by zero to test isnan
+#if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(disable : 4800)
+#endif
+
+/* Branch-free reformulation of ntz7: each a?b:c is rewritten as
+   !bool(mask) * weight so no conditional moves/branches are required. */
+int ntz7_christophe(unsigned x)
+{
+ unsigned y, bz, b4, b3, b2, b1, b0;
+
+ y = x & -x; // Isolate rightmost 1-bit.
+ bz = unsigned(!bool(y)); // 1 if y = 0.
+ b4 = unsigned(!bool(y & 0x0000FFFF)) * 16;
+ b3 = unsigned(!bool(y & 0x00FF00FF)) * 8;
+ b2 = unsigned(!bool(y & 0x0F0F0F0F)) * 4;
+ b1 = unsigned(!bool(y & 0x33333333)) * 2;
+ b0 = unsigned(!bool(y & 0x55555555)) * 1;
+ return bz + b4 + b3 + b2 + b1 + b0;
+}
+
+/* Below is David Seal's algorithm, found at
+http://www.ciphersbyritter.com/NEWS4/BITCT.HTM Table
+entries marked "u" are unused. 6 ops including a
+multiply, plus an indexed load. */
+
+#define u 99
+/* David Seal's algorithm (see reference above): isolate the lsb,
+   multiply by a magic constant so the top 6 bits index a lookup table. */
+int ntz8(unsigned x)
+{
+ static char table[64] =
+ {32, 0, 1,12, 2, 6, u,13, 3, u, 7, u, u, u, u,14,
+ 10, 4, u, u, 8, u, u,25, u, u, u, u, u,21,27,15,
+ 31,11, 5, u, u, u, u, u, 9, u, u,24, u, u,20,26,
+ 30, u, u, u, u,23, u,19, 29, u,22,18,28,17,16, u};
+
+ x = (x & -x)*0x0450FBAF;
+ return table[x >> 26];
+}
+
+/* Seal's algorithm with multiply expanded.
+9 elementary ops plus an indexed load. */
+
+/* Seal's algorithm with the multiply by 0x0450FBAF = 17*65*65535
+   expanded into shifts and adds for machines with slow multiply. */
+int ntz8a(unsigned x)
+{
+ static char table[64] =
+ {32, 0, 1,12, 2, 6, u,13, 3, u, 7, u, u, u, u,14,
+ 10, 4, u, u, 8, u, u,25, u, u, u, u, u,21,27,15,
+ 31,11, 5, u, u, u, u, u, 9, u, u,24, u, u,20,26,
+ 30, u, u, u, u,23, u,19, 29, u,22,18,28,17,16, u};
+
+ x = (x & -x);
+ x = (x << 4) + x; // x = x*17.
+ x = (x << 6) + x; // x = x*65.
+ x = (x << 16) - x; // x = x*65535.
+ return table[x >> 26];
+}
+
+/* Reiser's algorithm. Three ops including a "remainder,"
+plus an indexed load. */
+
+/* Reiser's algorithm: the 33 possible isolated-lsb values (including 0)
+   are all distinct mod 37, so one remainder indexes the answer table. */
+int ntz9(unsigned x)
+{
+ static char table[37] = {
+ 32, 0, 1, 26, 2, 23, 27,
+ u, 3, 16, 24, 30, 28, 11, u, 13, 4,
+ 7, 17, u, 25, 22, 31, 15, 29, 10, 12,
+ 6, u, 21, 14, 9, 5, 20, 8, 19, 18};
+
+ x = (x & -x)%37;
+ return table[x];
+}
+
+/* Using a de Bruijn sequence. This is a table lookup with a 32-entry
+table. The de Bruijn sequence used here is
+ 0000 0100 1101 0111 0110 0101 0001 1111,
+obtained from Danny Dube's October 3, 1997, posting in
+comp.compression.research. Thanks to Norbert Juffa for this reference. */
+
+/* De Bruijn multiplication (sequence credit: Danny Dube, see comment
+   above): the top 5 bits of the product uniquely identify the lsb. */
+int ntz10(unsigned x) {
+
+ static char table[32] =
+ { 0, 1, 2,24, 3,19, 6,25, 22, 4,20,10,16, 7,12,26,
+ 31,23,18, 5,21, 9,15,11, 30,17, 8,14,29,13,28,27};
+
+ if (x == 0) return 32;
+ x = (x & -x)*0x04D7651F;
+ return table[x >> 27];
+}
+
+/* Norbert Juffa's code, answer to exercise 1 of Chapter 5 (2nd ed). */
+
+#define SLOW_MUL
+/* Norbert Juffa's de Bruijn-based ntz (answer to exercise 1, Chapter 5,
+   2nd ed). Isolates the lsb, multiplies by the de Bruijn constant
+   0x04D7651F (expanded into shift/adds when SLOW_MUL is defined:
+   2047 * 5 * 256 + 1, * 31) and indexes the answer table with the top
+   5 bits. Returns 32 for n == 0.
+   Fixed: removed a leftover debug printf that flooded stdout and passed
+   an unsigned value to %d. */
+int ntz11 (unsigned int n) {
+
+ static unsigned char tab[32] =
+ { 0, 1, 2, 24, 3, 19, 6, 25,
+ 22, 4, 20, 10, 16, 7, 12, 26,
+ 31, 23, 18, 5, 21, 9, 15, 11,
+ 30, 17, 8, 14, 29, 13, 28, 27
+ };
+ unsigned int k;
+ n = n & (-n); /* isolate lsb */
+#if defined(SLOW_MUL)
+ k = (n << 11) - n;
+ k = (k << 2) + k;
+ k = (k << 8) + n;
+ k = (k << 5) - k;
+#else
+ k = n * 0x4d7651f;
+#endif
+ return n ? tab[k>>27] : 32;
+}
+
+// Global failure counter plus reporter used by every benchmark loop in main.
+int errors;
+void error(int x, int y) {
+ errors = errors + 1;
+ std::printf("Error for x = %08x, got %d\n", x, y);
+}
+
+/* Test driver: test[] holds (input, expected-ntz) pairs; each ntz variant
+   is checked against the table Count times and its std::clock() time is
+   printed. Only active in release builds (NDEBUG).
+   Fixed: the ntz7_christophe loop reported ntz7's result on failure.
+   NOTE(review): ntz11 is defined above but never exercised here. */
+int main()
+{
+# ifdef NDEBUG
+
+ int i, m, n;
+ static unsigned test[] = {0,32, 1,0, 2,1, 3,0, 4,2, 5,0, 6,1, 7,0,
+ 8,3, 9,0, 16,4, 32,5, 64,6, 128,7, 255,0, 256,8, 512,9, 1024,10,
+ 2048,11, 4096,12, 8192,13, 16384,14, 32768,15, 65536,16,
+ 0x20000,17, 0x40000,18, 0x80000,19, 0x100000,20, 0x200000,21,
+ 0x400000,22, 0x800000,23, 0x1000000,24, 0x2000000,25,
+ 0x4000000,26, 0x8000000,27, 0x10000000,28, 0x20000000,29,
+ 0x40000000,30, 0x80000000,31, 0xFFFFFFF0,4, 0x3000FF00,8,
+ 0xC0000000,30, 0x60000000,29, 0x00011000, 12};
+
+ std::size_t const Count = 1000;
+
+ n = sizeof(test)/4; // Element count; entries come in (input, expected) pairs.
+
+ std::clock_t TimestampBeg = 0;
+ std::clock_t TimestampEnd = 0;
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz1(test[i]) != test[i+1]) error(test[i], ntz1(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz1: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz2(test[i]) != test[i+1]) error(test[i], ntz2(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz2: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz3(test[i]) != test[i+1]) error(test[i], ntz3(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz3: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz4(test[i]) != test[i+1]) error(test[i], ntz4(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz4: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz4a(test[i]) != test[i+1]) error(test[i], ntz4a(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz4a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ // ntz5 works on 8-bit input, so expected values are clamped to 8.
+ TimestampBeg = std::clock();
+ for(std::size_t k = 0; k < Count; ++k)
+ for(i = 0; i < n; i += 2)
+ {
+ m = test[i+1];
+ if(m > 8)
+ m = 8;
+ if(ntz5(static_cast<char>(test[i])) != m)
+ error(test[i], ntz5(static_cast<char>(test[i])));
+ }
+ TimestampEnd = std::clock();
+
+ std::printf("ntz5: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz6(test[i]) != test[i+1]) error(test[i], ntz6(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz6: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz6a(test[i]) != test[i+1]) error(test[i], ntz6a(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz6a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz7(test[i]) != test[i+1]) error(test[i], ntz7(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz7: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ // Fixed: report ntz7_christophe's own result, not ntz7's.
+ if (ntz7_christophe(test[i]) != test[i+1]) error(test[i], ntz7_christophe(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz7_christophe: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz8(test[i]) != test[i+1]) error(test[i], ntz8(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz8: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz8a(test[i]) != test[i+1]) error(test[i], ntz8a(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz8a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz9(test[i]) != test[i+1]) error(test[i], ntz9(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz9: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (ntz10(test[i]) != test[i+1]) error(test[i], ntz10(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("ntz10: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ if (errors == 0)
+ std::printf("Passed all %d cases.\n", static_cast<int>(sizeof(test)/8));
+
+# endif//NDEBUG
+}
diff --git a/3rdparty/glm/source/test/core/core_func_integer_find_msb.cpp b/3rdparty/glm/source/test/core/core_func_integer_find_msb.cpp
new file mode 100644
index 0000000..c435467
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_integer_find_msb.cpp
@@ -0,0 +1,440 @@
+#include <glm/glm.hpp>
+#include <cstdio>
+#include <cstdlib> // To define "exit", req'd by XLC.
+#include <ctime>
+
+#define LE 1 // 1 for little-endian, 0 for big-endian.
+
+/* Population count: divide-and-conquer bit counting; the two trailing
+   adds fold all byte sums into the top byte, read via >> 24. */
+int pop(unsigned x) {
+ x = x - ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x = x + (x << 8);
+ x = x + (x << 16);
+ return x >> 24;
+}
+
+/* Number of leading zeros by binary search with comparisons against
+   half-range thresholds; 32 for x == 0. */
+int nlz1(unsigned x) {
+ int n;
+
+ if (x == 0) return(32);
+ n = 0;
+ if (x <= 0x0000FFFF) {n = n +16; x = x <<16;}
+ if (x <= 0x00FFFFFF) {n = n + 8; x = x << 8;}
+ if (x <= 0x0FFFFFFF) {n = n + 4; x = x << 4;}
+ if (x <= 0x3FFFFFFF) {n = n + 2; x = x << 2;}
+ if (x <= 0x7FFFFFFF) {n = n + 1;}
+ return n;
+}
+
+/* nlz1 variant: the x <= 0 test handles both x == 0 (returns 32 via the
+   branch-free (~x >> 26) & 32 trick) and the high-bit-set case (returns 0). */
+int nlz1a(unsigned x) {
+ int n;
+
+/* if (x == 0) return(32); */
+ if (static_cast<int>(x) <= 0) return (~x >> 26) & 32;
+ n = 1;
+ if ((x >> 16) == 0) {n = n +16; x = x <<16;}
+ if ((x >> 24) == 0) {n = n + 8; x = x << 8;}
+ if ((x >> 28) == 0) {n = n + 4; x = x << 4;}
+ if ((x >> 30) == 0) {n = n + 2; x = x << 2;}
+ n = n - (x >> 31);
+ return n;
+}
+// On basic Risc, 12 to 20 instructions.
+
+/* Leading zeros by binary search with right shifts; after the last step
+   x is 0 or 1, so "return n - x" finishes the count. */
+int nlz2(unsigned x) {
+ unsigned y;
+ int n;
+
+ n = 32;
+ y = x >>16; if (y != 0) {n = n -16; x = y;}
+ y = x >> 8; if (y != 0) {n = n - 8; x = y;}
+ y = x >> 4; if (y != 0) {n = n - 4; x = y;}
+ y = x >> 2; if (y != 0) {n = n - 2; x = y;}
+ y = x >> 1; if (y != 0) return n - 2;
+ return n - x;
+}
+
+// As above but coded as a loop for compactness:
+// 23 to 33 basic Risc instructions.
+/* Loop form of nlz2: halves the shift amount c each pass (16,8,4,2,1). */
+int nlz2a(unsigned x) {
+ unsigned y;
+ int n, c;
+
+ n = 32;
+ c = 16;
+ do {
+ y = x >> c; if (y != 0) {n = n - c; x = y;}
+ c = c >> 1;
+ } while (c != 0);
+ return n - x;
+}
+
+/* Bidirectional scan: shifts x left while shifting a copy y right;
+   terminates on whichever end finds a set bit first. Uses an explicit
+   label/goto loop as published. */
+int nlz3(int x) {
+ int y, n;
+
+ n = 0;
+ y = x;
+L: if (x < 0) return n;
+ if (y == 0) return 32 - n;
+ n = n + 1;
+ x = x << 1;
+ y = y >> 1;
+ goto L;
+}
+
+/* Branch-free binary search: each step derives a 16/8/4/2 increment from
+   the sign of a subtraction instead of a comparison branch. */
+int nlz4(unsigned x) {
+ int y, m, n;
+
+ y = -(x >> 16); // If left half of x is 0,
+ m = (y >> 16) & 16; // set n = 16. If left half
+ n = 16 - m; // is nonzero, set n = 0 and
+ x = x >> m; // shift x right 16.
+ // Now x is of the form 0000xxxx.
+ y = x - 0x100; // If positions 8-15 are 0,
+ m = (y >> 16) & 8; // add 8 to n and shift x left 8.
+ n = n + m;
+ x = x << m;
+
+ y = x - 0x1000; // If positions 12-15 are 0,
+ m = (y >> 16) & 4; // add 4 to n and shift x left 4.
+ n = n + m;
+ x = x << m;
+
+ y = x - 0x4000; // If positions 14-15 are 0,
+ m = (y >> 16) & 2; // add 2 to n and shift x left 2.
+ n = n + m;
+ x = x << m;
+
+ y = x >> 14; // Set y = 0, 1, 2, or 3.
+ m = y & ~(y >> 1); // Set m = 0, 1, 2, or 2 resp.
+ return n + 2 - m;
+}
+
+/* Leading zeros via bit smearing + popcount of the complement
+   (same technique as nlz() in the find_lsb test). */
+int nlz5(unsigned x) {
+ int pop(unsigned x);
+
+ x = x | (x >> 1);
+ x = x | (x >> 2);
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >>16);
+ return pop(~x);
+}
+
+/* The four programs below are not valid ANSI C programs. This is
+because they refer to the same storage locations as two different types.
+However, they work with xlc/AIX, gcc/AIX, and gcc/NT. If you try to
+code them more compactly by declaring a variable xx to be "double," and
+then using
+
+ n = 1054 - (*((unsigned *)&xx + LE) >> 20);
+
+then you are violating not only the rule above, but also the ANSI C
+rule that pointer arithmetic can be performed only on pointers to
+array elements.
+ When coded with the above statement, the program fails with xlc,
+gcc/AIX, and gcc/NT, at some optimization levels.
+ BTW, these programs use the "anonymous union" feature of C++, not
+available in C. */
+
+/* Extracts nlz from the biased exponent of k converted to double.
+   NOTE(review): relies on anonymous-union type punning and IEEE-754
+   double layout; the comment block above documents the portability
+   caveats (not strictly conforming, but works on the listed compilers).
+   LE selects the high word of the double on little-endian hosts. */
+int nlz6(unsigned k)
+{
+ union {
+ unsigned asInt[2];
+ double asDouble;
+ };
+ int n;
+
+ asDouble = static_cast<double>(k) + 0.5;
+ n = 1054 - (asInt[LE] >> 20);
+ return n;
+}
+
+/* Like nlz6 but without the +0.5 bias; the extra (n & 31) + (n >> 9)
+   step repairs the k == 0 result. Same type-punning caveats as nlz6. */
+int nlz7(unsigned k)
+{
+ union {
+ unsigned asInt[2];
+ double asDouble;
+ };
+ int n;
+
+ asDouble = static_cast<double>(k);
+ n = 1054 - (asInt[LE] >> 20);
+ n = (n & 31) + (n >> 9);
+ return n;
+}
+
+ /* In single qualifier, round-to-nearest mode, the basic method fails for:
+ k = 0, k = 01FFFFFF, 03FFFFFE <= k <= 03FFFFFF,
+ 07FFFFFC <= k <= 07FFFFFF,
+ 0FFFFFF8 <= k <= 0FFFFFFF,
+ ...
+ 7FFFFFC0 <= k <= 7FFFFFFF.
+ FFFFFF80 <= k <= FFFFFFFF.
+ For k = 0 it gives 158, and for the other values it is too low by 1. */
+
+/* Single-precision variant: k & ~(k >> 1) clears the bit below the msb
+   so float rounding cannot bump the exponent (see comment above for the
+   inputs that would otherwise fail). Same punning caveats as nlz6. */
+int nlz8(unsigned k)
+{
+ union {
+ unsigned asInt;
+ float asFloat;
+ };
+ int n;
+
+ k = k & ~(k >> 1); /* Fix problem with rounding. */
+ asFloat = static_cast<float>(k) + 0.5f;
+ n = 158 - (asInt >> 23);
+ return n;
+}
+
+/* The example below shows how to make a macro for nlz. It uses an
+extension to the C and C++ languages that is provided by the GNU C/C++
+compiler, namely, that of allowing statements and declarations in
+expressions (see "Using and Porting GNU CC", by Richard M. Stallman
+(1998). The underscores are necessary to protect against the
+possibility that the macro argument will conflict with one of its local
+variables, e.g., NLZ(k). */
+
+/* nlz8 without the +0.5f bias; (n & 31) + (n >> 6) repairs k == 0.
+   Same type-punning caveats as nlz6. */
+int nlz9(unsigned k)
+{
+ union {
+ unsigned asInt;
+ float asFloat;
+ };
+ int n;
+
+ k = k & ~(k >> 1); /* Fix problem with rounding. */
+ asFloat = static_cast<float>(k);
+ n = 158 - (asInt >> 23);
+ n = (n & 31) + (n >> 6); /* Fix problem with k = 0. */
+ return n;
+}
+
+/* Below are three nearly equivalent programs for computing the number
+of leading zeros in a word. This material is not in HD, but may be in a
+future edition.
+ Immediately below is Robert Harley's algorithm, found at the
+comp.arch newsgroup entry dated 7/12/96, pointed out to me by Norbert
+Juffa.
+ Table entries marked "u" are unused. 14 ops including a multiply,
+plus an indexed load.
+ The smallest multiplier that works is 0x045BCED1 = 17*65*129*513 (all
+of form 2**k + 1). There are no multipliers of three terms of the form
+2**k +- 1 that work, with a table size of 64 or 128. There are some,
+with a table size of 64, if you precede the multiplication with x = x -
+(x >> 1), but that seems less elegant. There are also some if you use a
+table size of 256, the smallest is 0x01033CBF = 65*255*1025 (this would
+save two instructions in the form of this algorithm with the
+multiplication expanded into shifts and adds, but the table size is
+getting a bit large). */
+
+#define u 99
+/* Robert Harley's algorithm (see commentary above): smear the msb right,
+   multiply by 7*255**3, and use the top 6 bits to index the table. */
+int nlz10(unsigned x)
+{
+ static char table[64] =
+ {32,31, u,16, u,30, 3, u, 15, u, u, u,29,10, 2, u,
+ u, u,12,14,21, u,19, u, u,28, u,25, u, 9, 1, u,
+ 17, u, 4, u, u, u,11, u, 13,22,20, u,26, u, u,18,
+ 5, u, u,23, u,27, u, 6, u,24, 7, u, 8, u, 0, u};
+
+ x = x | (x >> 1); // Propagate leftmost
+ x = x | (x >> 2); // 1-bit to the right.
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >>16);
+ x = x*0x06EB14F9; // Multiplier is 7*255**3.
+ return table[x >> 26];
+}
+
+/* Harley's algorithm with multiply expanded.
+19 elementary ops plus an indexed load. */
+
+/* Harley's algorithm with the 7*255**3 multiply expanded into
+   shift-and-subtract steps for machines with slow multiply. */
+int nlz10a(unsigned x)
+{
+ static char table[64] =
+ {32,31, u,16, u,30, 3, u, 15, u, u, u,29,10, 2, u,
+ u, u,12,14,21, u,19, u, u,28, u,25, u, 9, 1, u,
+ 17, u, 4, u, u, u,11, u, 13,22,20, u,26, u, u,18,
+ 5, u, u,23, u,27, u, 6, u,24, 7, u, 8, u, 0, u};
+
+ x = x | (x >> 1); // Propagate leftmost
+ x = x | (x >> 2); // 1-bit to the right.
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >> 16);
+ x = (x << 3) - x; // Multiply by 7.
+ x = (x << 8) - x; // Multiply by 255.
+ x = (x << 8) - x; // Again.
+ x = (x << 8) - x; // Again.
+ return table[x >> 26];
+}
+
+/* Julius Goryavsky's version of Harley's algorithm.
+17 elementary ops plus an indexed load, if the machine
+has "and not." */
+
+/* Julius Goryavsky's variant of Harley: the final smear uses and-not,
+   allowing a cheaper multiplier (or the commented shift expansion). */
+int nlz10b(unsigned x)
+{
+ static char table[64] =
+ {32,20,19, u, u,18, u, 7, 10,17, u, u,14, u, 6, u,
+ u, 9, u,16, u, u, 1,26, u,13, u, u,24, 5, u, u,
+ u,21, u, 8,11, u,15, u, u, u, u, 2,27, 0,25, u,
+ 22, u,12, u, u, 3,28, u, 23, u, 4,29, u, u,30,31};
+
+ x = x | (x >> 1); // Propagate leftmost
+ x = x | (x >> 2); // 1-bit to the right.
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x & ~(x >> 16);
+ x = x*0xFD7049FF; // Activate this line or the following 3.
+ // x = (x << 9) - x; // Multiply by 511.
+ // x = (x << 11) - x; // Multiply by 2047.
+ // x = (x << 14) - x; // Multiply by 16383.
+ return table[x >> 26];
+}
+
+// Global failure counter plus reporter used by every benchmark loop in main.
+int errors;
+void error(int x, int y)
+{
+ errors = errors + 1;
+ std::printf("Error for x = %08x, got %d\n", x, y);
+}
+
+/* Test driver: test[] holds (input, expected-nlz) pairs; each nlz variant
+   is checked against the table Count times and its std::clock() time is
+   printed. Only active in release builds (NDEBUG). */
+int main()
+{
+# ifdef NDEBUG
+
+ int i, n;
+ static unsigned test[] = {0,32, 1,31, 2,30, 3,30, 4,29, 5,29, 6,29,
+ 7,29, 8,28, 9,28, 16,27, 32,26, 64,25, 128,24, 255,24, 256,23,
+ 512,22, 1024,21, 2048,20, 4096,19, 8192,18, 16384,17, 32768,16,
+ 65536,15, 0x20000,14, 0x40000,13, 0x80000,12, 0x100000,11,
+ 0x200000,10, 0x400000,9, 0x800000,8, 0x1000000,7, 0x2000000,6,
+ 0x4000000,5, 0x8000000,4, 0x0FFFFFFF,4, 0x10000000,3,
+ 0x3000FFFF,2, 0x50003333,1, 0x7FFFFFFF,1, 0x80000000,0,
+ 0xFFFFFFFF,0};
+ std::size_t const Count = 1000;
+
+ n = sizeof(test)/4; // Element count; entries come in (input, expected) pairs.
+
+ std::clock_t TimestampBeg = 0;
+ std::clock_t TimestampEnd = 0;
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz1(test[i]) != test[i+1]) error(test[i], nlz1(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz1: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz1a(test[i]) != test[i+1]) error(test[i], nlz1a(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz1a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz2(test[i]) != test[i+1]) error(test[i], nlz2(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz2: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz2a(test[i]) != test[i+1]) error(test[i], nlz2a(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz2a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz3(test[i]) != test[i+1]) error(test[i], nlz3(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz3: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz4(test[i]) != test[i+1]) error(test[i], nlz4(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz4: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz5(test[i]) != test[i+1]) error(test[i], nlz5(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz5: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz6(test[i]) != test[i+1]) error(test[i], nlz6(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz6: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz7(test[i]) != test[i+1]) error(test[i], nlz7(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz7: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz8(test[i]) != test[i+1]) error(test[i], nlz8(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz8: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz9(test[i]) != test[i+1]) error(test[i], nlz9(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz9: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz10(test[i]) != test[i+1]) error(test[i], nlz10(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz10: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz10a(test[i]) != test[i+1]) error(test[i], nlz10a(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz10a: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ TimestampBeg = std::clock();
+ for (std::size_t k = 0; k < Count; ++k)
+ for (i = 0; i < n; i += 2) {
+ if (nlz10b(test[i]) != test[i+1]) error(test[i], nlz10b(test[i]));}
+ TimestampEnd = std::clock();
+
+ std::printf("nlz10b: %d clocks\n", static_cast<int>(TimestampEnd - TimestampBeg));
+
+ if (errors == 0)
+ std::printf("Passed all %d cases.\n", static_cast<int>(sizeof(test)/8));
+
+# endif//NDEBUG
+}
diff --git a/3rdparty/glm/source/test/core/core_func_matrix.cpp b/3rdparty/glm/source/test/core/core_func_matrix.cpp
new file mode 100644
index 0000000..c5b2007
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_matrix.cpp
@@ -0,0 +1,312 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/ext/scalar_constants.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+#include <ctime>
+#include <cstdio>
+
+using namespace glm;
+
+/* Checks matrixCompMult (component-wise multiply) for every matrix shape
+   by squaring a 0..N-1 filled matrix and comparing against the expected
+   per-component squares. Returns the number of failures. */
+int test_matrixCompMult()
+{
+ int Error(0);
+
+ {
+ mat2 m(0, 1, 2, 3);
+ mat2 n = matrixCompMult(m, m);
+ mat2 expected = mat2(0, 1, 4, 9);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat2x3 m(0, 1, 2, 3, 4, 5);
+ mat2x3 n = matrixCompMult(m, m);
+ mat2x3 expected = mat2x3(0, 1, 4, 9, 16, 25);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat2x4 m(0, 1, 2, 3, 4, 5, 6, 7);
+ mat2x4 n = matrixCompMult(m, m);
+ mat2x4 expected = mat2x4(0, 1, 4, 9, 16, 25, 36, 49);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat3 m(0, 1, 2, 3, 4, 5, 6, 7, 8);
+ mat3 n = matrixCompMult(m, m);
+ mat3 expected = mat3(0, 1, 4, 9, 16, 25, 36, 49, 64);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat3x2 m(0, 1, 2, 3, 4, 5);
+ mat3x2 n = matrixCompMult(m, m);
+ mat3x2 expected = mat3x2(0, 1, 4, 9, 16, 25);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat3x4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ mat3x4 n = matrixCompMult(m, m);
+ mat3x4 expected = mat3x4(0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ mat4 n = matrixCompMult(m, m);
+ mat4 expected = mat4(0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat4x2 m(0, 1, 2, 3, 4, 5, 6, 7);
+ mat4x2 n = matrixCompMult(m, m);
+ mat4x2 expected = mat4x2(0, 1, 4, 9, 16, 25, 36, 49);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat4x3 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ mat4x3 n = matrixCompMult(m, m);
+ mat4x3 expected = mat4x3(0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121);
+ Error += all(equal(n, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+/* Compile-time coverage of outerProduct for every result shape; results
+   are intentionally unused (this only verifies the overloads exist and
+   return the expected matrix types). Always returns 0. */
+int test_outerProduct()
+{
+ { glm::mat2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec2(1.0f)); }
+ { glm::mat3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec3(1.0f)); }
+ { glm::mat4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec4(1.0f)); }
+
+ { glm::mat2x3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec2(1.0f)); }
+ { glm::mat2x4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec2(1.0f)); }
+
+ { glm::mat3x2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec3(1.0f)); }
+ { glm::mat3x4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec3(1.0f)); }
+
+ { glm::mat4x2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec4(1.0f)); }
+ { glm::mat4x3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec4(1.0f)); }
+
+ return 0;
+}
+
+/* Checks transpose for every matrix shape against hand-computed expected
+   matrices. Returns the number of failures. */
+int test_transpose()
+{
+ int Error(0);
+
+ {
+ mat2 const m(0, 1, 2, 3);
+ mat2 const t = transpose(m);
+ mat2 const expected = mat2(0, 2, 1, 3);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat2x3 m(0, 1, 2, 3, 4, 5);
+ mat3x2 t = transpose(m);
+ mat3x2 const expected = mat3x2(0, 3, 1, 4, 2, 5);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat2x4 m(0, 1, 2, 3, 4, 5, 6, 7);
+ mat4x2 t = transpose(m);
+ mat4x2 const expected = mat4x2(0, 4, 1, 5, 2, 6, 3, 7);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat3 m(0, 1, 2, 3, 4, 5, 6, 7, 8);
+ mat3 t = transpose(m);
+ mat3 const expected = mat3(0, 3, 6, 1, 4, 7, 2, 5, 8);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat3x2 m(0, 1, 2, 3, 4, 5);
+ mat2x3 t = transpose(m);
+ mat2x3 const expected = mat2x3(0, 2, 4, 1, 3, 5);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat3x4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ mat4x3 t = transpose(m);
+ mat4x3 const expected = mat4x3(0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ mat4 t = transpose(m);
+ mat4 const expected = mat4(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat4x2 m(0, 1, 2, 3, 4, 5, 6, 7);
+ mat2x4 t = transpose(m);
+ mat2x4 const expected = mat2x4(0, 2, 4, 6, 1, 3, 5, 7);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ mat4x3 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ mat3x4 t = transpose(m);
+ mat3x4 const expected = mat3x4(0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11);
+ Error += all(equal(t, expected, epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+// Placeholder: determinant is not yet covered by this test file.
+int test_determinant()
+{
+
+
+ return 0;
+}
+
+/* Verifies inverse() for 4x4, 3x3 and 2x2 by checking that M * inverse(M)
+   equals the identity within epsilon. Returns the number of failures. */
+int test_inverse()
+{
+ int Error = 0;
+
+ {
+ glm::mat4x4 A4x4(
+ glm::vec4(1, 0, 1, 0),
+ glm::vec4(0, 1, 0, 0),
+ glm::vec4(0, 0, 1, 0),
+ glm::vec4(0, 0, 0, 1));
+ glm::mat4x4 B4x4 = inverse(A4x4);
+ glm::mat4x4 I4x4 = A4x4 * B4x4;
+ glm::mat4x4 Identity(1);
+ Error += all(equal(I4x4, Identity, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat3x3 A3x3(
+ glm::vec3(1, 0, 1),
+ glm::vec3(0, 1, 0),
+ glm::vec3(0, 0, 1));
+ glm::mat3x3 B3x3 = glm::inverse(A3x3);
+ glm::mat3x3 I3x3 = A3x3 * B3x3;
+ glm::mat3x3 Identity(1);
+ Error += all(equal(I3x3, Identity, epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::mat2x2 A2x2(
+ glm::vec2(1, 1),
+ glm::vec2(0, 1));
+ glm::mat2x2 B2x2 = glm::inverse(A2x2);
+ glm::mat2x2 I2x2 = A2x2 * B2x2;
+ glm::mat2x2 Identity(1);
+ Error += all(equal(I2x2, Identity, epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+/* Same M * inverse(M) == I check as test_inverse but with a looser 0.001
+   tolerance, intended for SIMD-accelerated inverse paths. */
+int test_inverse_simd()
+{
+ int Error = 0;
+
+ glm::mat4x4 const Identity(1);
+
+ glm::mat4x4 const A4x4(
+ glm::vec4(1, 0, 1, 0),
+ glm::vec4(0, 1, 0, 0),
+ glm::vec4(0, 0, 1, 0),
+ glm::vec4(0, 0, 0, 1));
+ glm::mat4x4 const B4x4 = glm::inverse(A4x4);
+ glm::mat4x4 const I4x4 = A4x4 * B4x4;
+
+ Error += glm::all(glm::equal(I4x4, Identity, 0.001f)) ? 0 : 1;
+
+ return Error;
+}
+
+/* Benchmarks glm::inverse over Count rotate-translate matrices and prints
+   the elapsed std::clock() ticks together with the worst deviation of
+   inverse(M) * M from the identity.
+   Fixed: the deviation accumulator previously did
+   Diff = glm::max(m[y][x], i[y][x]), overwriting Diff each iteration and
+   comparing raw components instead of a difference; it now accumulates
+   the maximum absolute error. Also replaced the %lu format (clock_t need
+   not be unsigned long) with a portable cast. */
+template<typename VEC3, typename MAT4>
+int test_inverse_perf(std::size_t Count, std::size_t Instance, char const * Message)
+{
+ std::vector<MAT4> TestInputs;
+ TestInputs.resize(Count);
+ std::vector<MAT4> TestOutputs;
+ TestOutputs.resize(TestInputs.size());
+
+ VEC3 Axis(glm::normalize(VEC3(1.0f, 2.0f, 3.0f)));
+
+ for(std::size_t i = 0; i < TestInputs.size(); ++i)
+ {
+ typename MAT4::value_type f = static_cast<typename MAT4::value_type>(i + Instance) * typename MAT4::value_type(0.1) + typename MAT4::value_type(0.1);
+ TestInputs[i] = glm::rotate(glm::translate(MAT4(1), Axis * f), f, Axis);
+ //TestInputs[i] = glm::translate(MAT4(1), Axis * f);
+ }
+
+ std::clock_t StartTime = std::clock();
+
+ for(std::size_t i = 0; i < TestInputs.size(); ++i)
+ TestOutputs[i] = glm::inverse(TestInputs[i]);
+
+ std::clock_t EndTime = std::clock();
+
+ for(std::size_t i = 0; i < TestInputs.size(); ++i)
+ TestOutputs[i] = TestOutputs[i] * TestInputs[i];
+
+ // Largest |(M^-1 * M)[y][x] - I[y][x]| over all benchmarked matrices.
+ typename MAT4::value_type Diff(0);
+ for(std::size_t Entry = 0; Entry < TestOutputs.size(); ++Entry)
+ {
+ MAT4 i(1.0);
+ MAT4 m(TestOutputs[Entry]);
+ for(glm::length_t y = 0; y < m.length(); ++y)
+ for(glm::length_t x = 0; x < m[y].length(); ++x)
+ Diff = glm::max(Diff, glm::abs(m[y][x] - i[y][x]));
+ }
+
+ //glm::uint Ulp = 0;
+ //Ulp = glm::max(glm::float_distance(*Dst, *Src), Ulp);
+
+ std::printf("inverse<%s>(%f): %ld\n", Message, static_cast<double>(Diff), static_cast<long>(EndTime - StartTime));
+
+ return 0;
+}
+
+/* Runs all matrix-function tests, then the inverse performance benchmark
+   for mat4 and dmat4 (1000 samples in release builds, 1 in debug). */
+int main()
+{
+ int Error = 0;
+ Error += test_matrixCompMult();
+ Error += test_outerProduct();
+ Error += test_transpose();
+ Error += test_determinant();
+ Error += test_inverse();
+ Error += test_inverse_simd();
+
+# ifdef NDEBUG
+ std::size_t const Samples = 1000;
+# else
+ std::size_t const Samples = 1;
+# endif//NDEBUG
+
+ for(std::size_t i = 0; i < 1; ++i)
+ {
+ Error += test_inverse_perf<glm::vec3, glm::mat4>(Samples, i, "mat4");
+ Error += test_inverse_perf<glm::dvec3, glm::dmat4>(Samples, i, "dmat4");
+ }
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_noise.cpp b/3rdparty/glm/source/test/core/core_func_noise.cpp
new file mode 100644
index 0000000..4f0b430
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_noise.cpp
@@ -0,0 +1,7 @@
+// Placeholder for noise-function tests: currently performs no checks.
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_packing.cpp b/3rdparty/glm/source/test/core/core_func_packing.cpp
new file mode 100644
index 0000000..c3cd14a
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_packing.cpp
@@ -0,0 +1,156 @@
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/packing.hpp>
+#include <vector>
+
+// Round-trips vec2 values through packUnorm2x16/unpackUnorm2x16 and checks
+// the result is within one 16-bit unorm step (1/65535).
+int test_packUnorm2x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.0f));
+ A.push_back(glm::vec2(0.5f, 0.7f));
+ A.push_back(glm::vec2(0.1f, 0.2f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint32 C = glm::packUnorm2x16(B);
+ glm::vec2 D = glm::unpackUnorm2x16(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 65535.f)) ? 0 : 1;
+ // NOTE(review): assert is used with no visible <cassert> include;
+ // presumably pulled in transitively by a GLM header -- confirm.
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+// Round-trips vec2 values through packSnorm2x16/unpackSnorm2x16; the snorm
+// tolerance is two 15-bit steps to absorb rounding of negative values.
+int test_packSnorm2x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2( 1.0f, 0.0f));
+ A.push_back(glm::vec2(-0.5f,-0.7f));
+ A.push_back(glm::vec2(-0.1f, 0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint32 C = glm::packSnorm2x16(B);
+ glm::vec2 D = glm::unpackSnorm2x16(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 32767.0f * 2.0f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+// Checks packUnorm4x8 against a hand-built reference bit pattern, then
+// round-trips several vectors within one 8-bit unorm step (1/255).
+int test_packUnorm4x8()
+{
+ int Error = 0;
+
+ glm::uint32 Packed = glm::packUnorm4x8(glm::vec4(1.0f, 0.5f, 0.0f, 1.0f));
+ glm::u8vec4 Vec(255, 128, 0, 255);
+ // NOTE(review): reinterpreting u8vec4 storage as uint32 violates strict
+ // aliasing and assumes little-endian byte order -- deliberate in this
+ // test, but a memcpy would be the portable form.
+ glm::uint32 & Ref = *reinterpret_cast<glm::uint32*>(&Vec[0]);
+
+ Error += Packed == Ref ? 0 : 1;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4(1.0f, 0.7f, 0.3f, 0.0f));
+ A.push_back(glm::vec4(0.5f, 0.1f, 0.2f, 0.3f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint32 C = glm::packUnorm4x8(B);
+ glm::vec4 D = glm::unpackUnorm4x8(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 255.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+// Round-trips vec4 values through packSnorm4x8/unpackSnorm4x8 within one
+// 7-bit snorm step (1/127).
+int test_packSnorm4x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4( 1.0f, 0.0f,-0.5f,-1.0f));
+ A.push_back(glm::vec4(-0.7f,-0.1f, 0.1f, 0.7f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint32 C = glm::packSnorm4x8(B);
+ glm::vec4 D = glm::unpackSnorm4x8(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 127.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+// Round-trips vec2 values through packHalf2x16/unpackHalf2x16.
+// NOTE(review): the 1/127 tolerance is reused from the snorm tests; it is
+// loose for half floats of these magnitudes but sufficient here.
+int test_packHalf2x16()
+{
+ int Error = 0;
+/*
+ std::vector<glm::hvec2> A;
+ A.push_back(glm::hvec2(glm::half( 1.0f), glm::half( 2.0f)));
+ A.push_back(glm::hvec2(glm::half(-1.0f), glm::half(-2.0f)));
+ A.push_back(glm::hvec2(glm::half(-1.1f), glm::half( 1.1f)));
+*/
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2( 1.0f, 2.0f));
+ A.push_back(glm::vec2(-1.0f,-2.0f));
+ A.push_back(glm::vec2(-1.1f, 1.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint C = glm::packHalf2x16(B);
+ glm::vec2 D = glm::unpackHalf2x16(C);
+ //Error += B == D ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 127.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+// Round-trips uvec2 bit patterns through packDouble2x32/unpackDouble2x32;
+// the round trip must be bit-exact, so plain equality is used.
+int test_packDouble2x32()
+{
+ int Error = 0;
+
+ std::vector<glm::uvec2> A;
+ A.push_back(glm::uvec2( 1, 2));
+ // Negative literals wrap modulo 2^32 when stored in uvec2 -- deliberate,
+ // to exercise large unsigned bit patterns.
+ A.push_back(glm::uvec2(-1,-2));
+ A.push_back(glm::uvec2(-1000, 1100));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::uvec2 B(A[i]);
+ double C = glm::packDouble2x32(B);
+ glm::uvec2 D = glm::unpackDouble2x32(C);
+ Error += B == D ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+// Entry point: runs all packing round-trip tests; returns the error count.
+int main()
+{
+ int Error = 0;
+
+ Error += test_packSnorm4x8();
+ Error += test_packUnorm4x8();
+ Error += test_packSnorm2x16();
+ Error += test_packUnorm2x16();
+ Error += test_packHalf2x16();
+ Error += test_packDouble2x32();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_swizzle.cpp b/3rdparty/glm/source/test/core/core_func_swizzle.cpp
new file mode 100644
index 0000000..9758533
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_swizzle.cpp
@@ -0,0 +1,164 @@
+#define GLM_FORCE_SWIZZLE
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/glm.hpp>
+
+// Exercises ivec2 swizzling: function-style (.yx()) under both swizzle
+// modes, and member-style (.yx) only when operator swizzles are enabled.
+static int test_ivec2_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::ivec2 A(1, 2);
+ glm::ivec2 B = A.yx();
+ glm::ivec2 C = B.yx();
+
+ // Swizzling twice restores the original vector.
+ Error += A != B ? 0 : 1;
+ Error += A == C ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec2 A(1, 2);
+ glm::ivec2 B = A.yx;
+ glm::ivec2 C = A.yx;
+
+ Error += A != B ? 0 : 1;
+ Error += B == C ? 0 : 1;
+
+ // Swizzle proxies must also be assignable on the left-hand side.
+ B.xy = B.yx;
+ C.xy = C.yx;
+
+ Error += B == C ? 0 : 1;
+
+ glm::ivec2 D(0, 0);
+ D.yx = A.xy;
+ Error += A.yx() == D ? 0 : 1;
+
+ glm::ivec2 E = A.yx;
+ Error += E == D ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE
+
+ return Error;
+}
+
+// Exercises ivec3 swizzling: double-reverse identity, 2-component swizzles
+// of a const vector, and arithmetic on operator-swizzle proxies.
+int test_ivec3_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::ivec3 A(1, 2, 3);
+ glm::ivec3 B = A.zyx();
+ glm::ivec3 C = B.zyx();
+
+ Error += A != B ? 0 : 1;
+ Error += A == C ? 0 : 1;
+ }
+# endif
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec3 const A(1, 2, 3);
+ glm::ivec2 B = A.yx;
+ glm::ivec2 C = A.yx;
+
+ Error += A.yx() == B ? 0 : 1;
+ Error += B == C ? 0 : 1;
+
+ B.xy = B.yx;
+ C.xy = C.yx;
+
+ Error += B == C ? 0 : 1;
+
+ glm::ivec2 D(0, 0);
+ D.yx = A.xy;
+
+ Error += A.yx() == D ? 0 : 1;
+
+ glm::ivec2 E(0, 0);
+ E.xy = A.xy();
+
+ Error += E == A.xy() ? 0 : 1;
+ Error += E.xy() == A.xy() ? 0 : 1;
+
+ // Arithmetic between two swizzle proxies; A.xxx == (1, 1, 1).
+ glm::ivec3 const F = A.xxx + A.xxx;
+ Error += F == glm::ivec3(2) ? 0 : 1;
+
+ glm::ivec3 const G = A.xxx - A.xxx;
+ Error += G == glm::ivec3(0) ? 0 : 1;
+
+ glm::ivec3 const H = A.xxx * A.xxx;
+ Error += H == glm::ivec3(1) ? 0 : 1;
+
+ glm::ivec3 const I = A.xxx / A.xxx;
+ Error += I == glm::ivec3(1) ? 0 : 1;
+
+ // Compound assignment through a swizzle proxy on the left-hand side.
+ glm::ivec3 J(1, 2, 3);
+ J.xyz += glm::ivec3(1);
+ Error += J == glm::ivec3(2, 3, 4) ? 0 : 1;
+
+ glm::ivec3 K(1, 2, 3);
+ K.xyz += A.xyz;
+ Error += K == glm::ivec3(2, 4, 6) ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+// Checks that reversing an ivec4 twice with wzyx() restores the original.
+int test_ivec4_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::ivec4 A(1, 2, 3, 4);
+ glm::ivec4 B = A.wzyx();
+ glm::ivec4 C = B.wzyx();
+
+ Error += A != B ? 0 : 1;
+ Error += A == C ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+// Same double-reverse check for float vec4, using epsilon comparisons,
+// plus a dot product of two swizzled views (1*4 + 2*3 + 3*2 + 4*1 = 20).
+int test_vec4_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::vec4 A(1, 2, 3, 4);
+ glm::vec4 B = A.wzyx();
+ glm::vec4 C = B.wzyx();
+
+ Error += glm::any(glm::notEqual(A, B, 0.0001f)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+
+ float D = glm::dot(C.wzyx(), C.xyzw());
+ Error += glm::equal(D, 20.f, 0.001f) ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+// Entry point: runs all swizzle tests; returns the error count.
+int main()
+{
+ int Error = 0;
+
+ Error += test_ivec2_swizzle();
+ Error += test_ivec3_swizzle();
+ Error += test_ivec4_swizzle();
+ Error += test_vec4_swizzle();
+
+ return Error;
+}
+
+
+
diff --git a/3rdparty/glm/source/test/core/core_func_trigonometric.cpp b/3rdparty/glm/source/test/core/core_func_trigonometric.cpp
new file mode 100644
index 0000000..3172340
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_trigonometric.cpp
@@ -0,0 +1,10 @@
+#include <glm/trigonometric.hpp>
+
+// Placeholder for trigonometric-function tests: currently no checks.
+int main()
+{
+ int Error = 0;
+
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_func_vector_relational.cpp b/3rdparty/glm/source/test/core/core_func_vector_relational.cpp
new file mode 100644
index 0000000..0a4e7e7
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_func_vector_relational.cpp
@@ -0,0 +1,180 @@
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/gtc/vec1.hpp>
+
+// Verifies glm::not_ flips all components of all-false bvec1..bvec4.
+static int test_not()
+{
+ int Error = 0;
+
+ {
+ glm::bvec1 v(false);
+ Error += glm::all(glm::not_(v)) ? 0 : 1;
+ }
+
+ {
+ glm::bvec2 v(false);
+ Error += glm::all(glm::not_(v)) ? 0 : 1;
+ }
+
+ {
+ glm::bvec3 v(false);
+ Error += glm::all(glm::not_(v)) ? 0 : 1;
+ }
+
+ {
+ glm::bvec4 v(false);
+ Error += glm::all(glm::not_(v)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+// Verifies lessThan/lessThanEqual component-wise on float and int vectors;
+// the integer cases also test lessThanEqual where one component is equal.
+static int test_less()
+{
+ int Error = 0;
+
+ {
+ glm::vec2 const A(1, 2);
+ glm::vec2 const B(2, 3);
+ Error += glm::all(glm::lessThan(A, B)) ? 0: 1;
+ Error += glm::all(glm::lessThanEqual(A, B)) ? 0: 1;
+ }
+
+ {
+ glm::vec3 const A(1, 2, 3);
+ glm::vec3 const B(2, 3, 4);
+ Error += glm::all(glm::lessThan(A, B)) ? 0: 1;
+ Error += glm::all(glm::lessThanEqual(A, B)) ? 0: 1;
+ }
+
+ {
+ glm::vec4 const A(1, 2, 3, 4);
+ glm::vec4 const B(2, 3, 4, 5);
+ Error += glm::all(glm::lessThan(A, B)) ? 0: 1;
+ Error += glm::all(glm::lessThanEqual(A, B)) ? 0: 1;
+ }
+
+ {
+ glm::ivec2 const A(1, 2);
+ glm::ivec2 const B(2, 3);
+ Error += glm::all(glm::lessThan(A, B)) ? 0: 1;
+
+ glm::ivec2 const C(1, 3);
+ Error += glm::all(glm::lessThanEqual(A, C)) ? 0: 1;
+ }
+
+ {
+ glm::ivec3 const A(1, 2, 3);
+ glm::ivec3 const B(2, 3, 4);
+ Error += glm::all(glm::lessThan(A, B)) ? 0: 1;
+
+ glm::ivec3 const C(1, 3, 4);
+ Error += glm::all(glm::lessThanEqual(A, C)) ? 0: 1;
+ }
+
+ {
+ glm::ivec4 const A(1, 2, 3, 4);
+ glm::ivec4 const B(2, 3, 4, 5);
+ Error += glm::all(glm::lessThan(A, B)) ? 0: 1;
+
+ glm::ivec4 const C(1, 3, 4, 5);
+ Error += glm::all(glm::lessThanEqual(A, C)) ? 0: 1;
+ }
+
+ return Error;
+}
+
+// Mirror of test_less for greaterThan/greaterThanEqual: same data, with the
+// operand order reversed.
+static int test_greater()
+{
+ int Error = 0;
+
+ {
+ glm::vec2 const A(1, 2);
+ glm::vec2 const B(2, 3);
+ Error += glm::all(glm::greaterThan(B, A)) ? 0: 1;
+ Error += glm::all(glm::greaterThanEqual(B, A)) ? 0: 1;
+ }
+
+ {
+ glm::vec3 const A(1, 2, 3);
+ glm::vec3 const B(2, 3, 4);
+ Error += glm::all(glm::greaterThan(B, A)) ? 0: 1;
+ Error += glm::all(glm::greaterThanEqual(B, A)) ? 0: 1;
+ }
+
+ {
+ glm::vec4 const A(1, 2, 3, 4);
+ glm::vec4 const B(2, 3, 4, 5);
+ Error += glm::all(glm::greaterThan(B, A)) ? 0: 1;
+ Error += glm::all(glm::greaterThanEqual(B, A)) ? 0: 1;
+ }
+
+ {
+ glm::ivec2 const A(1, 2);
+ glm::ivec2 const B(2, 3);
+ Error += glm::all(glm::greaterThan(B, A)) ? 0: 1;
+
+ glm::ivec2 const C(1, 3);
+ Error += glm::all(glm::greaterThanEqual(C, A)) ? 0: 1;
+ }
+
+ {
+ glm::ivec3 const A(1, 2, 3);
+ glm::ivec3 const B(2, 3, 4);
+ Error += glm::all(glm::greaterThan(B, A)) ? 0: 1;
+
+ glm::ivec3 const C(1, 3, 4);
+ Error += glm::all(glm::greaterThanEqual(C, A)) ? 0: 1;
+ }
+
+ {
+ glm::ivec4 const A(1, 2, 3, 4);
+ glm::ivec4 const B(2, 3, 4, 5);
+ Error += glm::all(glm::greaterThan(B, A)) ? 0: 1;
+
+ glm::ivec4 const C(1, 3, 4, 5);
+ Error += glm::all(glm::greaterThanEqual(C, A)) ? 0: 1;
+ }
+
+ return Error;
+}
+
+// Verifies component-wise glm::equal on identical integer vectors.
+static int test_equal()
+{
+ int Error = 0;
+
+ {
+ glm::ivec2 const A(1, 2);
+ glm::ivec2 const B(1, 2);
+ Error += glm::all(glm::equal(B, A)) ? 0: 1;
+ }
+
+ {
+ glm::ivec3 const A(1, 2, 3);
+ glm::ivec3 const B(1, 2, 3);
+ Error += glm::all(glm::equal(B, A)) ? 0: 1;
+ }
+
+ {
+ glm::ivec4 const A(1, 2, 3, 4);
+ glm::ivec4 const B(1, 2, 3, 4);
+ Error += glm::all(glm::equal(B, A)) ? 0: 1;
+ }
+
+ return Error;
+}
+
+// Entry point: runs all vector relational tests; returns the error count.
+int main()
+{
+ int Error = 0;
+
+ Error += test_not();
+ Error += test_less();
+ Error += test_greater();
+ Error += test_equal();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_setup_force_cxx98.cpp b/3rdparty/glm/source/test/core/core_setup_force_cxx98.cpp
new file mode 100644
index 0000000..32bb63c
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_setup_force_cxx98.cpp
@@ -0,0 +1,12 @@
+#ifndef GLM_FORCE_CXX98
+# define GLM_FORCE_CXX98
+#endif
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+// Compile-only test: succeeding to build GLM with GLM_FORCE_CXX98 is the
+// actual check; the run itself asserts nothing.
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_setup_force_size_t_length.cpp b/3rdparty/glm/source/test/core/core_setup_force_size_t_length.cpp
new file mode 100644
index 0000000..36010e3
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_setup_force_size_t_length.cpp
@@ -0,0 +1,22 @@
+#define GLM_FORCE_SIZE_T_LENGTH
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+// Component-wise sum of two GLM vectors; iterates with length(), which this
+// TU configures (via GLM_FORCE_SIZE_T_LENGTH) to use size_t-based lengths.
+template <typename genType>
+genType add(genType const& a, genType const& b)
+{
+ genType result(0);
+ for(glm::length_t i = 0; i < a.length(); ++i)
+ result[i] = a[i] + b[i];
+ return result;
+}
+
+// Entry point: checks that indexed iteration still works under the
+// size_t-length configuration.
+int main()
+{
+ int Error = 0;
+
+ glm::ivec4 v(1);
+ Error += add(v, v) == glm::ivec4(2) ? 0 : 1;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_setup_message.cpp b/3rdparty/glm/source/test/core/core_setup_message.cpp
new file mode 100644
index 0000000..7594743
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_setup_message.cpp
@@ -0,0 +1,230 @@
+#define GLM_FORCE_MESSAGES
+#include <glm/vec3.hpp>
+#include <cstdio>
+
+// Prints the compiler GLM detected and returns non-zero when a known vendor
+// is matched but its exact version is not recognized.
+int test_compiler()
+{
+ int Error(0);
+
+ if(GLM_COMPILER & GLM_COMPILER_VC)
+ {
+ switch(GLM_COMPILER)
+ {
+ case GLM_COMPILER_VC12:
+ std::printf("Visual C++ 12 - 2013\n");
+ break;
+ case GLM_COMPILER_VC14:
+ std::printf("Visual C++ 14 - 2015\n");
+ break;
+ case GLM_COMPILER_VC15:
+ std::printf("Visual C++ 15 - 2017\n");
+ break;
+ case GLM_COMPILER_VC15_3:
+ std::printf("Visual C++ 15.3 - 2017\n");
+ break;
+ case GLM_COMPILER_VC15_5:
+ std::printf("Visual C++ 15.5 - 2017\n");
+ break;
+ case GLM_COMPILER_VC15_6:
+ std::printf("Visual C++ 15.6 - 2017\n");
+ break;
+ case GLM_COMPILER_VC15_7:
+ std::printf("Visual C++ 15.7 - 2017\n");
+ break;
+ case GLM_COMPILER_VC15_8:
+ std::printf("Visual C++ 15.8 - 2017\n");
+ break;
+ case GLM_COMPILER_VC15_9:
+ std::printf("Visual C++ 15.9 - 2017\n");
+ break;
+ case GLM_COMPILER_VC16:
+ std::printf("Visual C++ 16 - 2019\n");
+ break;
+ default:
+ std::printf("Visual C++ version not detected\n");
+ Error += 1;
+ break;
+ }
+ }
+ else if(GLM_COMPILER & GLM_COMPILER_GCC)
+ {
+ switch(GLM_COMPILER)
+ {
+ case GLM_COMPILER_GCC46:
+ std::printf("GCC 4.6\n");
+ break;
+ case GLM_COMPILER_GCC47:
+ std::printf("GCC 4.7\n");
+ break;
+ case GLM_COMPILER_GCC48:
+ std::printf("GCC 4.8\n");
+ break;
+ case GLM_COMPILER_GCC49:
+ std::printf("GCC 4.9\n");
+ break;
+ case GLM_COMPILER_GCC5:
+ std::printf("GCC 5\n");
+ break;
+ case GLM_COMPILER_GCC6:
+ std::printf("GCC 6\n");
+ break;
+ case GLM_COMPILER_GCC7:
+ std::printf("GCC 7\n");
+ break;
+ case GLM_COMPILER_GCC8:
+ std::printf("GCC 8\n");
+ break;
+ default:
+ std::printf("GCC version not detected\n");
+ Error += 1;
+ break;
+ }
+ }
+ else if(GLM_COMPILER & GLM_COMPILER_CUDA)
+ {
+ std::printf("CUDA\n");
+ }
+ else if(GLM_COMPILER & GLM_COMPILER_CLANG)
+ {
+ switch(GLM_COMPILER)
+ {
+ case GLM_COMPILER_CLANG34:
+ std::printf("Clang 3.4\n");
+ break;
+ case GLM_COMPILER_CLANG35:
+ std::printf("Clang 3.5\n");
+ break;
+ case GLM_COMPILER_CLANG36:
+ std::printf("Clang 3.6\n");
+ break;
+ case GLM_COMPILER_CLANG37:
+ std::printf("Clang 3.7\n");
+ break;
+ case GLM_COMPILER_CLANG38:
+ std::printf("Clang 3.8\n");
+ break;
+ case GLM_COMPILER_CLANG39:
+ std::printf("Clang 3.9\n");
+ break;
+ case GLM_COMPILER_CLANG40:
+ std::printf("Clang 4.0\n");
+ break;
+ case GLM_COMPILER_CLANG41:
+ std::printf("Clang 4.1\n");
+ break;
+ case GLM_COMPILER_CLANG42:
+ std::printf("Clang 4.2\n");
+ break;
+ default:
+ std::printf("LLVM version not detected\n");
+ // NOTE(review): unlike the VC/GCC/Intel branches, this default does
+ // not increment Error -- inconsistent; confirm whether intentional.
+ break;
+ }
+ }
+ else if(GLM_COMPILER & GLM_COMPILER_INTEL)
+ {
+ switch(GLM_COMPILER)
+ {
+ case GLM_COMPILER_INTEL14:
+ std::printf("ICC 14 - 2013 SP1\n");
+ break;
+ case GLM_COMPILER_INTEL15:
+ std::printf("ICC 15 - 2015\n");
+ break;
+ case GLM_COMPILER_INTEL16:
+ std::printf("ICC 16 - 2017\n");
+ break;
+ case GLM_COMPILER_INTEL17:
+ std::printf("ICC 17 - 20XX\n");
+ break;
+ default:
+ std::printf("Intel compiler version not detected\n");
+ Error += 1;
+ break;
+ }
+ }
+ else
+ {
+ std::printf("Undetected compiler\n");
+ Error += 1;
+ }
+
+ return Error;
+}
+
+// Checks that GLM_MODEL agrees with the pointer size (32- vs 64-bit build)
+// and prints the detected model.
+int test_model()
+{
+ int Error = 0;
+
+ Error += ((sizeof(void*) == 4) && (GLM_MODEL == GLM_MODEL_32)) || ((sizeof(void*) == 8) && (GLM_MODEL == GLM_MODEL_64)) ? 0 : 1;
+
+ if(GLM_MODEL == GLM_MODEL_32)
+ std::printf("GLM_MODEL_32\n");
+ else if(GLM_MODEL == GLM_MODEL_64)
+ std::printf("GLM_MODEL_64\n");
+
+ return Error;
+}
+
+// Prints every instruction-set bit GLM detected in GLM_ARCH.
+// Purely informational: always returns 0.
+int test_instruction_set()
+{
+ int Error = 0;
+
+ std::printf("GLM_ARCH: ");
+
+ if(GLM_ARCH & GLM_ARCH_ARM_BIT)
+ std::printf("ARM ");
+ if(GLM_ARCH & GLM_ARCH_NEON_BIT)
+ std::printf("NEON ");
+ if(GLM_ARCH & GLM_ARCH_AVX2_BIT)
+ std::printf("AVX2 ");
+ if(GLM_ARCH & GLM_ARCH_AVX_BIT)
+ std::printf("AVX ");
+ if(GLM_ARCH & GLM_ARCH_SSE42_BIT)
+ std::printf("SSE4.2 ");
+ if(GLM_ARCH & GLM_ARCH_SSE41_BIT)
+ std::printf("SSE4.1 ");
+ if(GLM_ARCH & GLM_ARCH_SSSE3_BIT)
+ std::printf("SSSE3 ");
+ if(GLM_ARCH & GLM_ARCH_SSE3_BIT)
+ std::printf("SSE3 ");
+ if(GLM_ARCH & GLM_ARCH_SSE2_BIT)
+ std::printf("SSE2 ");
+
+ std::printf("\n");
+
+ return Error;
+}
+
+// Prints the language-standard macro; informational only, always returns 0.
+int test_cpp_version()
+{
+ std::printf("__cplusplus: %d\n", static_cast<int>(__cplusplus));
+
+ return 0;
+}
+
+// Sanity-checks vector equality/inequality operators on equal vectors.
+int test_operators()
+{
+ glm::ivec3 A(1);
+ glm::ivec3 B(1);
+ bool R = A != B;
+ bool S = A == B;
+
+ return (S && !R) ? 0 : 1;
+}
+
+// Entry point: runs the detection tests only when no GLM_FORCE_*_UNKNOWN
+// override is active (overrides would make the detection output meaningless).
+int main()
+{
+ int Error = 0;
+
+# if !defined(GLM_FORCE_PLATFORM_UNKNOWN) && !defined(GLM_FORCE_COMPILER_UNKNOWN) && !defined(GLM_FORCE_ARCH_UNKNOWN) && !defined(GLM_FORCE_CXX_UNKNOWN)
+
+ Error += test_cpp_version();
+ Error += test_compiler();
+ Error += test_model();
+ Error += test_instruction_set();
+ Error += test_operators();
+
+# endif
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_setup_platform_unknown.cpp b/3rdparty/glm/source/test/core/core_setup_platform_unknown.cpp
new file mode 100644
index 0000000..9feaee3
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_setup_platform_unknown.cpp
@@ -0,0 +1,21 @@
+#ifndef GLM_FORCE_PLATFORM_UNKNOWN
+# define GLM_FORCE_PLATFORM_UNKNOWN
+#endif
+#ifndef GLM_FORCE_COMPILER_UNKNOWN
+# define GLM_FORCE_COMPILER_UNKNOWN
+#endif
+#ifndef GLM_FORCE_ARCH_UNKNOWN
+# define GLM_FORCE_ARCH_UNKNOWN
+#endif
+#ifndef GLM_FORCE_CXX_UNKNOWN
+# define GLM_FORCE_CXX_UNKNOWN
+#endif
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+// Compile-only test: building GLM with all platform/compiler/arch/C++
+// detection forced to "unknown" is the actual check.
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_setup_precision.cpp b/3rdparty/glm/source/test/core/core_setup_precision.cpp
new file mode 100644
index 0000000..b44bc50
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_setup_precision.cpp
@@ -0,0 +1,58 @@
+#define GLM_FORCE_INLINE
+#define GLM_PRECISION_HIGHP_FLOAT
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+// With GLM_PRECISION_HIGHP_FLOAT, the default matrix aliases must have the
+// same size as their explicit highp counterparts.
+static int test_mat()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::mat2) == sizeof(glm::highp_mat2) ? 0 : 1;
+ Error += sizeof(glm::mat3) == sizeof(glm::highp_mat3) ? 0 : 1;
+ Error += sizeof(glm::mat4) == sizeof(glm::highp_mat4) ? 0 : 1;
+
+ Error += sizeof(glm::mat2x2) == sizeof(glm::highp_mat2x2) ? 0 : 1;
+ Error += sizeof(glm::mat2x3) == sizeof(glm::highp_mat2x3) ? 0 : 1;
+ Error += sizeof(glm::mat2x4) == sizeof(glm::highp_mat2x4) ? 0 : 1;
+ Error += sizeof(glm::mat3x2) == sizeof(glm::highp_mat3x2) ? 0 : 1;
+ Error += sizeof(glm::mat3x3) == sizeof(glm::highp_mat3x3) ? 0 : 1;
+ Error += sizeof(glm::mat3x4) == sizeof(glm::highp_mat3x4) ? 0 : 1;
+ Error += sizeof(glm::mat4x2) == sizeof(glm::highp_mat4x2) ? 0 : 1;
+ Error += sizeof(glm::mat4x3) == sizeof(glm::highp_mat4x3) ? 0 : 1;
+ Error += sizeof(glm::mat4x4) == sizeof(glm::highp_mat4x4) ? 0 : 1;
+
+ return Error;
+}
+
+// Same size check for the default float vector aliases.
+static int test_vec()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec2) == sizeof(glm::highp_vec2) ? 0 : 1;
+ Error += sizeof(glm::vec3) == sizeof(glm::highp_vec3) ? 0 : 1;
+ Error += sizeof(glm::vec4) == sizeof(glm::highp_vec4) ? 0 : 1;
+
+ return Error;
+}
+
+// Same size check for the default double vector aliases.
+static int test_dvec()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::dvec2) == sizeof(glm::highp_dvec2) ? 0 : 1;
+ Error += sizeof(glm::dvec3) == sizeof(glm::highp_dvec3) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::highp_dvec4) ? 0 : 1;
+
+ return Error;
+}
+
+// Entry point: runs all precision size tests; returns the error count.
+int main()
+{
+ int Error = 0;
+
+ Error += test_mat();
+ Error += test_vec();
+ Error += test_dvec();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_aligned.cpp b/3rdparty/glm/source/test/core/core_type_aligned.cpp
new file mode 100644
index 0000000..dff0939
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_aligned.cpp
@@ -0,0 +1,92 @@
+#define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+#include <glm/glm.hpp>
+
+#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+#include <type_traits>
+
+static_assert(sizeof(glm::bvec4) > sizeof(glm::bvec2), "Invalid sizeof");
+static_assert(sizeof(glm::ivec4) > sizeof(glm::uvec2), "Invalid sizeof");
+static_assert(sizeof(glm::dvec4) > sizeof(glm::dvec2), "Invalid sizeof");
+
+static_assert(sizeof(glm::bvec4) == sizeof(glm::bvec3), "Invalid sizeof");
+static_assert(sizeof(glm::uvec4) == sizeof(glm::uvec3), "Invalid sizeof");
+static_assert(sizeof(glm::dvec4) == sizeof(glm::dvec3), "Invalid sizeof");
+
+// Checks size and alignment of glm::detail::storage with aligned=true:
+// sizes stay N*sizeof(int) while alignment grows to the SIMD-friendly
+// 4/8/16 bytes.
+static int test_storage_aligned()
+{
+ int Error = 0;
+
+ size_t size1_aligned = sizeof(glm::detail::storage<1, int, true>::type);
+ Error += size1_aligned == sizeof(int) * 1 ? 0 : 1;
+ size_t size2_aligned = sizeof(glm::detail::storage<2, int, true>::type);
+ Error += size2_aligned == sizeof(int) * 2 ? 0 : 1;
+ size_t size4_aligned = sizeof(glm::detail::storage<4, int, true>::type);
+ Error += size4_aligned == sizeof(int) * 4 ? 0 : 1;
+
+ size_t align1_aligned = alignof(glm::detail::storage<1, int, true>::type);
+ Error += align1_aligned == 4 ? 0 : 1;
+ size_t align2_aligned = alignof(glm::detail::storage<2, int, true>::type);
+ Error += align2_aligned == 8 ? 0 : 1;
+ size_t align4_aligned = alignof(glm::detail::storage<4, int, true>::type);
+ Error += align4_aligned == 16 ? 0 : 1;
+
+ return Error;
+}
+
+// With aligned=false, alignment must stay at the element's natural value.
+static int test_storage_unaligned()
+{
+ int Error = 0;
+
+ size_t align1_unaligned = alignof(glm::detail::storage<1, int, false>::type);
+ Error += align1_unaligned == sizeof(int) ? 0 : 1;
+ size_t align2_unaligned = alignof(glm::detail::storage<2, int, false>::type);
+ Error += align2_unaligned == sizeof(int) ? 0 : 1;
+ size_t align3_unaligned = alignof(glm::detail::storage<3, int, false>::type);
+ Error += align3_unaligned == sizeof(int) ? 0 : 1;
+ size_t align4_unaligned = alignof(glm::detail::storage<4, int, false>::type);
+ Error += align4_unaligned == sizeof(int) ? 0 : 1;
+
+ return Error;
+}
+
+// With default-aligned gentypes, a vec3 member occupies a full 16-byte
+// slot, so both struct layouts below come out at 48 bytes.
+static int test_vec3_aligned()
+{
+ int Error = 0;
+
+ struct Struct1
+ {
+ glm::vec4 A;
+ float B;
+ glm::vec3 C;
+ };
+
+ std::size_t const Size1 = sizeof(Struct1);
+ Error += Size1 == 48 ? 0 : 1;
+
+ struct Struct2
+ {
+ glm::vec4 A;
+ glm::vec3 B;
+ float C;
+ };
+
+ std::size_t const Size2 = sizeof(Struct2);
+ Error += Size2 == 48 ? 0 : 1;
+
+ return Error;
+}
+
+#endif
+
+// Entry point: the alignment tests only exist when aligned gentypes are
+// enabled for this compiler/configuration.
+int main()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ Error += test_storage_aligned();
+ Error += test_storage_unaligned();
+ Error += test_vec3_aligned();
+# endif
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_cast.cpp b/3rdparty/glm/source/test/core/core_type_cast.cpp
new file mode 100644
index 0000000..7ff1901
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_cast.cpp
@@ -0,0 +1,146 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/glm.hpp>
+#include <algorithm>
+#include <vector>
+#include <iterator>
+
+// User-defined type convertible to glm::vec2, used to test that such
+// conversions participate in static_cast below.
+struct my_vec2
+{
+ operator glm::vec2() { return glm::vec2(x, y); }
+ float x, y;
+};
+
+// Casts a vec2 across the lowp/mediump/highp precision aliases, both by
+// construction and by static_cast, and checks the values survive.
+int test_vec2_cast()
+{
+ glm::vec2 A(1.0f, 2.0f);
+ glm::lowp_vec2 B(A);
+ glm::mediump_vec2 C(A);
+ glm::highp_vec2 D(A);
+
+ glm::vec2 E = static_cast<glm::vec2>(A);
+ glm::lowp_vec2 F = static_cast<glm::lowp_vec2>(A);
+ glm::mediump_vec2 G = static_cast<glm::mediump_vec2>(A);
+ glm::highp_vec2 H = static_cast<glm::highp_vec2>(A);
+
+ // NOTE(review): I is default-constructed, so the conversion operator
+ // reads uninitialized x/y -- undefined behavior; J/K are never checked.
+ my_vec2 I;
+ glm::vec2 J = static_cast<glm::vec2>(I);
+ glm::vec2 K(7.8f);
+
+ int Error(0);
+
+ Error += glm::all(glm::equal(A, E, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(B, F, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(C, G, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(D, H, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+// Same precision-alias cast round-trip as test_vec2_cast, for vec3.
+int test_vec3_cast()
+{
+ glm::vec3 A(1.0f, 2.0f, 3.0f);
+ glm::lowp_vec3 B(A);
+ glm::mediump_vec3 C(A);
+ glm::highp_vec3 D(A);
+
+ glm::vec3 E = static_cast<glm::vec3>(A);
+ glm::lowp_vec3 F = static_cast<glm::lowp_vec3>(A);
+ glm::mediump_vec3 G = static_cast<glm::mediump_vec3>(A);
+ glm::highp_vec3 H = static_cast<glm::highp_vec3>(A);
+
+ int Error(0);
+
+ Error += glm::all(glm::equal(A, E, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(B, F, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(C, G, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(D, H, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+// Same precision-alias cast round-trip, for vec4.
+int test_vec4_cast()
+{
+ glm::vec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::lowp_vec4 B(A);
+ glm::mediump_vec4 C(A);
+ glm::highp_vec4 D(A);
+
+ glm::vec4 E = static_cast<glm::vec4>(A);
+ glm::lowp_vec4 F = static_cast<glm::lowp_vec4>(A);
+ glm::mediump_vec4 G = static_cast<glm::mediump_vec4>(A);
+ glm::highp_vec4 H = static_cast<glm::highp_vec4>(A);
+
+ int Error(0);
+
+ Error += glm::all(glm::equal(A, E, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(B, F, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(C, G, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(D, H, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+// Compile/behavior smoke test: std::copy from dvec containers into vec
+// containers must work via GLM's implicit narrowing conversions.
+// No values are asserted; the copies themselves are the check.
+int test_std_copy()
+{
+ int Error = 0;
+
+ {
+ std::vector<int> High;
+ High.resize(64);
+ std::vector<int> Medium(High.size());
+
+ std::copy(High.begin(), High.end(), Medium.begin());
+
+ *Medium.begin() = *High.begin();
+ }
+
+ {
+ std::vector<glm::dvec4> High4;
+ High4.resize(64);
+ std::vector<glm::vec4> Medium4(High4.size());
+
+ std::copy(High4.begin(), High4.end(), Medium4.begin());
+
+ *Medium4.begin() = *High4.begin();
+ }
+
+ {
+ std::vector<glm::dvec3> High3;
+ High3.resize(64);
+ std::vector<glm::vec3> Medium3(High3.size());
+
+ std::copy(High3.begin(), High3.end(), Medium3.begin());
+
+ *Medium3.begin() = *High3.begin();
+ }
+
+ {
+ std::vector<glm::dvec2> High2;
+ High2.resize(64);
+ std::vector<glm::vec2> Medium2(High2.size());
+
+ std::copy(High2.begin(), High2.end(), Medium2.begin());
+
+ *Medium2.begin() = *High2.begin();
+ }
+
+ // Direct double-to-float vector assignment must also compile.
+ glm::dvec4 v1;
+ glm::vec4 v2;
+
+ v2 = v1;
+
+ return Error;
+}
+
+// Entry point: runs all cast tests; returns the error count.
+int main()
+{
+ int Error = 0;
+
+ Error += test_std_copy();
+ Error += test_vec2_cast();
+ Error += test_vec3_cast();
+ Error += test_vec4_cast();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_ctor.cpp b/3rdparty/glm/source/test/core/core_type_ctor.cpp
new file mode 100644
index 0000000..078fcdf
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_ctor.cpp
@@ -0,0 +1,351 @@
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/glm.hpp>
+
+// Bit-pattern checks for vector constructors: a float/int union is used to
+// inspect the raw bits (1065353216 == 0x3F800000, the IEEE-754 bits of
+// 1.0f). Union type punning is deliberate test-only usage here.
+static int test_vec1_ctor()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+ {
+ union pack
+ {
+ glm::vec1 f;
+ glm::ivec1 i;
+ } A, B;
+
+ A.f = glm::vec1(0);
+ Error += glm::all(glm::equal(A.i, glm::ivec1(0))) ? 0 : 1;
+
+ B.f = glm::vec1(1);
+ Error += glm::all(glm::equal(B.i, glm::ivec1(1065353216))) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+
+ return Error;
+}
+
+// Same bit-pattern constructor check for vec2.
+static int test_vec2_ctor()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+ {
+ union pack
+ {
+ glm::vec2 f;
+ glm::ivec2 i;
+ } A, B;
+
+ A.f = glm::vec2(0);
+ Error += glm::all(glm::equal(A.i, glm::ivec2(0))) ? 0 : 1;
+
+ B.f = glm::vec2(1);
+ Error += glm::all(glm::equal(B.i, glm::ivec2(1065353216))) ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+// Same bit-pattern constructor check for vec3.
+static int test_vec3_ctor()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+ {
+ union pack
+ {
+ glm::vec3 f;
+ glm::ivec3 i;
+ } A, B;
+
+ A.f = glm::vec3(0);
+ Error += glm::all(glm::equal(A.i, glm::ivec3(0))) ? 0 : 1;
+
+ B.f = glm::vec3(1);
+ Error += glm::all(glm::equal(B.i, glm::ivec3(1065353216))) ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+// Same bit-pattern constructor check for vec4.
+static int test_vec4_ctor()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE
+ {
+ union pack
+ {
+ glm::vec4 f;
+ glm::ivec4 i;
+ } A, B;
+
+ A.f = glm::vec4(0);
+ Error += glm::all(glm::equal(A.i, glm::ivec4(0))) ? 0 : 1;
+
+ B.f = glm::vec4(1);
+ Error += glm::all(glm::equal(B.i, glm::ivec4(1065353216))) ? 0 : 1;
+ }
+# endif
+
+ return Error;
+}
+
+// Matrix constructor checks: mat(0) must zero the first column and mat(1)
+// must produce an identity-style first column.
+// NOTE(review): unlike the vec tests, both union members here are the SAME
+// type, so the union is redundant -- likely copy-paste residue; confirm.
+static int test_mat2x2_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat2x2 f;
+ glm::mat2x2 i;
+ } A, B;
+
+ A.f = glm::mat2x2(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec2(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat2x2(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec2(1, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check for mat2x3.
+static int test_mat2x3_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat2x3 f;
+ glm::mat2x3 i;
+ } A, B;
+
+ A.f = glm::mat2x3(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec3(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat2x3(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec3(1, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check for mat2x4.
+static int test_mat2x4_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat2x4 f;
+ glm::mat2x4 i;
+ } A, B;
+
+ A.f = glm::mat2x4(0);
+ glm::vec4 const C(0, 0, 0, 0);
+ Error += glm::all(glm::equal(A.i[0], C, glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat2x4(1);
+ glm::vec4 const D(1, 0, 0, 0);
+ Error += glm::all(glm::equal(B.i[0], D, glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check (zero vs identity first column) for mat3x2.
+static int test_mat3x2_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat3x2 f;
+ glm::mat3x2 i;
+ } A, B;
+
+ A.f = glm::mat3x2(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec2(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat3x2(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec2(1, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check for mat3x3.
+static int test_mat3x3_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat3x3 f;
+ glm::mat3x3 i;
+ } A, B;
+
+ A.f = glm::mat3x3(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec3(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat3x3(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec3(1, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check for mat3x4.
+static int test_mat3x4_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat3x4 f;
+ glm::mat3x4 i;
+ } A, B;
+
+ A.f = glm::mat3x4(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec4(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat3x4(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec4(1, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check (zero vs identity first column) for mat4x2.
+static int test_mat4x2_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat4x2 f;
+ glm::mat4x2 i;
+ } A, B;
+
+ A.f = glm::mat4x2(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec2(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat4x2(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec2(1, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check for mat4x3.
+static int test_mat4x3_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat4x3 f;
+ glm::mat4x3 i;
+ } A, B;
+
+ A.f = glm::mat4x3(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec3(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat4x3(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec3(1, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Same constructor check for mat4x4.
+static int test_mat4x4_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::mat4 f;
+ glm::mat4 i;
+ } A, B;
+
+ A.f = glm::mat4(0);
+ Error += glm::all(glm::equal(A.i[0], glm::vec4(0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::mat4(1);
+ Error += glm::all(glm::equal(B.i[0], glm::vec4(1, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Constructor check for quaternions (w, x, y, z argument order).
+// NOTE(review): both union members are the same type here as well.
+static int test_quat_ctor()
+{
+ int Error = 0;
+
+# if GLM_LANG & GLM_LANG_CXX11_FLAG
+ {
+ union pack
+ {
+ glm::quat f;
+ glm::quat i;
+ } A, B;
+
+ A.f = glm::quat(0, 0, 0, 0);
+ Error += glm::all(glm::equal(A.i, glm::quat(0, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+
+ B.f = glm::quat(1, 1, 1, 1);
+ Error += glm::all(glm::equal(B.i, glm::quat(1, 1, 1, 1), glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_LANG & GLM_LANG_CXX11_FLAG
+
+ return Error;
+}
+
+// Entry point: runs every constructor test; returns the error count.
+int main()
+{
+ int Error = 0;
+
+ Error += test_vec1_ctor();
+ Error += test_vec2_ctor();
+ Error += test_vec3_ctor();
+ Error += test_vec4_ctor();
+ Error += test_mat2x2_ctor();
+ Error += test_mat2x3_ctor();
+ Error += test_mat2x4_ctor();
+ Error += test_mat3x2_ctor();
+ Error += test_mat3x3_ctor();
+ Error += test_mat3x4_ctor();
+ Error += test_mat4x2_ctor();
+ Error += test_mat4x3_ctor();
+ Error += test_mat4x4_ctor();
+ Error += test_quat_ctor();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_int.cpp b/3rdparty/glm/source/test/core/core_type_int.cpp
new file mode 100644
index 0000000..2631509
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_int.cpp
@@ -0,0 +1,26 @@
+#include <glm/glm.hpp>
+#include <glm/ext/scalar_int_sized.hpp>
+
+static int test_bit_operator()
+{
+ int Error = 0;
+
+ glm::ivec4 const a(1);
+ glm::ivec4 const b = ~a;
+ Error += glm::all(glm::equal(b, glm::ivec4(-2))) ? 0 : 1;
+
+ glm::int32 const c(1);
+ glm::int32 const d = ~c;
+ Error += d == -2 ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_bit_operator();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_length.cpp b/3rdparty/glm/source/test/core/core_type_length.cpp
new file mode 100644
index 0000000..f088cb3
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_length.cpp
@@ -0,0 +1,78 @@
+#include <glm/glm.hpp>
+
+static int test_length_mat_non_squared()
+{
+ int Error = 0;
+
+ Error += glm::mat2x3().length() == 2 ? 0 : 1;
+ Error += glm::mat2x4().length() == 2 ? 0 : 1;
+ Error += glm::mat3x2().length() == 3 ? 0 : 1;
+ Error += glm::mat3x4().length() == 3 ? 0 : 1;
+ Error += glm::mat4x2().length() == 4 ? 0 : 1;
+ Error += glm::mat4x3().length() == 4 ? 0 : 1;
+
+ Error += glm::dmat2x3().length() == 2 ? 0 : 1;
+ Error += glm::dmat2x4().length() == 2 ? 0 : 1;
+ Error += glm::dmat3x2().length() == 3 ? 0 : 1;
+ Error += glm::dmat3x4().length() == 3 ? 0 : 1;
+ Error += glm::dmat4x2().length() == 4 ? 0 : 1;
+ Error += glm::dmat4x3().length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_length_mat()
+{
+ int Error = 0;
+
+ Error += glm::mat2().length() == 2 ? 0 : 1;
+ Error += glm::mat3().length() == 3 ? 0 : 1;
+ Error += glm::mat4().length() == 4 ? 0 : 1;
+ Error += glm::mat2x2().length() == 2 ? 0 : 1;
+ Error += glm::mat3x3().length() == 3 ? 0 : 1;
+ Error += glm::mat4x4().length() == 4 ? 0 : 1;
+
+ Error += glm::dmat2().length() == 2 ? 0 : 1;
+ Error += glm::dmat3().length() == 3 ? 0 : 1;
+ Error += glm::dmat4().length() == 4 ? 0 : 1;
+ Error += glm::dmat2x2().length() == 2 ? 0 : 1;
+ Error += glm::dmat3x3().length() == 3 ? 0 : 1;
+ Error += glm::dmat4x4().length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_length_vec()
+{
+ int Error = 0;
+
+ Error += glm::vec2().length() == 2 ? 0 : 1;
+ Error += glm::vec3().length() == 3 ? 0 : 1;
+ Error += glm::vec4().length() == 4 ? 0 : 1;
+
+ Error += glm::ivec2().length() == 2 ? 0 : 1;
+ Error += glm::ivec3().length() == 3 ? 0 : 1;
+ Error += glm::ivec4().length() == 4 ? 0 : 1;
+
+ Error += glm::uvec2().length() == 2 ? 0 : 1;
+ Error += glm::uvec3().length() == 3 ? 0 : 1;
+ Error += glm::uvec4().length() == 4 ? 0 : 1;
+
+ Error += glm::dvec2().length() == 2 ? 0 : 1;
+ Error += glm::dvec3().length() == 3 ? 0 : 1;
+ Error += glm::dvec4().length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_length_vec();
+ Error += test_length_mat();
+ Error += test_length_mat_non_squared();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat2x2.cpp b/3rdparty/glm/source/test/core/core_type_mat2x2.cpp
new file mode 100644
index 0000000..2f8b018
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat2x2.cpp
@@ -0,0 +1,177 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/matrix.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+int test_operators()
+{
+ glm::mat2x2 l(1.0f);
+ glm::mat2x2 m(1.0f);
+ glm::vec2 u(1.0f);
+ glm::vec2 v(1.0f);
+ float x = 1.0f;
+ glm::vec2 a = m * u;
+ glm::vec2 b = v * m;
+ glm::mat2x2 n = x / m;
+ glm::mat2x2 o = m / x;
+ glm::mat2x2 p = x * m;
+ glm::mat2x2 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_inverse()
+{
+ int Error(0);
+
+ {
+ glm::mat2 const Matrix(1, 2, 3, 4);
+ glm::mat2 const Inverse = glm::inverse(Matrix);
+ glm::mat2 const Identity = Matrix * Inverse;
+
+ Error += glm::all(glm::equal(Identity[0], glm::vec2(1.0f, 0.0f), glm::vec2(0.01f))) ? 0 : 1;
+ Error += glm::all(glm::equal(Identity[1], glm::vec2(0.0f, 1.0f), glm::vec2(0.01f))) ? 0 : 1;
+ }
+
+ {
+ glm::mat2 const Matrix(1, 2, 3, 4);
+ glm::mat2 const Identity = Matrix / Matrix;
+
+ Error += glm::all(glm::equal(Identity[0], glm::vec2(1.0f, 0.0f), glm::vec2(0.01f))) ? 0 : 1;
+ Error += glm::all(glm::equal(Identity[1], glm::vec2(0.0f, 1.0f), glm::vec2(0.01f))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_ctr()
+{
+ int Error = 0;
+
+ {
+ glm::mediump_mat2x2 const A(1.0f);
+ glm::highp_mat2x2 const B(A);
+ glm::mediump_mat2x2 const C(B);
+
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+#if GLM_HAS_INITIALIZER_LISTS
+ glm::mat2x2 m0(
+ glm::vec2(0, 1),
+ glm::vec2(2, 3));
+
+ glm::mat2x2 m1{0, 1, 2, 3};
+
+ glm::mat2x2 m2{
+ {0, 1},
+ {2, 3}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat2x2> v1{
+ {0, 1, 2, 3},
+ {0, 1, 2, 3}
+ };
+
+ std::vector<glm::mat2x2> v2{
+ {
+ { 0, 1},
+ { 4, 5}
+ },
+ {
+ { 0, 1},
+ { 4, 5}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat2 B(A);
+ glm::mat2 Identity(1.0f);
+
+ Error += glm::all(glm::equal(B, Identity, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+int test_size()
+{
+ int Error = 0;
+
+ Error += 16 == sizeof(glm::mat2x2) ? 0 : 1;
+ Error += 32 == sizeof(glm::dmat2x2) ? 0 : 1;
+ Error += glm::mat2x2().length() == 2 ? 0 : 1;
+ Error += glm::dmat2x2().length() == 2 ? 0 : 1;
+ Error += glm::mat2x2::length() == 2 ? 0 : 1;
+ Error += glm::dmat2x2::length() == 2 ? 0 : 1;
+
+ return Error;
+}
+
+int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat2x2::length() == 2, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_inverse();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_mat2x3.cpp b/3rdparty/glm/source/test/core/core_type_mat2x3.cpp
new file mode 100644
index 0000000..e3ad76b
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat2x3.cpp
@@ -0,0 +1,142 @@
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static int test_operators()
+{
+ glm::mat2x3 l(1.0f);
+ glm::mat2x3 m(1.0f);
+ glm::vec2 u(1.0f);
+ glm::vec3 v(1.0f);
+ float x = 1.0f;
+ glm::vec3 a = m * u;
+ glm::vec2 b = v * m;
+ glm::mat2x3 n = x / m;
+ glm::mat2x3 o = m / x;
+ glm::mat2x3 p = x * m;
+ glm::mat2x3 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_ctr()
+{
+ int Error(0);
+
+#if GLM_HAS_INITIALIZER_LISTS
+ glm::mat2x3 m0(
+ glm::vec3(0, 1, 2),
+ glm::vec3(3, 4, 5));
+
+ glm::mat2x3 m1{0, 1, 2, 3, 4, 5};
+
+ glm::mat2x3 m2{
+ {0, 1, 2},
+ {3, 4, 5}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat2x3> v1{
+ {0, 1, 2, 3, 4, 5},
+ {0, 1, 2, 3, 4, 5}
+ };
+
+ std::vector<glm::mat2x3> v2{
+ {
+ { 0, 1, 2},
+ { 4, 5, 6}
+ },
+ {
+ { 0, 1, 2},
+ { 4, 5, 6}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat2x3 B(A);
+ glm::mat2x3 Identity(1.0f);
+
+ Error += glm::all(glm::equal(B, Identity, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+int test_size()
+{
+ int Error = 0;
+
+ Error += 24 == sizeof(glm::mat2x3) ? 0 : 1;
+ Error += 48 == sizeof(glm::dmat2x3) ? 0 : 1;
+ Error += glm::mat2x3().length() == 2 ? 0 : 1;
+ Error += glm::dmat2x3().length() == 2 ? 0 : 1;
+ Error += glm::mat2x3::length() == 2 ? 0 : 1;
+ Error += glm::dmat2x3::length() == 2 ? 0 : 1;
+
+ return Error;
+}
+
+int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat2x3::length() == 2, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_mat2x4.cpp b/3rdparty/glm/source/test/core/core_type_mat2x4.cpp
new file mode 100644
index 0000000..ade3a44
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat2x4.cpp
@@ -0,0 +1,147 @@
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static int test_operators()
+{
+ glm::mat2x4 l(1.0f);
+ glm::mat2x4 m(1.0f);
+ glm::vec2 u(1.0f);
+ glm::vec4 v(1.0f);
+ float x = 1.0f;
+ glm::vec4 a = m * u;
+ glm::vec2 b = v * m;
+ glm::mat2x4 n = x / m;
+ glm::mat2x4 o = m / x;
+ glm::mat2x4 p = x * m;
+ glm::mat2x4 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_ctr()
+{
+ int Error(0);
+
+#if(GLM_HAS_INITIALIZER_LISTS)
+ glm::mat2x4 m0(
+ glm::vec4(0, 1, 2, 3),
+ glm::vec4(4, 5, 6, 7));
+
+ glm::mat2x4 m1{0, 1, 2, 3, 4, 5, 6, 7};
+
+ glm::mat2x4 m2{
+ {0, 1, 2, 3},
+ {4, 5, 6, 7}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat2x4> v1{
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ {0, 1, 2, 3, 4, 5, 6, 7}
+ };
+
+ std::vector<glm::mat2x4> v2{
+ {
+ { 0, 1, 2, 3},
+ { 4, 5, 6, 7}
+ },
+ {
+ { 0, 1, 2, 3},
+ { 4, 5, 6, 7}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat2x4 B(A);
+ glm::mat2x4 Identity(1.0f);
+
+ for(glm::length_t i = 0, length = B.length(); i < length; ++i)
+ Error += glm::all(glm::epsilonEqual(B[i], Identity[i], glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 32 == sizeof(glm::mat2x4) ? 0 : 1;
+ Error += 64 == sizeof(glm::dmat2x4) ? 0 : 1;
+ Error += glm::mat2x4().length() == 2 ? 0 : 1;
+ Error += glm::dmat2x4().length() == 2 ? 0 : 1;
+ Error += glm::mat2x4::length() == 2 ? 0 : 1;
+ Error += glm::dmat2x4::length() == 2 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat2x4::length() == 2, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
+
+
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat3x2.cpp b/3rdparty/glm/source/test/core/core_type_mat3x2.cpp
new file mode 100644
index 0000000..7a40f90
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat3x2.cpp
@@ -0,0 +1,148 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static bool test_operators()
+{
+ glm::mat3x2 l(1.0f);
+ glm::mat3x2 m(1.0f);
+ glm::vec3 u(1.0f);
+ glm::vec2 v(1.0f);
+ float x = 1.0f;
+ glm::vec2 a = m * u;
+ glm::vec3 b = v * m;
+ glm::mat3x2 n = x / m;
+ glm::mat3x2 o = m / x;
+ glm::mat3x2 p = x * m;
+ glm::mat3x2 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_ctr()
+{
+ int Error(0);
+
+#if(GLM_HAS_INITIALIZER_LISTS)
+ glm::mat3x2 m0(
+ glm::vec2(0, 1),
+ glm::vec2(2, 3),
+ glm::vec2(4, 5));
+
+ glm::mat3x2 m1{0, 1, 2, 3, 4, 5};
+
+ glm::mat3x2 m2{
+ {0, 1},
+ {2, 3},
+ {4, 5}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat3x2> v1{
+ {0, 1, 2, 3, 4, 5},
+ {0, 1, 2, 3, 4, 5}
+ };
+
+ std::vector<glm::mat3x2> v2{
+ {
+ { 0, 1},
+ { 2, 3},
+ { 4, 5}
+ },
+ {
+ { 0, 1},
+ { 2, 3},
+ { 4, 5}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat3x2 B(A);
+ glm::mat3x2 Identity(1.0f);
+
+ Error += glm::all(glm::equal(B, Identity, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 24 == sizeof(glm::mat3x2) ? 0 : 1;
+ Error += 48 == sizeof(glm::dmat3x2) ? 0 : 1;
+ Error += glm::mat3x2().length() == 3 ? 0 : 1;
+ Error += glm::dmat3x2().length() == 3 ? 0 : 1;
+ Error += glm::mat3x2::length() == 3 ? 0 : 1;
+ Error += glm::dmat3x2::length() == 3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat3x2::length() == 3, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat3x3.cpp b/3rdparty/glm/source/test/core/core_type_mat3x3.cpp
new file mode 100644
index 0000000..99e1f41
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat3x3.cpp
@@ -0,0 +1,197 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/matrix.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static int test_mat3x3()
+{
+ glm::dmat3 Mat0(
+ glm::dvec3(0.6f, 0.2f, 0.3f),
+ glm::dvec3(0.2f, 0.7f, 0.5f),
+ glm::dvec3(0.3f, 0.5f, 0.7f));
+ glm::dmat3 Inv0 = glm::inverse(Mat0);
+ glm::dmat3 Res0 = Mat0 * Inv0;
+
+ return glm::all(glm::equal(Res0, glm::dmat3(1.0), 0.01)) ? 0 : 1;
+}
+
+static int test_operators()
+{
+ glm::mat3x3 l(1.0f);
+ glm::mat3x3 m(1.0f);
+ glm::vec3 u(1.0f);
+ glm::vec3 v(1.0f);
+ float x = 1.0f;
+ glm::vec3 a = m * u;
+ glm::vec3 b = v * m;
+ glm::mat3x3 n = x / m;
+ glm::mat3x3 o = m / x;
+ glm::mat3x3 p = x * m;
+ glm::mat3x3 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+static int test_inverse()
+{
+ int Error(0);
+
+ {
+ glm::mat3 const Matrix(
+ glm::vec3(0.6f, 0.2f, 0.3f),
+ glm::vec3(0.2f, 0.7f, 0.5f),
+ glm::vec3(0.3f, 0.5f, 0.7f));
+ glm::mat3 const Inverse = glm::inverse(Matrix);
+ glm::mat3 const Identity = Matrix * Inverse;
+
+ Error += glm::all(glm::equal(Identity[0], glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(0.01f))) ? 0 : 1;
+ Error += glm::all(glm::equal(Identity[1], glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(0.01f))) ? 0 : 1;
+ Error += glm::all(glm::equal(Identity[2], glm::vec3(0.0f, 0.0f, 1.0f), glm::vec3(0.01f))) ? 0 : 1;
+ }
+
+ {
+ glm::mat3 const Matrix(
+ glm::vec3(0.6f, 0.2f, 0.3f),
+ glm::vec3(0.2f, 0.7f, 0.5f),
+ glm::vec3(0.3f, 0.5f, 0.7f));
+ glm::mat3 const Identity = Matrix / Matrix;
+
+ Error += glm::all(glm::equal(Identity[0], glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(0.01f))) ? 0 : 1;
+ Error += glm::all(glm::equal(Identity[1], glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(0.01f))) ? 0 : 1;
+ Error += glm::all(glm::equal(Identity[2], glm::vec3(0.0f, 0.0f, 1.0f), glm::vec3(0.01f))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_ctr()
+{
+ int Error(0);
+
+#if(GLM_HAS_INITIALIZER_LISTS)
+ glm::mat3x3 m0(
+ glm::vec3(0, 1, 2),
+ glm::vec3(3, 4, 5),
+ glm::vec3(6, 7, 8));
+
+ glm::mat3x3 m1{0, 1, 2, 3, 4, 5, 6, 7, 8};
+
+ glm::mat3x3 m2{
+ {0, 1, 2},
+ {3, 4, 5},
+ {6, 7, 8}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat3x3> v1{
+ {0, 1, 2, 3, 4, 5, 6, 7, 8},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8}
+ };
+
+ std::vector<glm::mat3x3> v2{
+ {
+ { 0, 1, 2},
+ { 3, 4, 5},
+ { 6, 7, 8}
+ },
+ {
+ { 0, 1, 2},
+ { 3, 4, 5},
+ { 6, 7, 8}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat3x3 B(A);
+ glm::mat3x3 Identity(1.0f);
+
+ Error += glm::all(glm::equal(B, Identity, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 36 == sizeof(glm::mat3x3) ? 0 : 1;
+ Error += 72 == sizeof(glm::dmat3x3) ? 0 : 1;
+ Error += glm::mat3x3().length() == 3 ? 0 : 1;
+ Error += glm::dmat3x3().length() == 3 ? 0 : 1;
+ Error += glm::mat3x3::length() == 3 ? 0 : 1;
+ Error += glm::dmat3x3::length() == 3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat3x3::length() == 3, "GLM: Failed constexpr");
+
+ constexpr glm::mat3x3 const Z(0.0f);
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_mat3x3();
+ Error += test_operators();
+ Error += test_inverse();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat3x4.cpp b/3rdparty/glm/source/test/core/core_type_mat3x4.cpp
new file mode 100644
index 0000000..97d4574
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat3x4.cpp
@@ -0,0 +1,149 @@
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static bool test_operators()
+{
+ glm::mat3x4 l(1.0f);
+ glm::mat3x4 m(1.0f);
+ glm::vec3 u(1.0f);
+ glm::vec4 v(1.0f);
+ float x = 1.0f;
+ glm::vec4 a = m * u;
+ glm::vec3 b = v * m;
+ glm::mat3x4 n = x / m;
+ glm::mat3x4 o = m / x;
+ glm::mat3x4 p = x * m;
+ glm::mat3x4 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_ctr()
+{
+ int Error(0);
+
+#if(GLM_HAS_INITIALIZER_LISTS)
+ glm::mat3x4 m0(
+ glm::vec4(0, 1, 2, 3),
+ glm::vec4(4, 5, 6, 7),
+ glm::vec4(8, 9, 10, 11));
+
+ glm::mat3x4 m1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+
+ glm::mat3x4 m2{
+ {0, 1, 2, 3},
+ {4, 5, 6, 7},
+ {8, 9, 10, 11}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat3x4> v1{
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
+ };
+
+ std::vector<glm::mat3x4> v2{
+ {
+ { 0, 1, 2, 3},
+ { 4, 5, 6, 7},
+ { 8, 9, 10, 11}
+ },
+ {
+ { 0, 1, 2, 3},
+ { 4, 5, 6, 7},
+ { 8, 9, 10, 11}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat3x4 B(A);
+ glm::mat3x4 Identity(1.0f);
+
+ for(glm::length_t i = 0, length = B.length(); i < length; ++i)
+ Error += glm::all(glm::epsilonEqual(B[i], Identity[i], glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 48 == sizeof(glm::mat3x4) ? 0 : 1;
+ Error += 96 == sizeof(glm::dmat3x4) ? 0 : 1;
+ Error += glm::mat3x4().length() == 3 ? 0 : 1;
+ Error += glm::dmat3x4().length() == 3 ? 0 : 1;
+ Error += glm::mat3x4::length() == 3 ? 0 : 1;
+ Error += glm::dmat3x4::length() == 3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat3x4::length() == 3, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat4x2.cpp b/3rdparty/glm/source/test/core/core_type_mat4x2.cpp
new file mode 100644
index 0000000..7133edc
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat4x2.cpp
@@ -0,0 +1,151 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static int test_operators()
+{
+ glm::mat4x2 l(1.0f);
+ glm::mat4x2 m(1.0f);
+ glm::vec4 u(1.0f);
+ glm::vec2 v(1.0f);
+ float x = 1.0f;
+ glm::vec2 a = m * u;
+ glm::vec4 b = v * m;
+ glm::mat4x2 n = x / m;
+ glm::mat4x2 o = m / x;
+ glm::mat4x2 p = x * m;
+ glm::mat4x2 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_ctr()
+{
+ int Error(0);
+
+#if(GLM_HAS_INITIALIZER_LISTS)
+ glm::mat4x2 m0(
+ glm::vec2(0, 1),
+ glm::vec2(2, 3),
+ glm::vec2(4, 5),
+ glm::vec2(6, 7));
+
+ glm::mat4x2 m1{0, 1, 2, 3, 4, 5, 6, 7};
+
+ glm::mat4x2 m2{
+ {0, 1},
+ {2, 3},
+ {4, 5},
+ {6, 7}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat4x2> v1{
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ {0, 1, 2, 3, 4, 5, 6, 7}
+ };
+
+ std::vector<glm::mat4x2> v2{
+ {
+ { 0, 1},
+ { 4, 5},
+ { 8, 9},
+ { 12, 13}
+ },
+ {
+ { 0, 1},
+ { 4, 5},
+ { 8, 9},
+ { 12, 13}
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat4x2 B(A);
+ glm::mat4x2 Identity(1.0f);
+
+ Error += glm::all(glm::equal(B, Identity, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 32 == sizeof(glm::mat4x2) ? 0 : 1;
+ Error += 64 == sizeof(glm::dmat4x2) ? 0 : 1;
+ Error += glm::mat4x2().length() == 4 ? 0 : 1;
+ Error += glm::dmat4x2().length() == 4 ? 0 : 1;
+ Error += glm::mat4x2::length() == 4 ? 0 : 1;
+ Error += glm::dmat4x2::length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat4x2::length() == 4, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat4x3.cpp b/3rdparty/glm/source/test/core/core_type_mat4x3.cpp
new file mode 100644
index 0000000..1c65e7f
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat4x3.cpp
@@ -0,0 +1,152 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+#include <vector>
+
+static int test_operators()
+{
+ glm::mat4x3 l(1.0f);
+ glm::mat4x3 m(1.0f);
+ glm::vec4 u(1.0f);
+ glm::vec3 v(1.0f);
+ float x = 1.0f;
+ glm::vec3 a = m * u;
+ glm::vec4 b = v * m;
+ glm::mat4x3 n = x / m;
+ glm::mat4x3 o = m / x;
+ glm::mat4x3 p = x * m;
+ glm::mat4x3 q = m * x;
+ bool R = glm::any(glm::notEqual(m, q, glm::epsilon<float>()));
+ bool S = glm::all(glm::equal(m, l, glm::epsilon<float>()));
+
+ return (S && !R) ? 0 : 1;
+}
+
+int test_ctr()
+{
+ int Error(0);
+
+#if(GLM_HAS_INITIALIZER_LISTS)
+ glm::mat4x3 m0(
+ glm::vec3(0, 1, 2),
+ glm::vec3(3, 4, 5),
+ glm::vec3(6, 7, 8),
+ glm::vec3(9, 10, 11));
+
+ glm::mat4x3 m1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+
+ glm::mat4x3 m2{
+ {0, 1, 2},
+ {3, 4, 5},
+ {6, 7, 8},
+ {9, 10, 11}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+ std::vector<glm::mat4x3> v1{
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
+ };
+
+ std::vector<glm::mat4x3> v2{
+ {
+ { 0, 1, 2 },
+ { 4, 5, 6 },
+ { 8, 9, 10 },
+ { 12, 13, 14 }
+ },
+ {
+ { 0, 1, 2 },
+ { 4, 5, 6 },
+ { 8, 9, 10 },
+ { 12, 13, 14 }
+ }
+ };
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+namespace cast
+{
+ template<typename genType>
+ int entry()
+ {
+ int Error = 0;
+
+ genType A(1.0f);
+ glm::mat4x3 B(A);
+ glm::mat4x3 Identity(1.0f);
+
+ Error += glm::all(glm::equal(B, Identity, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += entry<glm::mat2x2>();
+ Error += entry<glm::mat2x3>();
+ Error += entry<glm::mat2x4>();
+ Error += entry<glm::mat3x2>();
+ Error += entry<glm::mat3x3>();
+ Error += entry<glm::mat3x4>();
+ Error += entry<glm::mat4x2>();
+ Error += entry<glm::mat4x3>();
+ Error += entry<glm::mat4x4>();
+
+ return Error;
+ }
+}//namespace cast
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 48 == sizeof(glm::mat4x3) ? 0 : 1;
+ Error += 96 == sizeof(glm::dmat4x3) ? 0 : 1;
+ Error += glm::mat4x3().length() == 4 ? 0 : 1;
+ Error += glm::dmat4x3().length() == 4 ? 0 : 1;
+ Error += glm::mat4x3::length() == 4 ? 0 : 1;
+ Error += glm::dmat4x3::length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat4x3::length() == 4, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += cast::test();
+ Error += test_ctr();
+ Error += test_operators();
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/core/core_type_mat4x4.cpp b/3rdparty/glm/source/test/core/core_type_mat4x4.cpp
new file mode 100644
index 0000000..0be87f1
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_mat4x4.cpp
@@ -0,0 +1,218 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/matrix.hpp>
+#include <glm/mat4x4.hpp>
+#include <glm/vec4.hpp>
+#include <vector>
+
+template <typename matType, typename vecType>
+static int test_operators()
+{
+ typedef typename matType::value_type value_type;
+
+ value_type const Epsilon = static_cast<value_type>(0.001);
+
+ int Error = 0;
+
+ matType const M(static_cast<value_type>(2.0f));
+ matType const N(static_cast<value_type>(1.0f));
+ vecType const U(static_cast<value_type>(2.0f));
+
+ {
+ matType const P = N * static_cast<value_type>(2.0f);
+ Error += glm::all(glm::equal(P, M, Epsilon)) ? 0 : 1;
+
+ matType const Q = M / static_cast<value_type>(2.0f);
+ Error += glm::all(glm::equal(Q, N, Epsilon)) ? 0 : 1;
+ }
+
+ {
+ vecType const V = M * U;
+ Error += glm::all(glm::equal(V, vecType(static_cast<value_type>(4.f)), Epsilon)) ? 0 : 1;
+
+ vecType const W = U / M;
+ Error += glm::all(glm::equal(W, vecType(static_cast<value_type>(1.f)), Epsilon)) ? 0 : 1;
+ }
+
+ {
+ matType const O = M * N;
+ Error += glm::all(glm::equal(O, matType(static_cast<value_type>(2.f)), Epsilon)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename matType>
+static int test_inverse()
+{
+ typedef typename matType::value_type value_type;
+
+ value_type const Epsilon = static_cast<value_type>(0.001);
+
+ int Error = 0;
+
+ matType const Identity(static_cast<value_type>(1.0f));
+ matType const Matrix(
+ glm::vec4(0.6f, 0.2f, 0.3f, 0.4f),
+ glm::vec4(0.2f, 0.7f, 0.5f, 0.3f),
+ glm::vec4(0.3f, 0.5f, 0.7f, 0.2f),
+ glm::vec4(0.4f, 0.3f, 0.2f, 0.6f));
+ matType const Inverse = Identity / Matrix;
+ matType const Result = Matrix * Inverse;
+
+ Error += glm::all(glm::equal(Identity, Result, Epsilon)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_ctr()
+{
+ int Error = 0;
+
+#if GLM_HAS_TRIVIAL_QUERIES
+ //Error += std::is_trivially_default_constructible<glm::mat4>::value ? 0 : 1;
+ //Error += std::is_trivially_copy_assignable<glm::mat4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::mat4>::value ? 0 : 1;
+ //Error += std::is_copy_constructible<glm::mat4>::value ? 0 : 1;
+ //Error += std::has_trivial_copy_constructor<glm::mat4>::value ? 0 : 1;
+#endif
+
+#if GLM_HAS_INITIALIZER_LISTS
+ glm::mat4 const m0(
+ glm::vec4(0, 1, 2, 3),
+ glm::vec4(4, 5, 6, 7),
+ glm::vec4(8, 9, 10, 11),
+ glm::vec4(12, 13, 14, 15));
+
+ assert(sizeof(m0) == 4 * 4 * 4);
+
+ glm::vec4 const V{0, 1, 2, 3};
+
+ glm::mat4 const m1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+ glm::mat4 const m2{
+ {0, 1, 2, 3},
+ {4, 5, 6, 7},
+ {8, 9, 10, 11},
+ {12, 13, 14, 15}};
+
+ Error += glm::all(glm::equal(m0, m2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m1, m2, glm::epsilon<float>())) ? 0 : 1;
+
+
+ std::vector<glm::mat4> const m3{
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}};
+
+ glm::mat4 const m4{
+ {1, 0, 0, 0},
+ {0, 1, 0, 0},
+ {0, 0, 1, 0},
+ {0, 0, 0, 1} };
+
+ Error += glm::equal(m4[0][0], 1.0f, 0.0001f) ? 0 : 1;
+ Error += glm::equal(m4[3][3], 1.0f, 0.0001f) ? 0 : 1;
+
+ std::vector<glm::mat4> const v1{
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}};
+
+ std::vector<glm::mat4> const v2{
+ {
+ { 0, 1, 2, 3 },
+ { 4, 5, 6, 7 },
+ { 8, 9, 10, 11 },
+ { 12, 13, 14, 15 }
+ },
+ {
+ { 0, 1, 2, 3 },
+ { 4, 5, 6, 7 },
+ { 8, 9, 10, 11 },
+ { 12, 13, 14, 15 }
+ }};
+
+#endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+static int test_member_alloc_bug()
+{
+ int Error = 0;
+
+ struct repro
+ {
+ repro(){ this->matrix = new glm::mat4(); }
+ ~repro(){delete this->matrix;}
+
+ glm::mat4* matrix;
+ };
+
+ repro Repro;
+
+ return Error;
+}
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += 64 == sizeof(glm::mat4) ? 0 : 1;
+ Error += 128 == sizeof(glm::dmat4) ? 0 : 1;
+ Error += glm::mat4().length() == 4 ? 0 : 1;
+ Error += glm::dmat4().length() == 4 ? 0 : 1;
+ Error += glm::mat4::length() == 4 ? 0 : 1;
+ Error += glm::dmat4::length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::mat4::length() == 4, "GLM: Failed constexpr");
+ constexpr glm::mat4 A(1.f);
+ constexpr glm::mat4 B(1.f);
+ constexpr glm::bvec4 C = glm::equal(A, B, 0.01f);
+ static_assert(glm::all(C), "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_member_alloc_bug();
+ Error += test_ctr();
+
+ Error += test_operators<glm::mat4, glm::vec4>();
+ Error += test_operators<glm::lowp_mat4, glm::lowp_vec4>();
+ Error += test_operators<glm::mediump_mat4, glm::mediump_vec4>();
+ Error += test_operators<glm::highp_mat4, glm::highp_vec4>();
+
+ Error += test_operators<glm::dmat4, glm::dvec4>();
+ Error += test_operators<glm::lowp_dmat4, glm::lowp_dvec4>();
+ Error += test_operators<glm::mediump_dmat4, glm::mediump_dvec4>();
+ Error += test_operators<glm::highp_dmat4, glm::highp_dvec4>();
+
+ Error += test_inverse<glm::mat4>();
+ Error += test_inverse<glm::lowp_mat4>();
+ Error += test_inverse<glm::mediump_mat4>();
+ Error += test_inverse<glm::highp_mat4>();
+
+ Error += test_inverse<glm::dmat4>();
+ Error += test_inverse<glm::lowp_dmat4>();
+ Error += test_inverse<glm::mediump_dmat4>();
+ Error += test_inverse<glm::highp_dmat4>();
+
+ Error += test_size();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_vec1.cpp b/3rdparty/glm/source/test/core/core_type_vec1.cpp
new file mode 100644
index 0000000..77f3f84
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_vec1.cpp
@@ -0,0 +1,169 @@
+#define GLM_FORCE_SWIZZLE
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/vec2.hpp>
+#include <vector>
+
+static glm::vec1 g1;
+static glm::vec1 g2(1);
+
+int test_vec1_operators()
+{
+ int Error = 0;
+
+ glm::ivec1 A(1);
+ glm::ivec1 B(1);
+ {
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ {
+ A *= 1;
+ B *= 1;
+ A += 1;
+ B += 1;
+
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_vec1_ctor()
+{
+ int Error = 0;
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::vec1>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::vec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::vec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dvec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ivec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::uvec1>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::vec1>::value ? 0 : 1;
+# endif
+
+/*
+#if GLM_HAS_INITIALIZER_LISTS
+ {
+ glm::vec1 a{ 0 };
+ std::vector<glm::vec1> v = {
+ {0.f},
+ {4.f},
+ {8.f}};
+ }
+
+ {
+ glm::dvec2 a{ 0 };
+ std::vector<glm::dvec1> v = {
+ {0.0},
+ {4.0},
+ {8.0}};
+ }
+#endif
+*/
+
+ {
+ glm::vec2 A = glm::vec2(2.0f);
+ glm::vec2 B = glm::vec2(2.0f, 3.0f);
+ glm::vec2 C = glm::vec2(2.0f, 3.0);
+ //glm::vec2 D = glm::dvec2(2.0); // Build error TODO: What does the specification says?
+ glm::vec2 E(glm::dvec2(2.0));
+ glm::vec2 F(glm::ivec2(2));
+ }
+
+ return Error;
+}
+
+static int test_vec1_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec1) == sizeof(glm::mediump_vec1) ? 0 : 1;
+ Error += 4 == sizeof(glm::mediump_vec1) ? 0 : 1;
+ Error += sizeof(glm::dvec1) == sizeof(glm::highp_dvec1) ? 0 : 1;
+ Error += 8 == sizeof(glm::highp_dvec1) ? 0 : 1;
+ Error += glm::vec1().length() == 1 ? 0 : 1;
+ Error += glm::dvec1().length() == 1 ? 0 : 1;
+ Error += glm::vec1::length() == 1 ? 0 : 1;
+ Error += glm::dvec1::length() == 1 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_vec1_operator_increment()
+{
+ int Error(0);
+
+ glm::ivec1 v0(1);
+ glm::ivec1 v1(v0);
+ glm::ivec1 v2(v0);
+ glm::ivec1 v3 = ++v1;
+ glm::ivec1 v4 = v2++;
+
+ Error += glm::all(glm::equal(v0, v4)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v2)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v3)) ? 0 : 1;
+
+ int i0(1);
+ int i1(i0);
+ int i2(i0);
+ int i3 = ++i1;
+ int i4 = i2++;
+
+ Error += i0 == i4 ? 0 : 1;
+ Error += i1 == i2 ? 0 : 1;
+ Error += i1 == i3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::vec1 A = glm::vec1(1.0f);
+ //glm::vec1 B = A.x;
+ glm::vec1 C(A.x);
+
+ //Error += glm::all(glm::equal(A, B)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::vec1::length() == 1, "GLM: Failed constexpr");
+ static_assert(glm::vec1(1.0f).x > 0.0f, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_vec1_size();
+ Error += test_vec1_ctor();
+ Error += test_vec1_operators();
+ Error += test_vec1_operator_increment();
+ Error += test_swizzle();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_vec2.cpp b/3rdparty/glm/source/test/core/core_type_vec2.cpp
new file mode 100644
index 0000000..308c61f
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_vec2.cpp
@@ -0,0 +1,392 @@
+#define GLM_FORCE_SWIZZLE
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/vec2.hpp>
+#include <vector>
+#if GLM_HAS_TRIVIAL_QUERIES
+# include <type_traits>
+#endif
+
+static glm::ivec2 g1;
+static glm::ivec2 g2(1);
+static glm::ivec2 g3(1, 1);
+
+static int test_operators()
+{
+ int Error = 0;
+
+ {
+ glm::ivec2 A(1);
+ glm::ivec2 B(1);
+ Error += A != B ? 1 : 0;
+ Error += A == B ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f);
+ glm::vec2 C = A + 1.0f;
+ A += 1.0f;
+ Error += glm::all(glm::equal(A, glm::vec2(2.0f), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f);
+ glm::vec2 B(2.0f,-1.0f);
+ glm::vec2 C = A + B;
+ A += B;
+ Error += glm::all(glm::equal(A, glm::vec2(3.0f, 0.0f), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f);
+ glm::vec2 C = A - 1.0f;
+ A -= 1.0f;
+ Error += glm::all(glm::equal(A, glm::vec2(0.0f), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f);
+ glm::vec2 B(2.0f,-1.0f);
+ glm::vec2 C = A - B;
+ A -= B;
+ Error += glm::all(glm::equal(A, glm::vec2(-1.0f, 2.0f), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f);
+ glm::vec2 C = A * 2.0f;
+ A *= 2.0f;
+ Error += glm::all(glm::equal(A, glm::vec2(2.0f), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(2.0f);
+ glm::vec2 B(2.0f);
+ glm::vec2 C = A / B;
+ A /= B;
+ Error += glm::all(glm::equal(A, glm::vec2(1.0f), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f, 2.0f);
+ glm::vec2 B(4.0f, 5.0f);
+
+ glm::vec2 C = A + B;
+ Error += glm::all(glm::equal(C, glm::vec2(5, 7), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 D = B - A;
+ Error += glm::all(glm::equal(D, glm::vec2(3, 3), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 E = A * B;
+ Error += glm::all(glm::equal(E, glm::vec2(4, 10), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 F = B / A;
+ Error += glm::all(glm::equal(F, glm::vec2(4, 2.5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 G = A + 1.0f;
+ Error += glm::all(glm::equal(G, glm::vec2(2, 3), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 H = B - 1.0f;
+ Error += glm::all(glm::equal(H, glm::vec2(3, 4), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 I = A * 2.0f;
+ Error += glm::all(glm::equal(I, glm::vec2(2, 4), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 J = B / 2.0f;
+ Error += glm::all(glm::equal(J, glm::vec2(2, 2.5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 K = 1.0f + A;
+ Error += glm::all(glm::equal(K, glm::vec2(2, 3), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 L = 1.0f - B;
+ Error += glm::all(glm::equal(L, glm::vec2(-3, -4), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 M = 2.0f * A;
+ Error += glm::all(glm::equal(M, glm::vec2(2, 4), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec2 N = 2.0f / B;
+ Error += glm::all(glm::equal(N, glm::vec2(0.5, 2.0 / 5.0), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A(1.0f, 2.0f);
+ glm::vec2 B(4.0f, 5.0f);
+
+ A += B;
+ Error += glm::all(glm::equal(A, glm::vec2(5, 7), glm::epsilon<float>())) ? 0 : 1;
+
+ A += 1.0f;
+ Error += glm::all(glm::equal(A, glm::vec2(6, 8), glm::epsilon<float>())) ? 0 : 1;
+ }
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B(4.0f, 5.0f);
+
+ B -= A;
+ Error += B == glm::ivec2(3, 3) ? 0 : 1;
+
+ B -= 1.0f;
+ Error += B == glm::ivec2(2, 2) ? 0 : 1;
+ }
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B(4.0f, 5.0f);
+
+ A *= B;
+ Error += A == glm::ivec2(4, 10) ? 0 : 1;
+
+ A *= 2;
+ Error += A == glm::ivec2(8, 20) ? 0 : 1;
+ }
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B(4.0f, 16.0f);
+
+ B /= A;
+ Error += B == glm::ivec2(4, 8) ? 0 : 1;
+
+ B /= 2.0f;
+ Error += B == glm::ivec2(2, 4) ? 0 : 1;
+ }
+ {
+ glm::ivec2 B(2);
+
+ B /= B.y;
+ Error += B == glm::ivec2(1) ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B = -A;
+ Error += B == glm::ivec2(-1.0f, -2.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B = --A;
+ Error += B == glm::ivec2(0.0f, 1.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B = A--;
+ Error += B == glm::ivec2(1.0f, 2.0f) ? 0 : 1;
+ Error += A == glm::ivec2(0.0f, 1.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B = ++A;
+ Error += B == glm::ivec2(2.0f, 3.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec2 A(1.0f, 2.0f);
+ glm::ivec2 B = A++;
+ Error += B == glm::ivec2(1.0f, 2.0f) ? 0 : 1;
+ Error += A == glm::ivec2(2.0f, 3.0f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_ctor()
+{
+ int Error = 0;
+
+ {
+ glm::ivec2 A(1);
+ glm::ivec2 B(A);
+ Error += A == B ? 0 : 1;
+ }
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::vec2>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::vec2>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::vec2>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dvec2>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ivec2>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::uvec2>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::vec2>::value ? 0 : 1;
+# endif
+
+#if GLM_HAS_INITIALIZER_LISTS
+ {
+ glm::vec2 a{ 0, 1 };
+ std::vector<glm::vec2> v = {
+ {0, 1},
+ {4, 5},
+ {8, 9}};
+ }
+
+ {
+ glm::dvec2 a{ 0, 1 };
+ std::vector<glm::dvec2> v = {
+ {0, 1},
+ {4, 5},
+ {8, 9}};
+ }
+#endif
+
+ {
+ glm::vec2 A = glm::vec2(2.0f);
+ glm::vec2 B = glm::vec2(2.0f, 3.0f);
+ glm::vec2 C = glm::vec2(2.0f, 3.0);
+ //glm::vec2 D = glm::dvec2(2.0); // Build error TODO: What does the specification says?
+ glm::vec2 E(glm::dvec2(2.0));
+ glm::vec2 F(glm::ivec2(2));
+ }
+
+ {
+ glm::vec1 const R(1.0f);
+ glm::vec1 const S(2.0f);
+ glm::vec2 const O(1.0f, 2.0f);
+
+ glm::vec2 const A(R);
+ glm::vec2 const B(1.0f);
+ Error += glm::all(glm::equal(A, B, 0.0001f)) ? 0 : 1;
+
+ glm::vec2 const C(R, S);
+ Error += glm::all(glm::equal(C, O, 0.0001f)) ? 0 : 1;
+
+ glm::vec2 const D(R, 2.0f);
+ Error += glm::all(glm::equal(D, O, 0.0001f)) ? 0 : 1;
+
+ glm::vec2 const E(1.0f, S);
+ Error += glm::all(glm::equal(E, O, 0.0001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const R(1.0f);
+ glm::dvec1 const S(2.0);
+ glm::vec2 const O(1.0, 2.0);
+
+ glm::vec2 const A(R);
+ glm::vec2 const B(1.0);
+ Error += glm::all(glm::equal(A, B, 0.0001f)) ? 0 : 1;
+
+ glm::vec2 const C(R, S);
+ Error += glm::all(glm::equal(C, O, 0.0001f)) ? 0 : 1;
+
+ glm::vec2 const D(R, 2.0);
+ Error += glm::all(glm::equal(D, O, 0.0001f)) ? 0 : 1;
+
+ glm::vec2 const E(1.0, S);
+ Error += glm::all(glm::equal(E, O, 0.0001f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec2) == sizeof(glm::mediump_vec2) ? 0 : 1;
+ Error += 8 == sizeof(glm::mediump_vec2) ? 0 : 1;
+ Error += sizeof(glm::dvec2) == sizeof(glm::highp_dvec2) ? 0 : 1;
+ Error += 16 == sizeof(glm::highp_dvec2) ? 0 : 1;
+ Error += glm::vec2().length() == 2 ? 0 : 1;
+ Error += glm::dvec2().length() == 2 ? 0 : 1;
+ Error += glm::vec2::length() == 2 ? 0 : 1;
+ Error += glm::dvec2::length() == 2 ? 0 : 1;
+
+ GLM_CONSTEXPR std::size_t Length = glm::vec2::length();
+ Error += Length == 2 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_operator_increment()
+{
+ int Error = 0;
+
+ glm::ivec2 v0(1);
+ glm::ivec2 v1(v0);
+ glm::ivec2 v2(v0);
+ glm::ivec2 v3 = ++v1;
+ glm::ivec2 v4 = v2++;
+
+ Error += glm::all(glm::equal(v0, v4)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v2)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v3)) ? 0 : 1;
+
+ int i0(1);
+ int i1(i0);
+ int i2(i0);
+ int i3 = ++i1;
+ int i4 = i2++;
+
+ Error += i0 == i4 ? 0 : 1;
+ Error += i1 == i2 ? 0 : 1;
+ Error += i1 == i3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::vec2::length() == 2, "GLM: Failed constexpr");
+ static_assert(glm::vec2(1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec2(1.0f, -1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec2(1.0f, -1.0f).y < 0.0f, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+static int test_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::vec2 A = glm::vec2(1.0f, 2.0f);
+ glm::vec2 B = A.xy;
+ glm::vec2 C(A.xy);
+ glm::vec2 D(A.xy());
+
+ Error += glm::all(glm::equal(A, B, 0.0001f)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D, 0.0001f)) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::vec2 A = glm::vec2(1.0f, 2.0f);
+ glm::vec2 B = A.xy();
+ glm::vec2 C(A.xy());
+
+ Error += glm::all(glm::equal(A, B, 0.0001f)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, 0.0001f)) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_ctor();
+ Error += test_operators();
+ Error += test_operator_increment();
+ Error += test_swizzle();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_vec3.cpp b/3rdparty/glm/source/test/core/core_type_vec3.cpp
new file mode 100644
index 0000000..4da8187
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_vec3.cpp
@@ -0,0 +1,628 @@
+#define GLM_FORCE_SWIZZLE
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/geometric.hpp>
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <vector>
+
+static glm::vec3 g1;
+static glm::vec3 g2(1);
+static glm::vec3 g3(1, 1, 1);
+
+int test_vec3_ctor()
+{
+ int Error = 0;
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::vec3>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::vec3>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::vec3>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dvec3>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ivec3>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::uvec3>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::vec3>::value ? 0 : 1;
+# endif
+
+# if GLM_HAS_INITIALIZER_LISTS
+ {
+ glm::vec3 a{ 0, 1, 2 };
+ std::vector<glm::vec3> v = {
+ {0, 1, 2},
+ {4, 5, 6},
+ {8, 9, 0}};
+ }
+
+ {
+ glm::dvec3 a{ 0, 1, 2 };
+ std::vector<glm::dvec3> v = {
+ {0, 1, 2},
+ {4, 5, 6},
+ {8, 9, 0}};
+ }
+# endif
+
+ {
+ glm::ivec3 A(1);
+ glm::ivec3 B(1, 1, 1);
+
+ Error += A == B ? 0 : 1;
+ }
+
+ {
+ std::vector<glm::ivec3> Tests;
+ Tests.push_back(glm::ivec3(glm::ivec2(1, 2), 3));
+ Tests.push_back(glm::ivec3(1, glm::ivec2(2, 3)));
+ Tests.push_back(glm::ivec3(1, 2, 3));
+ Tests.push_back(glm::ivec3(glm::ivec4(1, 2, 3, 4)));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ Error += Tests[i] == glm::ivec3(1, 2, 3) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const R(1.0f);
+ glm::vec1 const S(2.0f);
+ glm::vec1 const T(3.0f);
+ glm::vec3 const O(1.0f, 2.0f, 3.0f);
+
+ glm::vec3 const A(R);
+ glm::vec3 const B(1.0f);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const C(R, S, T);
+ Error += glm::all(glm::equal(C, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const D(R, 2.0f, 3.0f);
+ Error += glm::all(glm::equal(D, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const E(1.0f, S, 3.0f);
+ Error += glm::all(glm::equal(E, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const F(1.0f, S, T);
+ Error += glm::all(glm::equal(F, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const G(R, 2.0f, T);
+ Error += glm::all(glm::equal(G, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const H(R, S, 3.0f);
+ Error += glm::all(glm::equal(H, O, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const R(1.0);
+ glm::dvec1 const S(2.0);
+ glm::vec1 const T(3.0);
+ glm::vec3 const O(1.0f, 2.0f, 3.0f);
+
+ glm::vec3 const A(R);
+ glm::vec3 const B(1.0);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const C(R, S, T);
+ Error += glm::all(glm::equal(C, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const D(R, 2.0, 3.0);
+ Error += glm::all(glm::equal(D, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const E(1.0f, S, 3.0);
+ Error += glm::all(glm::equal(E, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const F(1.0, S, T);
+ Error += glm::all(glm::equal(F, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const G(R, 2.0, T);
+ Error += glm::all(glm::equal(G, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const H(R, S, 3.0);
+ Error += glm::all(glm::equal(H, O, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+float foo()
+{
+ glm::vec3 bar = glm::vec3(0.0f, 1.0f, 1.0f);
+
+ return glm::length(bar);
+}
+
+static int test_bvec3_ctor()
+{
+ int Error = 0;
+
+ glm::bvec3 const A(true);
+ glm::bvec3 const B(true);
+ glm::bvec3 const C(false);
+ glm::bvec3 const D = A && B;
+ glm::bvec3 const E = A && C;
+ glm::bvec3 const F = A || C;
+
+ Error += D == glm::bvec3(true) ? 0 : 1;
+ Error += E == glm::bvec3(false) ? 0 : 1;
+ Error += F == glm::bvec3(true) ? 0 : 1;
+
+ bool const G = A == C;
+ bool const H = A != C;
+ Error += !G ? 0 : 1;
+ Error += H ? 0 : 1;
+
+ return Error;
+}
+
+static int test_vec3_operators()
+{
+ int Error = 0;
+
+ {
+ glm::ivec3 A(1);
+ glm::ivec3 B(1);
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 const A(1.0f, 2.0f, 3.0f);
+ glm::vec3 const B(4.0f, 5.0f, 6.0f);
+
+ glm::vec3 const C = A + B;
+ Error += glm::all(glm::equal(C, glm::vec3(5, 7, 9), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const D = B - A;
+ Error += glm::all(glm::equal(D, glm::vec3(3, 3, 3), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const E = A * B;
+ Error += glm::all(glm::equal(E, glm::vec3(4, 10, 18), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const F = B / A;
+ Error += glm::all(glm::equal(F, glm::vec3(4, 2.5, 2), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const G = A + 1.0f;
+ Error += glm::all(glm::equal(G, glm::vec3(2, 3, 4), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const H = B - 1.0f;
+ Error += glm::all(glm::equal(H, glm::vec3(3, 4, 5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const I = A * 2.0f;
+ Error += glm::all(glm::equal(I, glm::vec3(2, 4, 6), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const J = B / 2.0f;
+ Error += glm::all(glm::equal(J, glm::vec3(2, 2.5, 3), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const K = 1.0f + A;
+ Error += glm::all(glm::equal(K, glm::vec3(2, 3, 4), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const L = 1.0f - B;
+ Error += glm::all(glm::equal(L, glm::vec3(-3, -4, -5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const M = 2.0f * A;
+ Error += glm::all(glm::equal(M, glm::vec3(2, 4, 6), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec3 const N = 2.0f / B;
+ Error += glm::all(glm::equal(N, glm::vec3(0.5, 2.0 / 5.0, 2.0 / 6.0), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B(4.0f, 5.0f, 6.0f);
+
+ A += B;
+ Error += A == glm::ivec3(5, 7, 9) ? 0 : 1;
+
+ A += 1;
+ Error += A == glm::ivec3(6, 8, 10) ? 0 : 1;
+ }
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B(4.0f, 5.0f, 6.0f);
+
+ B -= A;
+ Error += B == glm::ivec3(3, 3, 3) ? 0 : 1;
+
+ B -= 1;
+ Error += B == glm::ivec3(2, 2, 2) ? 0 : 1;
+ }
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B(4.0f, 5.0f, 6.0f);
+
+ A *= B;
+ Error += A == glm::ivec3(4, 10, 18) ? 0 : 1;
+
+ A *= 2;
+ Error += A == glm::ivec3(8, 20, 36) ? 0 : 1;
+ }
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B(4.0f, 4.0f, 6.0f);
+
+ B /= A;
+ Error += B == glm::ivec3(4, 2, 2) ? 0 : 1;
+
+ B /= 2;
+ Error += B == glm::ivec3(2, 1, 1) ? 0 : 1;
+ }
+ {
+ glm::ivec3 B(2);
+
+ B /= B.y;
+ Error += B == glm::ivec3(1) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B = -A;
+ Error += B == glm::ivec3(-1.0f, -2.0f, -3.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B = --A;
+ Error += B == glm::ivec3(0.0f, 1.0f, 2.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B = A--;
+ Error += B == glm::ivec3(1.0f, 2.0f, 3.0f) ? 0 : 1;
+ Error += A == glm::ivec3(0.0f, 1.0f, 2.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B = ++A;
+ Error += B == glm::ivec3(2.0f, 3.0f, 4.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 A(1.0f, 2.0f, 3.0f);
+ glm::ivec3 B = A++;
+ Error += B == glm::ivec3(1.0f, 2.0f, 3.0f) ? 0 : 1;
+ Error += A == glm::ivec3(2.0f, 3.0f, 4.0f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_vec3_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec3) == sizeof(glm::lowp_vec3) ? 0 : 1;
+ Error += sizeof(glm::vec3) == sizeof(glm::mediump_vec3) ? 0 : 1;
+ Error += sizeof(glm::vec3) == sizeof(glm::highp_vec3) ? 0 : 1;
+ Error += 12 == sizeof(glm::mediump_vec3) ? 0 : 1;
+ Error += sizeof(glm::dvec3) == sizeof(glm::lowp_dvec3) ? 0 : 1;
+ Error += sizeof(glm::dvec3) == sizeof(glm::mediump_dvec3) ? 0 : 1;
+ Error += sizeof(glm::dvec3) == sizeof(glm::highp_dvec3) ? 0 : 1;
+ Error += 24 == sizeof(glm::highp_dvec3) ? 0 : 1;
+ Error += glm::vec3().length() == 3 ? 0 : 1;
+ Error += glm::dvec3().length() == 3 ? 0 : 1;
+ Error += glm::vec3::length() == 3 ? 0 : 1;
+ Error += glm::dvec3::length() == 3 ? 0 : 1;
+
+ GLM_CONSTEXPR std::size_t Length = glm::vec3::length();
+ Error += Length == 3 ? 0 : 1;
+
+ return Error;
+}
+
+int test_vec3_swizzle3_2()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec3 v(1, 2, 3);
+ glm::ivec2 u;
+
+ // Can not assign a vec3 swizzle to a vec2
+ //u = v.xyz; //Illegal
+ //u = v.rgb; //Illegal
+ //u = v.stp; //Illegal
+
+ u = v.xx; Error += (u.x == 1 && u.y == 1) ? 0 : 1;
+ u = v.xy; Error += (u.x == 1 && u.y == 2) ? 0 : 1;
+ u = v.xz; Error += (u.x == 1 && u.y == 3) ? 0 : 1;
+ u = v.yx; Error += (u.x == 2 && u.y == 1) ? 0 : 1;
+ u = v.yy; Error += (u.x == 2 && u.y == 2) ? 0 : 1;
+ u = v.yz; Error += (u.x == 2 && u.y == 3) ? 0 : 1;
+ u = v.zx; Error += (u.x == 3 && u.y == 1) ? 0 : 1;
+ u = v.zy; Error += (u.x == 3 && u.y == 2) ? 0 : 1;
+ u = v.zz; Error += (u.x == 3 && u.y == 3) ? 0 : 1;
+
+ u = v.rr; Error += (u.r == 1 && u.g == 1) ? 0 : 1;
+ u = v.rg; Error += (u.r == 1 && u.g == 2) ? 0 : 1;
+ u = v.rb; Error += (u.r == 1 && u.g == 3) ? 0 : 1;
+ u = v.gr; Error += (u.r == 2 && u.g == 1) ? 0 : 1;
+ u = v.gg; Error += (u.r == 2 && u.g == 2) ? 0 : 1;
+ u = v.gb; Error += (u.r == 2 && u.g == 3) ? 0 : 1;
+ u = v.br; Error += (u.r == 3 && u.g == 1) ? 0 : 1;
+ u = v.bg; Error += (u.r == 3 && u.g == 2) ? 0 : 1;
+ u = v.bb; Error += (u.r == 3 && u.g == 3) ? 0 : 1;
+
+ u = v.ss; Error += (u.s == 1 && u.t == 1) ? 0 : 1;
+ u = v.st; Error += (u.s == 1 && u.t == 2) ? 0 : 1;
+ u = v.sp; Error += (u.s == 1 && u.t == 3) ? 0 : 1;
+ u = v.ts; Error += (u.s == 2 && u.t == 1) ? 0 : 1;
+ u = v.tt; Error += (u.s == 2 && u.t == 2) ? 0 : 1;
+ u = v.tp; Error += (u.s == 2 && u.t == 3) ? 0 : 1;
+ u = v.ps; Error += (u.s == 3 && u.t == 1) ? 0 : 1;
+ u = v.pt; Error += (u.s == 3 && u.t == 2) ? 0 : 1;
+ u = v.pp; Error += (u.s == 3 && u.t == 3) ? 0 : 1;
+ // Mixed member aliases are not valid
+ //u = v.rx; //Illegal
+ //u = v.sy; //Illegal
+
+ u = glm::ivec2(1, 2);
+ v = glm::ivec3(1, 2, 3);
+ //v.xx = u; //Illegal
+ v.xy = u; Error += (v.x == 1 && v.y == 2 && v.z == 3) ? 0 : 1;
+ v.xz = u; Error += (v.x == 1 && v.y == 2 && v.z == 2) ? 0 : 1;
+ v.yx = u; Error += (v.x == 2 && v.y == 1 && v.z == 2) ? 0 : 1;
+ //v.yy = u; //Illegal
+ v.yz = u; Error += (v.x == 2 && v.y == 1 && v.z == 2) ? 0 : 1;
+ v.zx = u; Error += (v.x == 2 && v.y == 1 && v.z == 1) ? 0 : 1;
+ v.zy = u; Error += (v.x == 2 && v.y == 2 && v.z == 1) ? 0 : 1;
+ //v.zz = u; //Illegal
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ return Error;
+}
+
+int test_vec3_swizzle3_3()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec3 v(1, 2, 3);
+ glm::ivec3 u;
+
+ u = v; Error += (u.x == 1 && u.y == 2 && u.z == 3) ? 0 : 1;
+
+ u = v.xyz; Error += (u.x == 1 && u.y == 2 && u.z == 3) ? 0 : 1;
+ u = v.zyx; Error += (u.x == 3 && u.y == 2 && u.z == 1) ? 0 : 1;
+ u.zyx = v; Error += (u.x == 3 && u.y == 2 && u.z == 1) ? 0 : 1;
+
+ u = v.rgb; Error += (u.x == 1 && u.y == 2 && u.z == 3) ? 0 : 1;
+ u = v.bgr; Error += (u.x == 3 && u.y == 2 && u.z == 1) ? 0 : 1;
+ u.bgr = v; Error += (u.x == 3 && u.y == 2 && u.z == 1) ? 0 : 1;
+
+ u = v.stp; Error += (u.x == 1 && u.y == 2 && u.z == 3) ? 0 : 1;
+ u = v.pts; Error += (u.x == 3 && u.y == 2 && u.z == 1) ? 0 : 1;
+ u.pts = v; Error += (u.x == 3 && u.y == 2 && u.z == 1) ? 0 : 1;
+ }
+# endif//GLM_LANG
+
+ return Error;
+}
+
+int test_vec3_swizzle_operators()
+{
+ int Error = 0;
+
+ glm::ivec3 const u = glm::ivec3(1, 2, 3);
+ glm::ivec3 const v = glm::ivec3(10, 20, 30);
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec3 q;
+
+ // Swizzle, swizzle binary operators
+ q = u.xyz + v.xyz; Error += (q == (u + v)) ? 0 : 1;
+ q = (u.zyx + v.zyx).zyx; Error += (q == (u + v)) ? 0 : 1;
+ q = (u.xyz - v.xyz); Error += (q == (u - v)) ? 0 : 1;
+ q = (u.xyz * v.xyz); Error += (q == (u * v)) ? 0 : 1;
+ q = (u.xxx * v.xxx); Error += (q == glm::ivec3(u.x * v.x)) ? 0 : 1;
+ q = (u.xyz / v.xyz); Error += (q == (u / v)) ? 0 : 1;
+
+ // vec, swizzle binary operators
+ q = u + v.xyz; Error += (q == (u + v)) ? 0 : 1;
+ q = (u - v.xyz); Error += (q == (u - v)) ? 0 : 1;
+ q = (u * v.xyz); Error += (q == (u * v)) ? 0 : 1;
+ q = (u * v.xxx); Error += (q == v.x * u) ? 0 : 1;
+ q = (u / v.xyz); Error += (q == (u / v)) ? 0 : 1;
+
+ // swizzle,vec binary operators
+ q = u.xyz + v; Error += (q == (u + v)) ? 0 : 1;
+ q = (u.xyz - v); Error += (q == (u - v)) ? 0 : 1;
+ q = (u.xyz * v); Error += (q == (u * v)) ? 0 : 1;
+ q = (u.xxx * v); Error += (q == u.x * v) ? 0 : 1;
+ q = (u.xyz / v); Error += (q == (u / v)) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ // Compile errors
+ //q = (u.yz * v.xyz);
+ //q = (u * v.xy);
+
+ return Error;
+}
+
+int test_vec3_swizzle_functions()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ // NOTE: template functions cannot pick up the implicit conversion from
+ // a swizzle to the unswizzled type, therefore the operator() must be
+ // used. E.g.:
+ //
+ // glm::dot(u.xy, v.xy); <--- Compile error
+ // glm::dot(u.xy(), v.xy()); <--- Compiles correctly
+
+ float r;
+
+ // vec2
+ glm::vec2 a(1, 2);
+ glm::vec2 b(10, 20);
+ r = glm::dot(a, b); Error += (int(r) == 50) ? 0 : 1;
+ r = glm::dot(glm::vec2(a.xy()), glm::vec2(b.xy())); Error += (int(r) == 50) ? 0 : 1;
+ r = glm::dot(glm::vec2(a.xy()), glm::vec2(b.yy())); Error += (int(r) == 60) ? 0 : 1;
+
+ // vec3
+ glm::vec3 u = glm::vec3(1, 2, 3);
+ glm::vec3 v = glm::vec3(10, 20, 30);
+ r = glm::dot(u, v); Error += (int(r) == 140) ? 0 : 1;
+ r = glm::dot(u.xyz(), v.zyz()); Error += (int(r) == 160) ? 0 : 1;
+ r = glm::dot(u, v.zyx()); Error += (int(r) == 100) ? 0 : 1;
+ r = glm::dot(u.xyz(), v); Error += (int(r) == 140) ? 0 : 1;
+ r = glm::dot(u.xy(), v.xy()); Error += (int(r) == 50) ? 0 : 1;
+
+ // vec4
+ glm::vec4 s = glm::vec4(1, 2, 3, 4);
+ glm::vec4 t = glm::vec4(10, 20, 30, 40);
+ r = glm::dot(s, t); Error += (int(r) == 300) ? 0 : 1;
+ r = glm::dot(s.xyzw(), t.xyzw()); Error += (int(r) == 300) ? 0 : 1;
+ r = glm::dot(s.xyz(), t.xyz()); Error += (int(r) == 140) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+
+ return Error;
+}
+
+int test_vec3_swizzle_partial()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::vec3 const A(1, 2, 3);
+ glm::vec3 B(A.xy, 3);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 const A(1, 2, 3);
+ glm::ivec3 const B(1, A.yz);
+ Error += A == B ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 const A(1, 2, 3);
+ glm::ivec3 const B(A.xyz);
+ Error += A == B ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ return Error;
+}
+
+static int test_operator_increment()
+{
+ int Error = 0;
+
+ glm::ivec3 v0(1);
+ glm::ivec3 v1(v0);
+ glm::ivec3 v2(v0);
+ glm::ivec3 v3 = ++v1;
+ glm::ivec3 v4 = v2++;
+
+ Error += glm::all(glm::equal(v0, v4)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v2)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v3)) ? 0 : 1;
+
+ int i0(1);
+ int i1(i0);
+ int i2(i0);
+ int i3 = ++i1;
+ int i4 = i2++;
+
+ Error += i0 == i4 ? 0 : 1;
+ Error += i1 == i2 ? 0 : 1;
+ Error += i1 == i3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::vec3 A = glm::vec3(1.0f, 2.0f, 3.0f);
+ glm::vec3 B = A.xyz;
+ glm::vec3 C(A.xyz);
+ glm::vec3 D(A.xyz());
+ glm::vec3 E(A.x, A.yz);
+ glm::vec3 F(A.x, A.yz());
+ glm::vec3 G(A.xy, A.z);
+ glm::vec3 H(A.xy(), A.z);
+
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, E, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, F, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, G, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, H, glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::vec3 A = glm::vec3(1.0f, 2.0f, 3.0f);
+ glm::vec3 B = A.xyz();
+ glm::vec3 C(A.xyz());
+ glm::vec3 D(A.xyz());
+ glm::vec3 E(A.x, A.yz());
+ glm::vec3 F(A.x, A.yz());
+ glm::vec3 G(A.xy(), A.z);
+ glm::vec3 H(A.xy(), A.z);
+
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, E, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, F, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, G, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, H, glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::vec3::length() == 3, "GLM: Failed constexpr");
+ static_assert(glm::vec3(1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec3(1.0f, -1.0f, -1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec3(1.0f, -1.0f, -1.0f).y < 0.0f, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_vec3_ctor();
+ Error += test_bvec3_ctor();
+ Error += test_vec3_operators();
+ Error += test_vec3_size();
+ Error += test_operator_increment();
+ Error += test_constexpr();
+
+ Error += test_swizzle();
+ Error += test_vec3_swizzle3_2();
+ Error += test_vec3_swizzle3_3();
+ Error += test_vec3_swizzle_partial();
+ Error += test_vec3_swizzle_operators();
+ Error += test_vec3_swizzle_functions();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/core/core_type_vec4.cpp b/3rdparty/glm/source/test/core/core_type_vec4.cpp
new file mode 100644
index 0000000..5d65259
--- /dev/null
+++ b/3rdparty/glm/source/test/core/core_type_vec4.cpp
@@ -0,0 +1,850 @@
+#define GLM_FORCE_SWIZZLE
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <cstdio>
+#include <ctime>
+#include <vector>
+
+static glm::vec4 g1;
+static glm::vec4 g2(1);
+static glm::vec4 g3(1, 1, 1, 1);
+
+template <int Value>
+struct mask
+{
+ enum{value = Value};
+};
+
+enum comp
+{
+ X,
+ Y,
+ Z,
+ W
+};
+
+//template<comp X, comp Y, comp Z, comp W>
+//__m128 swizzle(glm::vec4 const& v)
+//{
+// __m128 Src = _mm_set_ps(v.w, v.z, v.y, v.x);
+// return _mm_shuffle_ps(Src, Src, mask<(int(W) << 6) | (int(Z) << 4) | (int(Y) << 2) | (int(X) << 0)>::value);
+//}
+
+static int test_vec4_ctor()
+{
+ int Error = 0;
+
+ {
+ glm::ivec4 A(1, 2, 3, 4);
+ glm::ivec4 B(A);
+ Error += glm::all(glm::equal(A, B)) ? 0 : 1;
+ }
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::vec4>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::vec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::vec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dvec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ivec4>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::uvec4>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::vec4>::value ? 0 : 1;
+# endif
+
+#if GLM_HAS_INITIALIZER_LISTS
+ {
+ glm::vec4 a{ 0, 1, 2, 3 };
+ std::vector<glm::vec4> v = {
+ {0, 1, 2, 3},
+ {4, 5, 6, 7},
+ {8, 9, 0, 1}};
+ }
+
+ {
+ glm::dvec4 a{ 0, 1, 2, 3 };
+ std::vector<glm::dvec4> v = {
+ {0, 1, 2, 3},
+ {4, 5, 6, 7},
+ {8, 9, 0, 1}};
+ }
+#endif
+
+ {
+ glm::ivec4 const A(1);
+ glm::ivec4 const B(1, 1, 1, 1);
+
+ Error += A == B ? 0 : 1;
+ }
+
+ {
+ std::vector<glm::ivec4> Tests;
+ Tests.push_back(glm::ivec4(glm::ivec2(1, 2), 3, 4));
+ Tests.push_back(glm::ivec4(1, glm::ivec2(2, 3), 4));
+ Tests.push_back(glm::ivec4(1, 2, glm::ivec2(3, 4)));
+ Tests.push_back(glm::ivec4(glm::ivec3(1, 2, 3), 4));
+ Tests.push_back(glm::ivec4(1, glm::ivec3(2, 3, 4)));
+ Tests.push_back(glm::ivec4(glm::ivec2(1, 2), glm::ivec2(3, 4)));
+ Tests.push_back(glm::ivec4(1, 2, 3, 4));
+ Tests.push_back(glm::ivec4(glm::ivec4(1, 2, 3, 4)));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ Error += Tests[i] == glm::ivec4(1, 2, 3, 4) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const R(1.0f);
+ glm::vec1 const S(2.0f);
+ glm::vec1 const T(3.0f);
+ glm::vec1 const U(4.0f);
+ glm::vec4 const O(1.0f, 2.0f, 3.0f, 4.0f);
+
+ glm::vec4 const A(R);
+ glm::vec4 const B(1.0f);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const C(R, S, T, U);
+ Error += glm::all(glm::equal(C, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const D(R, 2.0f, 3.0f, 4.0f);
+ Error += glm::all(glm::equal(D, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const E(1.0f, S, 3.0f, 4.0f);
+ Error += glm::all(glm::equal(E, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const F(R, S, 3.0f, 4.0f);
+ Error += glm::all(glm::equal(F, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const G(1.0f, 2.0f, T, 4.0f);
+ Error += glm::all(glm::equal(G, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const H(R, 2.0f, T, 4.0f);
+ Error += glm::all(glm::equal(H, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const I(1.0f, S, T, 4.0f);
+ Error += glm::all(glm::equal(I, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const J(R, S, T, 4.0f);
+ Error += glm::all(glm::equal(J, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const K(R, 2.0f, 3.0f, U);
+ Error += glm::all(glm::equal(K, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const L(1.0f, S, 3.0f, U);
+ Error += glm::all(glm::equal(L, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const M(R, S, 3.0f, U);
+ Error += glm::all(glm::equal(M, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const N(1.0f, 2.0f, T, U);
+ Error += glm::all(glm::equal(N, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const P(R, 2.0f, T, U);
+ Error += glm::all(glm::equal(P, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const Q(1.0f, S, T, U);
+ Error += glm::all(glm::equal(Q, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const V(R, S, T, U);
+ Error += glm::all(glm::equal(V, O, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const R(1.0f);
+ glm::dvec1 const S(2.0);
+ glm::vec1 const T(3.0);
+ glm::dvec1 const U(4.0);
+ glm::vec4 const O(1.0f, 2.0, 3.0f, 4.0);
+
+ glm::vec4 const A(R);
+ glm::vec4 const B(1.0);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const C(R, S, T, U);
+ Error += glm::all(glm::equal(C, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const D(R, 2.0f, 3.0, 4.0f);
+ Error += glm::all(glm::equal(D, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const E(1.0, S, 3.0f, 4.0);
+ Error += glm::all(glm::equal(E, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const F(R, S, 3.0, 4.0f);
+ Error += glm::all(glm::equal(F, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const G(1.0f, 2.0, T, 4.0);
+ Error += glm::all(glm::equal(G, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const H(R, 2.0, T, 4.0);
+ Error += glm::all(glm::equal(H, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const I(1.0, S, T, 4.0f);
+ Error += glm::all(glm::equal(I, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const J(R, S, T, 4.0f);
+ Error += glm::all(glm::equal(J, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const K(R, 2.0f, 3.0, U);
+ Error += glm::all(glm::equal(K, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const L(1.0f, S, 3.0, U);
+ Error += glm::all(glm::equal(L, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const M(R, S, 3.0, U);
+ Error += glm::all(glm::equal(M, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const N(1.0f, 2.0, T, U);
+ Error += glm::all(glm::equal(N, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const P(R, 2.0, T, U);
+ Error += glm::all(glm::equal(P, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const Q(1.0f, S, T, U);
+ Error += glm::all(glm::equal(Q, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const V(R, S, T, U);
+ Error += glm::all(glm::equal(V, O, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const v1_0(1.0f);
+ glm::vec1 const v1_1(2.0f);
+ glm::vec1 const v1_2(3.0f);
+ glm::vec1 const v1_3(4.0f);
+
+ glm::vec2 const v2_0(1.0f, 2.0f);
+ glm::vec2 const v2_1(2.0f, 3.0f);
+ glm::vec2 const v2_2(3.0f, 4.0f);
+
+ glm::vec3 const v3_0(1.0f, 2.0f, 3.0f);
+ glm::vec3 const v3_1(2.0f, 3.0f, 4.0f);
+
+ glm::vec4 const O(1.0f, 2.0, 3.0f, 4.0);
+
+ glm::vec4 const A(v1_0, v1_1, v2_2);
+ Error += glm::all(glm::equal(A, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const B(1.0f, 2.0f, v2_2);
+ Error += glm::all(glm::equal(B, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const C(v1_0, 2.0f, v2_2);
+ Error += glm::all(glm::equal(C, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const D(1.0f, v1_1, v2_2);
+ Error += glm::all(glm::equal(D, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const E(v2_0, v1_2, v1_3);
+ Error += glm::all(glm::equal(E, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const F(v2_0, 3.0, v1_3);
+ Error += glm::all(glm::equal(F, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const G(v2_0, v1_2, 4.0);
+ Error += glm::all(glm::equal(G, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const H(v2_0, 3.0f, 4.0);
+ Error += glm::all(glm::equal(H, O, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 const v1_0(1.0f);
+ glm::vec1 const v1_1(2.0f);
+ glm::vec1 const v1_2(3.0f);
+ glm::vec1 const v1_3(4.0f);
+
+ glm::vec2 const v2(2.0f, 3.0f);
+
+ glm::vec4 const O(1.0f, 2.0, 3.0f, 4.0);
+
+ glm::vec4 const A(v1_0, v2, v1_3);
+ Error += glm::all(glm::equal(A, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const B(v1_0, v2, 4.0);
+ Error += glm::all(glm::equal(B, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const C(1.0, v2, v1_3);
+ Error += glm::all(glm::equal(C, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const D(1.0f, v2, 4.0);
+ Error += glm::all(glm::equal(D, O, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const E(1.0, v2, 4.0f);
+ Error += glm::all(glm::equal(E, O, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_bvec4_ctor()
+{
+ int Error = 0;
+
+ glm::bvec4 const A(true);
+ glm::bvec4 const B(true);
+ glm::bvec4 const C(false);
+ glm::bvec4 const D = A && B;
+ glm::bvec4 const E = A && C;
+ glm::bvec4 const F = A || C;
+
+ Error += D == glm::bvec4(true) ? 0 : 1;
+ Error += E == glm::bvec4(false) ? 0 : 1;
+ Error += F == glm::bvec4(true) ? 0 : 1;
+
+ bool const G = A == C;
+ bool const H = A != C;
+ Error += !G ? 0 : 1;
+ Error += H ? 0 : 1;
+
+ return Error;
+}
+
+static int test_operators()
+{
+ int Error = 0;
+
+ {
+ glm::ivec4 A(1);
+ glm::ivec4 B(1);
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::vec4 const B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ glm::vec4 const C = A + B;
+ Error += glm::all(glm::equal(C, glm::vec4(5, 7, 9, 11), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const D = B - A;
+ Error += glm::all(glm::equal(D, glm::vec4(3, 3, 3, 3), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const E = A * B;
+ Error += glm::all(glm::equal(E, glm::vec4(4, 10, 18, 28), glm::epsilon<float>()) )? 0 : 1;
+
+ glm::vec4 const F = B / A;
+ Error += glm::all(glm::equal(F, glm::vec4(4, 2.5, 2, 7.0f / 4.0f), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const G = A + 1.0f;
+ Error += glm::all(glm::equal(G, glm::vec4(2, 3, 4, 5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const H = B - 1.0f;
+ Error += glm::all(glm::equal(H, glm::vec4(3, 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const I = A * 2.0f;
+ Error += glm::all(glm::equal(I, glm::vec4(2, 4, 6, 8), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const J = B / 2.0f;
+ Error += glm::all(glm::equal(J, glm::vec4(2, 2.5, 3, 3.5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const K = 1.0f + A;
+ Error += glm::all(glm::equal(K, glm::vec4(2, 3, 4, 5), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const L = 1.0f - B;
+ Error += glm::all(glm::equal(L, glm::vec4(-3, -4, -5, -6), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const M = 2.0f * A;
+ Error += glm::all(glm::equal(M, glm::vec4(2, 4, 6, 8), glm::epsilon<float>())) ? 0 : 1;
+
+ glm::vec4 const N = 2.0f / B;
+ Error += glm::all(glm::equal(N, glm::vec4(0.5, 2.0 / 5.0, 2.0 / 6.0, 2.0 / 7.0), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ A += B;
+ Error += A == glm::ivec4(5, 7, 9, 11) ? 0 : 1;
+
+ A += 1;
+ Error += A == glm::ivec4(6, 8, 10, 12) ? 0 : 1;
+ }
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ B -= A;
+ Error += B == glm::ivec4(3, 3, 3, 3) ? 0 : 1;
+
+ B -= 1;
+ Error += B == glm::ivec4(2, 2, 2, 2) ? 0 : 1;
+ }
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B(4.0f, 5.0f, 6.0f, 7.0f);
+
+ A *= B;
+ Error += A == glm::ivec4(4, 10, 18, 28) ? 0 : 1;
+
+ A *= 2;
+ Error += A == glm::ivec4(8, 20, 36, 56) ? 0 : 1;
+ }
+ {
+ glm::ivec4 A(1.0f, 2.0f, 2.0f, 4.0f);
+ glm::ivec4 B(4.0f, 4.0f, 8.0f, 8.0f);
+
+ B /= A;
+ Error += B == glm::ivec4(4, 2, 4, 2) ? 0 : 1;
+
+ B /= 2;
+ Error += B == glm::ivec4(2, 1, 2, 1) ? 0 : 1;
+ }
+ {
+ glm::ivec4 B(2);
+
+ B /= B.y;
+ Error += B == glm::ivec4(1) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = -A;
+ Error += B == glm::ivec4(-1.0f, -2.0f, -3.0f, -4.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = --A;
+ Error += B == glm::ivec4(0.0f, 1.0f, 2.0f, 3.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A--;
+ Error += B == glm::ivec4(1.0f, 2.0f, 3.0f, 4.0f) ? 0 : 1;
+ Error += A == glm::ivec4(0.0f, 1.0f, 2.0f, 3.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = ++A;
+ Error += B == glm::ivec4(2.0f, 3.0f, 4.0f, 5.0f) ? 0 : 1;
+ }
+
+ {
+ glm::ivec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A++;
+ Error += B == glm::ivec4(1.0f, 2.0f, 3.0f, 4.0f) ? 0 : 1;
+ Error += A == glm::ivec4(2.0f, 3.0f, 4.0f, 5.0f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_equal()
+{
+ int Error = 0;
+
+ {
+ glm::uvec4 const A(1, 2, 3, 4);
+ glm::uvec4 const B(1, 2, 3, 4);
+ Error += A == B ? 0 : 1;
+ Error += A != B ? 1 : 0;
+ }
+
+ {
+ glm::ivec4 const A(1, 2, 3, 4);
+ glm::ivec4 const B(1, 2, 3, 4);
+ Error += A == B ? 0 : 1;
+ Error += A != B ? 1 : 0;
+ }
+
+ return Error;
+}
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec4) == sizeof(glm::lowp_vec4) ? 0 : 1;
+ Error += sizeof(glm::vec4) == sizeof(glm::mediump_vec4) ? 0 : 1;
+ Error += sizeof(glm::vec4) == sizeof(glm::highp_vec4) ? 0 : 1;
+ Error += 16 == sizeof(glm::mediump_vec4) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::lowp_dvec4) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::mediump_dvec4) ? 0 : 1;
+ Error += sizeof(glm::dvec4) == sizeof(glm::highp_dvec4) ? 0 : 1;
+ Error += 32 == sizeof(glm::highp_dvec4) ? 0 : 1;
+ Error += glm::vec4().length() == 4 ? 0 : 1;
+ Error += glm::dvec4().length() == 4 ? 0 : 1;
+ Error += glm::vec4::length() == 4 ? 0 : 1;
+ Error += glm::dvec4::length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_swizzle_partial()
+{
+ int Error = 0;
+
+ glm::vec4 const A(1, 2, 3, 4);
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::vec4 B(A.xy, A.zw);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+ {
+ glm::vec4 B(A.xy, 3.0f, 4.0f);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+ {
+ glm::vec4 B(1.0f, A.yz, 4.0f);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+ {
+ glm::vec4 B(1.0f, 2.0f, A.zw);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 B(A.xyz, 4.0f);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+ {
+ glm::vec4 B(1.0f, A.yzw);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+ return Error;
+}
+
+static int test_swizzle()
+{
+ int Error = 0;
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ {
+ glm::ivec4 A = glm::ivec4(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::ivec4 B = A.xyzw;
+ glm::ivec4 C(A.xyzw);
+ glm::ivec4 D(A.xyzw());
+ glm::ivec4 E(A.x, A.yzw);
+ glm::ivec4 F(A.x, A.yzw());
+ glm::ivec4 G(A.xyz, A.w);
+ glm::ivec4 H(A.xyz(), A.w);
+ glm::ivec4 I(A.xy, A.zw);
+ glm::ivec4 J(A.xy(), A.zw());
+ glm::ivec4 K(A.x, A.y, A.zw);
+ glm::ivec4 L(A.x, A.yz, A.w);
+ glm::ivec4 M(A.xy, A.z, A.w);
+
+ Error += glm::all(glm::equal(A, B)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, E)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, F)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, G)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, H)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, I)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, J)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, K)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, L)) ? 0 : 1;
+ Error += glm::all(glm::equal(A, M)) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+ {
+ glm::vec4 A = glm::vec4(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::vec4 B = A.xyzw();
+ glm::vec4 C(A.xyzw());
+ glm::vec4 D(A.xyzw());
+ glm::vec4 E(A.x, A.yzw());
+ glm::vec4 F(A.x, A.yzw());
+ glm::vec4 G(A.xyz(), A.w);
+ glm::vec4 H(A.xyz(), A.w);
+ glm::vec4 I(A.xy(), A.zw());
+ glm::vec4 J(A.xy(), A.zw());
+ glm::vec4 K(A.x, A.y, A.zw());
+ glm::vec4 L(A.x, A.yz(), A.w);
+ glm::vec4 M(A.xy(), A.z, A.w);
+
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, C, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, D, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, E, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, F, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, G, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, H, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, I, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, J, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, K, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, L, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(A, M, glm::epsilon<float>())) ? 0 : 1;
+ }
+# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR || GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+
+ return Error;
+}
+
+static int test_operator_increment()
+{
+ int Error = 0;
+
+ glm::ivec4 v0(1);
+ glm::ivec4 v1(v0);
+ glm::ivec4 v2(v0);
+ glm::ivec4 v3 = ++v1;
+ glm::ivec4 v4 = v2++;
+
+ Error += glm::all(glm::equal(v0, v4)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v2)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v3)) ? 0 : 1;
+
+ int i0(1);
+ int i1(i0);
+ int i2(i0);
+ int i3 = ++i1;
+ int i4 = i2++;
+
+ Error += i0 == i4 ? 0 : 1;
+ Error += i1 == i2 ? 0 : 1;
+ Error += i1 == i3 ? 0 : 1;
+
+ return Error;
+}
+
+struct AoS
+{
+ glm::vec4 A;
+ glm::vec3 B;
+ glm::vec3 C;
+ glm::vec2 D;
+};
+
+static int test_perf_AoS(std::size_t Size)
+{
+ int Error = 0;
+
+ std::vector<AoS> In;
+ std::vector<AoS> Out;
+ In.resize(Size);
+ Out.resize(Size);
+
+ std::clock_t StartTime = std::clock();
+
+ for(std::size_t i = 0; i < In.size(); ++i)
+ Out[i] = In[i];
+
+ std::clock_t EndTime = std::clock();
+
+ std::printf("AoS: %d\n", static_cast<int>(EndTime - StartTime));
+
+ return Error;
+}
+
+static int test_perf_SoA(std::size_t Size)
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> InA;
+ std::vector<glm::vec3> InB;
+ std::vector<glm::vec3> InC;
+ std::vector<glm::vec2> InD;
+ std::vector<glm::vec4> OutA;
+ std::vector<glm::vec3> OutB;
+ std::vector<glm::vec3> OutC;
+ std::vector<glm::vec2> OutD;
+
+ InA.resize(Size);
+ InB.resize(Size);
+ InC.resize(Size);
+ InD.resize(Size);
+ OutA.resize(Size);
+ OutB.resize(Size);
+ OutC.resize(Size);
+ OutD.resize(Size);
+
+ std::clock_t StartTime = std::clock();
+
+ for(std::size_t i = 0; i < InA.size(); ++i)
+ {
+ OutA[i] = InA[i];
+ OutB[i] = InB[i];
+ OutC[i] = InC[i];
+ OutD[i] = InD[i];
+ }
+
+ std::clock_t EndTime = std::clock();
+
+ std::printf("SoA: %d\n", static_cast<int>(EndTime - StartTime));
+
+ return Error;
+}
+
+namespace heap
+{
+ struct A
+ {
+ float f;
+ };
+
+ struct B : public A
+ {
+ float g;
+ glm::vec4 v;
+ };
+
+ static int test()
+ {
+ int Error = 0;
+
+ A* p = new B;
+ p->f = 0.0f;
+ delete p;
+
+ Error += sizeof(B) == sizeof(glm::vec4) + sizeof(float) * 2 ? 0 : 1;
+
+ return Error;
+ }
+}//namespace heap
+
+static int test_simd()
+{
+ int Error = 0;
+
+ glm::vec4 const a(std::clock(), std::clock(), std::clock(), std::clock());
+ glm::vec4 const b(std::clock(), std::clock(), std::clock(), std::clock());
+
+ glm::vec4 const c(b * a);
+ glm::vec4 const d(a + c);
+
+ Error += glm::all(glm::greaterThanEqual(d, glm::vec4(0))) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_inheritance()
+{
+ struct my_vec4 : public glm::vec4
+ {
+ my_vec4()
+ : glm::vec4(76.f, 75.f, 74.f, 73.f)
+ , member(82)
+ {}
+
+ int member;
+ };
+
+ int Error = 0;
+
+ my_vec4 v;
+
+ Error += v.member == 82 ? 0 : 1;
+ Error += glm::equal(v.x, 76.f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(v.y, 75.f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(v.z, 74.f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(v.w, 73.f, glm::epsilon<float>()) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::vec4::length() == 4, "GLM: Failed constexpr");
+ static_assert(glm::vec4(1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec4(1.0f, -1.0f, -1.0f, -1.0f).x > 0.0f, "GLM: Failed constexpr");
+ static_assert(glm::vec4(1.0f, -1.0f, -1.0f, -1.0f).y < 0.0f, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+/*
+static int test_simd_gen()
+{
+ int Error = 0;
+
+ int const C = static_cast<int>(std::clock());
+ int const D = static_cast<int>(std::clock());
+
+ glm::ivec4 const A(C);
+ glm::ivec4 const B(D);
+
+ Error += A != B ? 0 : 1;
+
+ return Error;
+}
+*/
+int main()
+{
+ int Error = 0;
+
+ //Error += test_simd_gen();
+
+/*
+ {
+ glm::ivec4 const a1(2);
+ glm::ivec4 const b1 = a1 >> 1;
+
+ __m128i const e1 = _mm_set1_epi32(2);
+ __m128i const f1 = _mm_srli_epi32(e1, 1);
+
+ glm::ivec4 const g1 = *reinterpret_cast<glm::ivec4 const* const>(&f1);
+
+ glm::ivec4 const a2(-2);
+ glm::ivec4 const b2 = a2 >> 1;
+
+ __m128i const e2 = _mm_set1_epi32(-1);
+ __m128i const f2 = _mm_srli_epi32(e2, 1);
+
+ glm::ivec4 const g2 = *reinterpret_cast<glm::ivec4 const* const>(&f2);
+
+ std::printf("GNI\n");
+ }
+
+ {
+ glm::uvec4 const a1(2);
+ glm::uvec4 const b1 = a1 >> 1u;
+
+ __m128i const e1 = _mm_set1_epi32(2);
+ __m128i const f1 = _mm_srli_epi32(e1, 1);
+
+ glm::uvec4 const g1 = *reinterpret_cast<glm::uvec4 const* const>(&f1);
+
+ glm::uvec4 const a2(-1);
+ glm::uvec4 const b2 = a2 >> 1u;
+
+ __m128i const e2 = _mm_set1_epi32(-1);
+ __m128i const f2 = _mm_srli_epi32(e2, 1);
+
+ glm::uvec4 const g2 = *reinterpret_cast<glm::uvec4 const* const>(&f2);
+
+ std::printf("GNI\n");
+ }
+*/
+
+# ifdef NDEBUG
+ std::size_t const Size(1000000);
+# else
+ std::size_t const Size(1);
+# endif//NDEBUG
+
+ Error += test_perf_AoS(Size);
+ Error += test_perf_SoA(Size);
+
+ Error += test_vec4_ctor();
+ Error += test_bvec4_ctor();
+ Error += test_size();
+ Error += test_operators();
+ Error += test_equal();
+ Error += test_swizzle();
+ Error += test_swizzle_partial();
+ Error += test_simd();
+ Error += test_operator_increment();
+ Error += heap::test();
+ Error += test_inheritance();
+ Error += test_constexpr();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/ext/CMakeLists.txt b/3rdparty/glm/source/test/ext/CMakeLists.txt
new file mode 100644
index 0000000..40c91ba
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/CMakeLists.txt
@@ -0,0 +1,55 @@
+glmCreateTestGTC(ext_matrix_relational)
+glmCreateTestGTC(ext_matrix_transform)
+glmCreateTestGTC(ext_matrix_common)
+glmCreateTestGTC(ext_matrix_integer)
+glmCreateTestGTC(ext_matrix_int2x2_sized)
+glmCreateTestGTC(ext_matrix_int2x3_sized)
+glmCreateTestGTC(ext_matrix_int2x4_sized)
+glmCreateTestGTC(ext_matrix_int3x2_sized)
+glmCreateTestGTC(ext_matrix_int3x3_sized)
+glmCreateTestGTC(ext_matrix_int3x4_sized)
+glmCreateTestGTC(ext_matrix_int4x2_sized)
+glmCreateTestGTC(ext_matrix_int4x3_sized)
+glmCreateTestGTC(ext_matrix_int4x4_sized)
+glmCreateTestGTC(ext_matrix_uint2x2_sized)
+glmCreateTestGTC(ext_matrix_uint2x3_sized)
+glmCreateTestGTC(ext_matrix_uint2x4_sized)
+glmCreateTestGTC(ext_matrix_uint3x2_sized)
+glmCreateTestGTC(ext_matrix_uint3x3_sized)
+glmCreateTestGTC(ext_matrix_uint3x4_sized)
+glmCreateTestGTC(ext_matrix_uint4x2_sized)
+glmCreateTestGTC(ext_matrix_uint4x3_sized)
+glmCreateTestGTC(ext_matrix_uint4x4_sized)
+glmCreateTestGTC(ext_quaternion_common)
+glmCreateTestGTC(ext_quaternion_exponential)
+glmCreateTestGTC(ext_quaternion_geometric)
+glmCreateTestGTC(ext_quaternion_relational)
+glmCreateTestGTC(ext_quaternion_transform)
+glmCreateTestGTC(ext_quaternion_trigonometric)
+glmCreateTestGTC(ext_quaternion_type)
+glmCreateTestGTC(ext_scalar_common)
+glmCreateTestGTC(ext_scalar_constants)
+glmCreateTestGTC(ext_scalar_int_sized)
+glmCreateTestGTC(ext_scalar_uint_sized)
+glmCreateTestGTC(ext_scalar_integer)
+glmCreateTestGTC(ext_scalar_ulp)
+glmCreateTestGTC(ext_scalar_reciprocal)
+glmCreateTestGTC(ext_scalar_relational)
+glmCreateTestGTC(ext_vec1)
+glmCreateTestGTC(ext_vector_bool1)
+glmCreateTestGTC(ext_vector_common)
+glmCreateTestGTC(ext_vector_iec559)
+glmCreateTestGTC(ext_vector_int1_sized)
+glmCreateTestGTC(ext_vector_int2_sized)
+glmCreateTestGTC(ext_vector_int3_sized)
+glmCreateTestGTC(ext_vector_int4_sized)
+glmCreateTestGTC(ext_vector_integer)
+glmCreateTestGTC(ext_vector_integer_sized)
+glmCreateTestGTC(ext_vector_uint1_sized)
+glmCreateTestGTC(ext_vector_uint2_sized)
+glmCreateTestGTC(ext_vector_uint3_sized)
+glmCreateTestGTC(ext_vector_uint4_sized)
+glmCreateTestGTC(ext_vector_reciprocal)
+glmCreateTestGTC(ext_vector_relational)
+glmCreateTestGTC(ext_vector_ulp)
+
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_clip_space.cpp b/3rdparty/glm/source/test/ext/ext_matrix_clip_space.cpp
new file mode 100644
index 0000000..ca84c19
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_clip_space.cpp
@@ -0,0 +1,13 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/matrix_clip_space.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_float3.hpp>
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_common.cpp b/3rdparty/glm/source/test/ext/ext_matrix_common.cpp
new file mode 100644
index 0000000..df0c3fe
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_common.cpp
@@ -0,0 +1,53 @@
+#include <glm/ext/matrix_common.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_bool4.hpp>
+
+static int test_mix()
+{
+ int Error = 0;
+
+ {
+ glm::mat4 A(2);
+ glm::mat4 B(4);
+ glm::mat4 C = glm::mix(A, B, 0.5f);
+ glm::bvec4 const D = glm::equal(C, glm::mat4(3), 1);
+ Error += glm::all(D) ? 0 : 1;
+ }
+
+ {
+ glm::mat4 A(2);
+ glm::mat4 B(4);
+ glm::mat4 C = glm::mix(A, B, 0.5);
+ glm::bvec4 const D = glm::equal(C, glm::mat4(3), 1);
+ Error += glm::all(D) ? 0 : 1;
+ }
+
+ {
+ glm::dmat4 A(2);
+ glm::dmat4 B(4);
+ glm::dmat4 C = glm::mix(A, B, 0.5);
+ glm::bvec4 const D = glm::equal(C, glm::dmat4(3), 1);
+ Error += glm::all(D) ? 0 : 1;
+ }
+
+ {
+ glm::dmat4 A(2);
+ glm::dmat4 B(4);
+ glm::dmat4 C = glm::mix(A, B, 0.5f);
+ glm::bvec4 const D = glm::equal(C, glm::dmat4(3), 1);
+ Error += glm::all(D) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_mix();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int2x2_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int2x2_sized.cpp
new file mode 100644
index 0000000..93b6e86
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int2x2_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int2x2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+ static_assert(sizeof(glm::i8mat2x2) == 4, "int8 size isn't 1 byte on this platform");
+ static_assert(sizeof(glm::i16mat2x2) == 8, "int16 size isn't 2 bytes on this platform");
+ static_assert(sizeof(glm::i32mat2x2) == 16, "int32 size isn't 4 bytes on this platform");
+ static_assert(sizeof(glm::i64mat2x2) == 32, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat2x2) < sizeof(glm::i16mat2x2) ? 0 : 1;
+ Error += sizeof(glm::i16mat2x2) < sizeof(glm::i32mat2x2) ? 0 : 1;
+ Error += sizeof(glm::i32mat2x2) < sizeof(glm::i64mat2x2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int2x3_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int2x3_sized.cpp
new file mode 100644
index 0000000..058f57b
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int2x3_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int2x3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat2x3) == 6, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat2x3) == 12, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat2x3) == 24, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat2x3) == 48, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat2x3) < sizeof(glm::i16mat2x3) ? 0 : 1;
+ Error += sizeof(glm::i16mat2x3) < sizeof(glm::i32mat2x3) ? 0 : 1;
+ Error += sizeof(glm::i32mat2x3) < sizeof(glm::i64mat2x3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int2x4_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int2x4_sized.cpp
new file mode 100644
index 0000000..c20198d
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int2x4_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int2x4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat2x4) == 8, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat2x4) == 16, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat2x4) == 32, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat2x4) == 64, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat2x4) < sizeof(glm::i16mat2x4) ? 0 : 1;
+ Error += sizeof(glm::i16mat2x4) < sizeof(glm::i32mat2x4) ? 0 : 1;
+ Error += sizeof(glm::i32mat2x4) < sizeof(glm::i64mat2x4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int3x2_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int3x2_sized.cpp
new file mode 100644
index 0000000..1d14029
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int3x2_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int3x2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat3x2) == 6, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat3x2) == 12, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat3x2) == 24, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat3x2) == 48, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat3x2) < sizeof(glm::i16mat3x2) ? 0 : 1;
+ Error += sizeof(glm::i16mat3x2) < sizeof(glm::i32mat3x2) ? 0 : 1;
+ Error += sizeof(glm::i32mat3x2) < sizeof(glm::i64mat3x2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int3x3_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int3x3_sized.cpp
new file mode 100644
index 0000000..d82836c
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int3x3_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int3x3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat3x3) == 9, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat3x3) == 18, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat3x3) == 36, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat3x3) == 72, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat3x3) < sizeof(glm::i16mat3x3) ? 0 : 1;
+ Error += sizeof(glm::i16mat3x3) < sizeof(glm::i32mat3x3) ? 0 : 1;
+ Error += sizeof(glm::i32mat3x3) < sizeof(glm::i64mat3x3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int3x4_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int3x4_sized.cpp
new file mode 100644
index 0000000..52b7d52
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int3x4_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int3x4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat3x4) == 12, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat3x4) == 24, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat3x4) == 48, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat3x4) == 96, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat3x4) < sizeof(glm::i16mat3x4) ? 0 : 1;
+ Error += sizeof(glm::i16mat3x4) < sizeof(glm::i32mat3x4) ? 0 : 1;
+ Error += sizeof(glm::i32mat3x4) < sizeof(glm::i64mat3x4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int4x2_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int4x2_sized.cpp
new file mode 100644
index 0000000..3c4566f
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int4x2_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int4x2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat4x2) == 8, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat4x2) == 16, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat4x2) == 32, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat4x2) == 64, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat4x2) < sizeof(glm::i16mat4x2) ? 0 : 1;
+ Error += sizeof(glm::i16mat4x2) < sizeof(glm::i32mat4x2) ? 0 : 1;
+ Error += sizeof(glm::i32mat4x2) < sizeof(glm::i64mat4x2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int4x3_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int4x3_sized.cpp
new file mode 100644
index 0000000..fb882af
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int4x3_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int4x3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat4x3) == 12, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat4x3) == 24, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat4x3) == 48, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat4x3) == 96, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat4x3) < sizeof(glm::i16mat4x3) ? 0 : 1;
+ Error += sizeof(glm::i16mat4x3) < sizeof(glm::i32mat4x3) ? 0 : 1;
+ Error += sizeof(glm::i32mat4x3) < sizeof(glm::i64mat4x3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_int4x4_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_int4x4_sized.cpp
new file mode 100644
index 0000000..02769ea
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_int4x4_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_int4x4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8mat4x4) == 16, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::i16mat4x4) == 32, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::i32mat4x4) == 64, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i64mat4x4) == 128, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8mat4x4) < sizeof(glm::i16mat4x4) ? 0 : 1;
+ Error += sizeof(glm::i16mat4x4) < sizeof(glm::i32mat4x4) ? 0 : 1;
+ Error += sizeof(glm::i32mat4x4) < sizeof(glm::i64mat4x4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_integer.cpp b/3rdparty/glm/source/test/ext/ext_matrix_integer.cpp
new file mode 100644
index 0000000..c26d557
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_integer.cpp
@@ -0,0 +1,237 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/matrix_integer.hpp>
+#include <glm/ext/matrix_int2x2.hpp>
+#include <glm/ext/matrix_int2x3.hpp>
+#include <glm/ext/matrix_int2x4.hpp>
+#include <glm/ext/matrix_int3x2.hpp>
+#include <glm/ext/matrix_int3x3.hpp>
+#include <glm/ext/matrix_int3x4.hpp>
+#include <glm/ext/matrix_int4x2.hpp>
+#include <glm/ext/matrix_int4x3.hpp>
+#include <glm/ext/matrix_int4x4.hpp>
+
+using namespace glm;
+
+int test_matrixCompMult()
+{
+ int Error = 0;
+
+ {
+ imat2 m(0, 1, 2, 3);
+ imat2 n = matrixCompMult(m, m);
+ imat2 expected = imat2(0, 1, 4, 9);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat2x3 m(0, 1, 2, 3, 4, 5);
+ imat2x3 n = matrixCompMult(m, m);
+ imat2x3 expected = imat2x3(0, 1, 4, 9, 16, 25);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat2x4 m(0, 1, 2, 3, 4, 5, 6, 7);
+ imat2x4 n = matrixCompMult(m, m);
+ imat2x4 expected = imat2x4(0, 1, 4, 9, 16, 25, 36, 49);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat3 m(0, 1, 2, 3, 4, 5, 6, 7, 8);
+ imat3 n = matrixCompMult(m, m);
+ imat3 expected = imat3(0, 1, 4, 9, 16, 25, 36, 49, 64);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat3x2 m(0, 1, 2, 3, 4, 5);
+ imat3x2 n = matrixCompMult(m, m);
+ imat3x2 expected = imat3x2(0, 1, 4, 9, 16, 25);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat3x4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ imat3x4 n = matrixCompMult(m, m);
+ imat3x4 expected = imat3x4(0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ imat4 n = matrixCompMult(m, m);
+ imat4 expected = imat4(0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat4x2 m(0, 1, 2, 3, 4, 5, 6, 7);
+ imat4x2 n = matrixCompMult(m, m);
+ imat4x2 expected = imat4x2(0, 1, 4, 9, 16, 25, 36, 49);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ {
+ imat4x3 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ imat4x3 n = matrixCompMult(m, m);
+ imat4x3 expected = imat4x3(0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121);
+ Error += all(equal(n, expected)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_outerProduct()
+{
+ int Error = 0;
+
+ {
+ glm::imat2x2 const m = glm::outerProduct(glm::ivec2(1), glm::ivec2(1));
+ Error += all(equal(m, glm::imat2x2(1, 1, 1, 1))) ? 0 : 1;
+ }
+ {
+ glm::imat2x3 const m = glm::outerProduct(glm::ivec3(1), glm::ivec2(1));
+ Error += all(equal(m, glm::imat2x3(1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+ {
+ glm::imat2x4 const m = glm::outerProduct(glm::ivec4(1), glm::ivec2(1));
+ Error += all(equal(m, glm::imat2x4(1, 1, 1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+
+ {
+ glm::imat3x2 const m = glm::outerProduct(glm::ivec2(1), glm::ivec3(1));
+ Error += all(equal(m, glm::imat3x2(1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+ {
+ glm::imat3x3 const m = glm::outerProduct(glm::ivec3(1), glm::ivec3(1));
+ Error += all(equal(m, glm::imat3x3(1, 1, 1, 1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+ {
+ glm::imat3x4 const m = glm::outerProduct(glm::ivec4(1), glm::ivec3(1));
+ Error += all(equal(m, glm::imat3x4(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+
+
+ {
+ glm::imat4x2 const m = glm::outerProduct(glm::ivec2(1), glm::ivec4(1));
+ Error += all(equal(m, glm::imat4x2(1, 1, 1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+ {
+ glm::imat4x3 const m = glm::outerProduct(glm::ivec3(1), glm::ivec4(1));
+ Error += all(equal(m, glm::imat4x3(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+ {
+ glm::imat4x4 const m = glm::outerProduct(glm::ivec4(1), glm::ivec4(1));
+ Error += all(equal(m, glm::imat4x4(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_transpose()
+{
+ int Error = 0;
+
+ {
+ imat2 const m(0, 1, 2, 3);
+ imat2 const t = transpose(m);
+ imat2 const expected = imat2(0, 2, 1, 3);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat2x3 m(0, 1, 2, 3, 4, 5);
+ imat3x2 t = transpose(m);
+ imat3x2 const expected = imat3x2(0, 3, 1, 4, 2, 5);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat2x4 m(0, 1, 2, 3, 4, 5, 6, 7);
+ imat4x2 t = transpose(m);
+ imat4x2 const expected = imat4x2(0, 4, 1, 5, 2, 6, 3, 7);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat3 m(0, 1, 2, 3, 4, 5, 6, 7, 8);
+ imat3 t = transpose(m);
+ imat3 const expected = imat3(0, 3, 6, 1, 4, 7, 2, 5, 8);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat3x2 m(0, 1, 2, 3, 4, 5);
+ imat2x3 t = transpose(m);
+ imat2x3 const expected = imat2x3(0, 2, 4, 1, 3, 5);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat3x4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ imat4x3 t = transpose(m);
+ imat4x3 const expected = imat4x3(0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat4 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ imat4 t = transpose(m);
+ imat4 const expected = imat4(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat4x2 m(0, 1, 2, 3, 4, 5, 6, 7);
+ imat2x4 t = transpose(m);
+ imat2x4 const expected = imat2x4(0, 2, 4, 6, 1, 3, 5, 7);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ {
+ imat4x3 m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+ imat3x4 t = transpose(m);
+ imat3x4 const expected = imat3x4(0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11);
+ Error += all(equal(t, expected)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_determinant()
+{
+ int Error = 0;
+
+ {
+ imat2 const m(1, 1, 1, 1);
+ int const t = determinant(m);
+ Error += t == 0 ? 0 : 1;
+ }
+
+ {
+ imat3 m(1, 1, 1, 1, 1, 1, 1, 1, 1);
+ int t = determinant(m);
+ Error += t == 0 ? 0 : 1;
+ }
+
+ {
+ imat4 m(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
+ int t = determinant(m);
+ Error += t == 0 ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_matrixCompMult();
+ Error += test_outerProduct();
+ Error += test_transpose();
+ Error += test_determinant();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_projection.cpp b/3rdparty/glm/source/test/ext/ext_matrix_projection.cpp
new file mode 100644
index 0000000..88f6ae9
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_projection.cpp
@@ -0,0 +1,13 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/matrix_projection.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_float3.hpp>
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_relational.cpp b/3rdparty/glm/source/test/ext/ext_matrix_relational.cpp
new file mode 100644
index 0000000..64c0dae
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_relational.cpp
@@ -0,0 +1,163 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/matrix_double2x2.hpp>
+#include <glm/ext/matrix_double2x3.hpp>
+#include <glm/ext/matrix_double2x4.hpp>
+#include <glm/ext/matrix_double3x2.hpp>
+#include <glm/ext/matrix_double3x3.hpp>
+#include <glm/ext/matrix_double3x4.hpp>
+#include <glm/ext/matrix_double4x2.hpp>
+#include <glm/ext/matrix_double4x3.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/vector_double2.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double4.hpp>
+#include <glm/ext/matrix_float2x2.hpp>
+#include <glm/ext/matrix_float2x3.hpp>
+#include <glm/ext/matrix_float2x4.hpp>
+#include <glm/ext/matrix_float3x2.hpp>
+#include <glm/ext/matrix_float3x3.hpp>
+#include <glm/ext/matrix_float3x4.hpp>
+#include <glm/ext/matrix_float4x2.hpp>
+#include <glm/ext/matrix_float4x3.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/scalar_ulp.hpp>
+
+template <typename matType, typename vecType>
+static int test_equal()
+{
+ typedef typename matType::value_type valType;
+
+ valType const Epsilon = static_cast<valType>(0.001f);
+ valType const One = static_cast<valType>(1);
+ valType const Two = static_cast<valType>(2);
+
+ int Error = 0;
+
+ Error += glm::all(glm::equal(matType(One), matType(One), Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::equal(matType(One), matType(Two), vecType(Epsilon))) ? 1 : 0;
+
+ return Error;
+}
+
+template <typename matType, typename vecType>
+static int test_notEqual()
+{
+ typedef typename matType::value_type valType;
+
+ valType const Epsilon = static_cast<valType>(0.001f);
+ valType const One = static_cast<valType>(1);
+ valType const Two = static_cast<valType>(2);
+
+ int Error = 0;
+
+ Error += !glm::any(glm::notEqual(matType(One), matType(One), Epsilon)) ? 0 : 1;
+ Error += !glm::any(glm::notEqual(matType(One), matType(Two), vecType(Epsilon))) ? 1 : 0;
+
+ return Error;
+}
+
+
+template <typename T>
+static int test_equal_ulps()
+{
+ typedef glm::mat<4, 4, T, glm::defaultp> mat4;
+
+ T const One(1);
+ mat4 const Ones(1);
+
+ int Error = 0;
+
+ T const ULP1Plus = glm::nextFloat(One);
+ Error += glm::all(glm::equal(Ones, mat4(ULP1Plus), 1)) ? 0 : 1;
+
+ T const ULP2Plus = glm::nextFloat(ULP1Plus);
+ Error += !glm::all(glm::equal(Ones, mat4(ULP2Plus), 1)) ? 0 : 1;
+
+ T const ULP1Minus = glm::prevFloat(One);
+ Error += glm::all(glm::equal(Ones, mat4(ULP1Minus), 1)) ? 0 : 1;
+
+ T const ULP2Minus = glm::prevFloat(ULP1Minus);
+ Error += !glm::all(glm::equal(Ones, mat4(ULP2Minus), 1)) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_notEqual_ulps()
+{
+ typedef glm::mat<4, 4, T, glm::defaultp> mat4;
+
+ T const One(1);
+ mat4 const Ones(1);
+
+ int Error = 0;
+
+ T const ULP1Plus = glm::nextFloat(One);
+ Error += !glm::all(glm::notEqual(Ones, mat4(ULP1Plus), 1)) ? 0 : 1;
+
+ T const ULP2Plus = glm::nextFloat(ULP1Plus);
+ Error += glm::all(glm::notEqual(Ones, mat4(ULP2Plus), 1)) ? 0 : 1;
+
+ T const ULP1Minus = glm::prevFloat(One);
+ Error += !glm::all(glm::notEqual(Ones, mat4(ULP1Minus), 1)) ? 0 : 1;
+
+ T const ULP2Minus = glm::prevFloat(ULP1Minus);
+ Error += glm::all(glm::notEqual(Ones, mat4(ULP2Minus), 1)) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_equal_ulps<float>();
+ Error += test_equal_ulps<double>();
+ Error += test_notEqual_ulps<float>();
+ Error += test_notEqual_ulps<double>();
+
+ Error += test_equal<glm::mat2x2, glm::vec2>();
+ Error += test_equal<glm::mat2x3, glm::vec2>();
+ Error += test_equal<glm::mat2x4, glm::vec2>();
+ Error += test_equal<glm::mat3x2, glm::vec3>();
+ Error += test_equal<glm::mat3x3, glm::vec3>();
+ Error += test_equal<glm::mat3x4, glm::vec3>();
+ Error += test_equal<glm::mat4x2, glm::vec4>();
+ Error += test_equal<glm::mat4x3, glm::vec4>();
+ Error += test_equal<glm::mat4x4, glm::vec4>();
+
+ Error += test_equal<glm::dmat2x2, glm::dvec2>();
+ Error += test_equal<glm::dmat2x3, glm::dvec2>();
+ Error += test_equal<glm::dmat2x4, glm::dvec2>();
+ Error += test_equal<glm::dmat3x2, glm::dvec3>();
+ Error += test_equal<glm::dmat3x3, glm::dvec3>();
+ Error += test_equal<glm::dmat3x4, glm::dvec3>();
+ Error += test_equal<glm::dmat4x2, glm::dvec4>();
+ Error += test_equal<glm::dmat4x3, glm::dvec4>();
+ Error += test_equal<glm::dmat4x4, glm::dvec4>();
+
+ Error += test_notEqual<glm::mat2x2, glm::vec2>();
+ Error += test_notEqual<glm::mat2x3, glm::vec2>();
+ Error += test_notEqual<glm::mat2x4, glm::vec2>();
+ Error += test_notEqual<glm::mat3x2, glm::vec3>();
+ Error += test_notEqual<glm::mat3x3, glm::vec3>();
+ Error += test_notEqual<glm::mat3x4, glm::vec3>();
+ Error += test_notEqual<glm::mat4x2, glm::vec4>();
+ Error += test_notEqual<glm::mat4x3, glm::vec4>();
+ Error += test_notEqual<glm::mat4x4, glm::vec4>();
+
+ Error += test_notEqual<glm::dmat2x2, glm::dvec2>();
+ Error += test_notEqual<glm::dmat2x3, glm::dvec2>();
+ Error += test_notEqual<glm::dmat2x4, glm::dvec2>();
+ Error += test_notEqual<glm::dmat3x2, glm::dvec3>();
+ Error += test_notEqual<glm::dmat3x3, glm::dvec3>();
+ Error += test_notEqual<glm::dmat3x4, glm::dvec3>();
+ Error += test_notEqual<glm::dmat4x2, glm::dvec4>();
+ Error += test_notEqual<glm::dmat4x3, glm::dvec4>();
+ Error += test_notEqual<glm::dmat4x4, glm::dvec4>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_transform.cpp b/3rdparty/glm/source/test/ext/ext_matrix_transform.cpp
new file mode 100644
index 0000000..cf653b7
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_transform.cpp
@@ -0,0 +1,61 @@
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_float3.hpp>
+
+static int test_translate()
+{
+ int Error = 0;
+
+ glm::mat4 const M(1.0f);
+ glm::vec3 const V(1.0f);
+
+ glm::mat4 const T = glm::translate(M, V);
+ Error += glm::all(glm::equal(T[3], glm::vec4(1.0f), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_scale()
+{
+ int Error = 0;
+
+ glm::mat4 const M(1.0f);
+ glm::vec3 const V(2.0f);
+
+ glm::mat4 const S = glm::scale(M, V);
+ glm::mat4 const R = glm::mat4(
+ glm::vec4(2, 0, 0, 0),
+ glm::vec4(0, 2, 0, 0),
+ glm::vec4(0, 0, 2, 0),
+ glm::vec4(0, 0, 0, 1));
+ Error += glm::all(glm::equal(S, R, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_rotate()
+{
+ int Error = 0;
+
+ glm::vec4 const A(1.0f, 0.0f, 0.0f, 1.0f);
+
+ glm::mat4 const R = glm::rotate(glm::mat4(1.0f), glm::radians(90.f), glm::vec3(0, 0, 1));
+ glm::vec4 const B = R * A;
+ Error += glm::all(glm::equal(B, glm::vec4(0.0f, 1.0f, 0.0f, 1.0f), 0.0001f)) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_translate();
+ Error += test_scale();
+ Error += test_rotate();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint2x2_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint2x2_sized.cpp
new file mode 100644
index 0000000..a9bd49c
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint2x2_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint2x2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+ static_assert(sizeof(glm::u8mat2x2) == 4, "uint8 size isn't 1 byte on this platform");
+ static_assert(sizeof(glm::u16mat2x2) == 8, "uint16 size isn't 2 bytes on this platform");
+ static_assert(sizeof(glm::u32mat2x2) == 16, "uint32 size isn't 4 bytes on this platform");
+ static_assert(sizeof(glm::u64mat2x2) == 32, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat2x2) < sizeof(glm::u16mat2x2) ? 0 : 1;
+ Error += sizeof(glm::u16mat2x2) < sizeof(glm::u32mat2x2) ? 0 : 1;
+ Error += sizeof(glm::u32mat2x2) < sizeof(glm::u64mat2x2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint2x3_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint2x3_sized.cpp
new file mode 100644
index 0000000..0b75893
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint2x3_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint2x3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat2x3) == 6, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat2x3) == 12, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat2x3) == 24, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat2x3) == 48, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat2x3) < sizeof(glm::u16mat2x3) ? 0 : 1;
+ Error += sizeof(glm::u16mat2x3) < sizeof(glm::u32mat2x3) ? 0 : 1;
+ Error += sizeof(glm::u32mat2x3) < sizeof(glm::u64mat2x3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint2x4_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint2x4_sized.cpp
new file mode 100644
index 0000000..84af4dd
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint2x4_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint2x4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat2x4) == 8, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat2x4) == 16, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat2x4) == 32, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat2x4) == 64, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat2x4) < sizeof(glm::u16mat2x4) ? 0 : 1;
+ Error += sizeof(glm::u16mat2x4) < sizeof(glm::u32mat2x4) ? 0 : 1;
+ Error += sizeof(glm::u32mat2x4) < sizeof(glm::u64mat2x4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint3x2_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint3x2_sized.cpp
new file mode 100644
index 0000000..3c035e2
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint3x2_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint3x2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat3x2) == 6, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat3x2) == 12, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat3x2) == 24, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat3x2) == 48, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat3x2) < sizeof(glm::u16mat3x2) ? 0 : 1;
+ Error += sizeof(glm::u16mat3x2) < sizeof(glm::u32mat3x2) ? 0 : 1;
+ Error += sizeof(glm::u32mat3x2) < sizeof(glm::u64mat3x2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint3x3_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint3x3_sized.cpp
new file mode 100644
index 0000000..64384ed
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint3x3_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint3x3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat3x3) == 9, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat3x3) == 18, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat3x3) == 36, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat3x3) == 72, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat3x3) < sizeof(glm::u16mat3x3) ? 0 : 1;
+ Error += sizeof(glm::u16mat3x3) < sizeof(glm::u32mat3x3) ? 0 : 1;
+ Error += sizeof(glm::u32mat3x3) < sizeof(glm::u64mat3x3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint3x4_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint3x4_sized.cpp
new file mode 100644
index 0000000..7f743df
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint3x4_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint3x4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat3x4) == 12, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat3x4) == 24, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat3x4) == 48, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat3x4) == 96, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat3x4) < sizeof(glm::u16mat3x4) ? 0 : 1;
+ Error += sizeof(glm::u16mat3x4) < sizeof(glm::u32mat3x4) ? 0 : 1;
+ Error += sizeof(glm::u32mat3x4) < sizeof(glm::u64mat3x4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint4x2_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint4x2_sized.cpp
new file mode 100644
index 0000000..2b4453f
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint4x2_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint4x2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat4x2) == 8, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat4x2) == 16, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat4x2) == 32, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat4x2) == 64, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat4x2) < sizeof(glm::u16mat4x2) ? 0 : 1;
+ Error += sizeof(glm::u16mat4x2) < sizeof(glm::u32mat4x2) ? 0 : 1;
+ Error += sizeof(glm::u32mat4x2) < sizeof(glm::u64mat4x2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint4x3_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint4x3_sized.cpp
new file mode 100644
index 0000000..2820bde
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint4x3_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint4x3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat4x3) == 12, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat4x3) == 24, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat4x3) == 48, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat4x3) == 96, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat4x3) < sizeof(glm::u16mat4x3) ? 0 : 1;
+ Error += sizeof(glm::u16mat4x3) < sizeof(glm::u32mat4x3) ? 0 : 1;
+ Error += sizeof(glm::u32mat4x3) < sizeof(glm::u64mat4x3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_matrix_uint4x4_sized.cpp b/3rdparty/glm/source/test/ext/ext_matrix_uint4x4_sized.cpp
new file mode 100644
index 0000000..8f9e239
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_matrix_uint4x4_sized.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/matrix_uint4x4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8mat4x4) == 16, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16mat4x4) == 32, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32mat4x4) == 64, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64mat4x4) == 128, "uint64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8mat4x4) < sizeof(glm::u16mat4x4) ? 0 : 1;
+ Error += sizeof(glm::u16mat4x4) < sizeof(glm::u32mat4x4) ? 0 : 1;
+ Error += sizeof(glm::u32mat4x4) < sizeof(glm::u64mat4x4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_common.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_common.cpp
new file mode 100644
index 0000000..861aa65
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_common.cpp
@@ -0,0 +1,61 @@
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/quaternion_common.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/quaternion_relational.hpp>
+#include <glm/ext/quaternion_trigonometric.hpp>
+#include <glm/ext/scalar_constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+
+static int test_conjugate()
+{
+ int Error = 0;
+
+ glm::quat const A(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0));
+ glm::quat const C = glm::conjugate(A);
+ Error += glm::any(glm::notEqual(A, C, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::quat const B = glm::conjugate(C);
+ Error += glm::all(glm::equal(A, B, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mix()
+{
+ int Error = 0;
+
+ glm::quat const Q1(glm::vec3(1, 0, 0), glm::vec3(1, 0, 0));
+ glm::quat const Q2(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0));
+
+ {
+ glm::quat const Q3 = glm::mix(Q1, Q2, 0.5f);
+ float const F3 = glm::degrees(glm::angle(Q3));
+ Error += glm::equal(F3, 45.0f, 0.001f) ? 0 : 1;
+
+ glm::quat const Q4 = glm::mix(Q2, Q1, 0.5f);
+ float const F4 = glm::degrees(glm::angle(Q4));
+ Error += glm::equal(F4, 45.0f, 0.001f) ? 0 : 1;
+ }
+
+ {
+ glm::quat const Q3 = glm::slerp(Q1, Q2, 0.5f);
+ float const F3 = glm::degrees(glm::angle(Q3));
+ Error += glm::equal(F3, 45.0f, 0.001f) ? 0 : 1;
+
+ glm::quat const Q4 = glm::slerp(Q2, Q1, 0.5f);
+ float const F4 = glm::degrees(glm::angle(Q4));
+ Error += glm::equal(F4, 45.0f, 0.001f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_conjugate();
+ Error += test_mix();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_exponential.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_exponential.cpp
new file mode 100644
index 0000000..fbcdbef
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_exponential.cpp
@@ -0,0 +1,87 @@
+#include <glm/ext/quaternion_exponential.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/quaternion_float_precision.hpp>
+#include <glm/ext/quaternion_double.hpp>
+#include <glm/ext/quaternion_double_precision.hpp>
+#include <glm/ext/quaternion_relational.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float3_precision.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double3_precision.hpp>
+#include <glm/ext/scalar_constants.hpp>
+
+template <typename quaType, typename vecType>
+int test_log()
+{
+ typedef typename quaType::value_type T;
+
+ T const Epsilon = static_cast<T>(0.001f);
+
+ int Error = 0;
+
+ quaType const Q(vecType(1, 0, 0), vecType(0, 1, 0));
+ quaType const P = glm::log(Q);
+ Error += glm::any(glm::notEqual(Q, P, Epsilon)) ? 0 : 1;
+
+ quaType const R = glm::exp(P);
+ Error += glm::all(glm::equal(Q, R, Epsilon)) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename quaType, typename vecType>
+int test_pow()
+{
+ typedef typename quaType::value_type T;
+
+ T const Epsilon = static_cast<T>(0.001f);
+
+ int Error = 0;
+
+ quaType const Q(vecType(1, 0, 0), vecType(0, 1, 0));
+
+ {
+ T const One = static_cast<T>(1.0f);
+ quaType const P = glm::pow(Q, One);
+ Error += glm::all(glm::equal(Q, P, Epsilon)) ? 0 : 1;
+ }
+
+ {
+ T const Two = static_cast<T>(2.0f);
+ quaType const P = glm::pow(Q, Two);
+ quaType const R = Q * Q;
+ Error += glm::all(glm::equal(P, R, Epsilon)) ? 0 : 1;
+
+ quaType const U = glm::sqrt(P);
+ Error += glm::all(glm::equal(Q, U, Epsilon)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_log<glm::quat, glm::vec3>();
+ Error += test_log<glm::lowp_quat, glm::lowp_vec3>();
+ Error += test_log<glm::mediump_quat, glm::mediump_vec3>();
+ Error += test_log<glm::highp_quat, glm::highp_vec3>();
+
+ Error += test_log<glm::dquat, glm::dvec3>();
+ Error += test_log<glm::lowp_dquat, glm::lowp_dvec3>();
+ Error += test_log<glm::mediump_dquat, glm::mediump_dvec3>();
+ Error += test_log<glm::highp_dquat, glm::highp_dvec3>();
+
+ Error += test_pow<glm::quat, glm::vec3>();
+ Error += test_pow<glm::lowp_quat, glm::lowp_vec3>();
+ Error += test_pow<glm::mediump_quat, glm::mediump_vec3>();
+ Error += test_pow<glm::highp_quat, glm::highp_vec3>();
+
+ Error += test_pow<glm::dquat, glm::dvec3>();
+ Error += test_pow<glm::lowp_dquat, glm::lowp_dvec3>();
+ Error += test_pow<glm::mediump_dquat, glm::mediump_dvec3>();
+ Error += test_pow<glm::highp_dquat, glm::highp_dvec3>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_geometric.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_geometric.cpp
new file mode 100644
index 0000000..73b5dea
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_geometric.cpp
@@ -0,0 +1,88 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/quaternion_geometric.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/quaternion_trigonometric.hpp>
+#include <glm/ext/quaternion_float_precision.hpp>
+#include <glm/ext/quaternion_double.hpp>
+#include <glm/ext/quaternion_double_precision.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float3_precision.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double3_precision.hpp>
+#include <glm/ext/scalar_relational.hpp>
+
+float const Epsilon = 0.001f;
+
+static int test_length()
+{
+ int Error = 0;
+
+ {
+ float const A = glm::length(glm::quat(1, 0, 0, 0));
+ Error += glm::equal(A, 1.0f, Epsilon) ? 0 : 1;
+ }
+
+ {
+ float const A = glm::length(glm::quat(1, glm::vec3(0)));
+ Error += glm::equal(A, 1.0f, Epsilon) ? 0 : 1;
+ }
+
+ {
+ float const A = glm::length(glm::quat(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0)));
+ Error += glm::equal(A, 1.0f, Epsilon) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_normalize()
+{
+ int Error = 0;
+
+ {
+ glm::quat const A = glm::quat(1, 0, 0, 0);
+ glm::quat const N = glm::normalize(A);
+ Error += glm::all(glm::equal(A, N, Epsilon)) ? 0 : 1;
+ }
+
+ {
+ glm::quat const A = glm::quat(1, glm::vec3(0));
+ glm::quat const N = glm::normalize(A);
+ Error += glm::all(glm::equal(A, N, Epsilon)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_dot()
+{
+ int Error = 0;
+
+ {
+ glm::quat const A = glm::quat(1, 0, 0, 0);
+ glm::quat const B = glm::quat(1, 0, 0, 0);
+ float const C = glm::dot(A, B);
+ Error += glm::equal(C, 1.0f, Epsilon) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_cross()
+{
+ int Error = 0;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_length();
+ Error += test_normalize();
+ Error += test_dot();
+ Error += test_cross();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_relational.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_relational.cpp
new file mode 100644
index 0000000..7f51fdc
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_relational.cpp
@@ -0,0 +1,51 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/quaternion_relational.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/quaternion_float_precision.hpp>
+#include <glm/ext/quaternion_double.hpp>
+#include <glm/ext/quaternion_double_precision.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float3_precision.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double3_precision.hpp>
+
+template <typename quaType>
+static int test_equal()
+{
+ int Error = 0;
+
+ quaType const Q(1, 0, 0, 0);
+ quaType const P(1, 0, 0, 0);
+ Error += glm::all(glm::equal(Q, P, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename quaType>
+static int test_notEqual()
+{
+ int Error = 0;
+
+ quaType const Q(1, 0, 0, 0);
+ quaType const P(1, 0, 0, 0);
+ Error += glm::any(glm::notEqual(Q, P, glm::epsilon<float>())) ? 1 : 0;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_equal<glm::quat>();
+ Error += test_equal<glm::lowp_quat>();
+ Error += test_equal<glm::mediump_quat>();
+ Error += test_equal<glm::highp_quat>();
+
+ Error += test_notEqual<glm::quat>();
+ Error += test_notEqual<glm::lowp_quat>();
+ Error += test_notEqual<glm::mediump_quat>();
+ Error += test_notEqual<glm::highp_quat>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_transform.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_transform.cpp
new file mode 100644
index 0000000..fefe88e
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_transform.cpp
@@ -0,0 +1,45 @@
+#include <glm/ext/quaternion_transform.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/scalar_constants.hpp>
+
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/quaternion.hpp>
+
+static int test_lookAt()
+{
+ int Error(0);
+
+ glm::vec3 eye(0.0f);
+ glm::vec3 center(1.1f, -2.0f, 3.1416f);
+ glm::vec3 up(-0.17f, 7.23f, -1.744f);
+
+ glm::quat test_quat = glm::quatLookAt(glm::normalize(center - eye), up);
+ glm::quat test_mat = glm::conjugate(glm::quat_cast(glm::lookAt(eye, center, up)));
+
+ Error += static_cast<int>(glm::abs(glm::length(test_quat) - 1.0f) > glm::epsilon<float>());
+ Error += static_cast<int>(glm::min(glm::length(test_quat + (-test_mat)), glm::length(test_quat + test_mat)) > glm::epsilon<float>());
+
+ // Test left-handed implementation
+ glm::quat test_quatLH = glm::quatLookAtLH(glm::normalize(center - eye), up);
+ glm::quat test_matLH = glm::conjugate(glm::quat_cast(glm::lookAtLH(eye, center, up)));
+ Error += static_cast<int>(glm::abs(glm::length(test_quatLH) - 1.0f) > glm::epsilon<float>());
+ Error += static_cast<int>(glm::min(glm::length(test_quatLH - test_matLH), glm::length(test_quatLH + test_matLH)) > glm::epsilon<float>());
+
+ // Test right-handed implementation
+ glm::quat test_quatRH = glm::quatLookAtRH(glm::normalize(center - eye), up);
+ glm::quat test_matRH = glm::conjugate(glm::quat_cast(glm::lookAtRH(eye, center, up)));
+ Error += static_cast<int>(glm::abs(glm::length(test_quatRH) - 1.0f) > glm::epsilon<float>());
+ Error += static_cast<int>(glm::min(glm::length(test_quatRH - test_matRH), glm::length(test_quatRH + test_matRH)) > glm::epsilon<float>());
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_lookAt();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_trigonometric.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_trigonometric.cpp
new file mode 100644
index 0000000..d237125
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_trigonometric.cpp
@@ -0,0 +1,40 @@
+#include <glm/ext/quaternion_trigonometric.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/scalar_relational.hpp>
+
+float const Epsilon = 0.001f;
+
+static int test_angle()
+{
+ int Error = 0;
+
+ {
+ glm::quat const Q = glm::quat(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0));
+ float const A = glm::degrees(glm::angle(Q));
+ Error += glm::equal(A, 90.0f, Epsilon) ? 0 : 1;
+ }
+
+ {
+ glm::quat const Q = glm::quat(glm::vec3(0, 1, 0), glm::vec3(1, 0, 0));
+ float const A = glm::degrees(glm::angle(Q));
+ Error += glm::equal(A, 90.0f, Epsilon) ? 0 : 1;
+ }
+
+ {
+ glm::quat const Q = glm::angleAxis(glm::two_pi<float>() - 1.0f, glm::vec3(1, 0, 0));
+ float const A = glm::angle(Q);
+ Error += glm::equal(A, 1.0f, Epsilon) ? 1 : 0;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_angle();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_quaternion_type.cpp b/3rdparty/glm/source/test/ext/ext_quaternion_type.cpp
new file mode 100644
index 0000000..7e61149
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_quaternion_type.cpp
@@ -0,0 +1,113 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/quaternion_relational.hpp>
+#include <glm/ext/quaternion_float.hpp>
+#include <glm/ext/quaternion_float_precision.hpp>
+#include <glm/ext/quaternion_double.hpp>
+#include <glm/ext/quaternion_double_precision.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <vector>
+
+static int test_ctr()
+{
+ int Error(0);
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::quat>::value ? 0 : 1;
+ // Error += std::is_trivially_default_constructible<glm::dquat>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::quat>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::dquat>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::quat>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dquat>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::quat>::value ? 0 : 1;
+ Error += std::is_copy_constructible<glm::dquat>::value ? 0 : 1;
+# endif
+
+# if GLM_HAS_INITIALIZER_LISTS
+ {
+ glm::quat A{0, 1, 2, 3};
+
+ std::vector<glm::quat> B{
+ {0, 1, 2, 3},
+ {0, 1, 2, 3}};
+ }
+# endif//GLM_HAS_INITIALIZER_LISTS
+
+ return Error;
+}
+
+static int test_two_axis_ctr()
+{
+ int Error = 0;
+
+ glm::quat const q1(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0));
+ glm::vec3 const v1 = q1 * glm::vec3(1, 0, 0);
+ Error += glm::all(glm::equal(v1, glm::vec3(0, 1, 0), 0.0001f)) ? 0 : 1;
+
+ glm::quat const q2 = q1 * q1;
+ glm::vec3 const v2 = q2 * glm::vec3(1, 0, 0);
+ Error += glm::all(glm::equal(v2, glm::vec3(-1, 0, 0), 0.0001f)) ? 0 : 1;
+
+ glm::quat const q3(glm::vec3(1, 0, 0), glm::vec3(-1, 0, 0));
+ glm::vec3 const v3 = q3 * glm::vec3(1, 0, 0);
+ Error += glm::all(glm::equal(v3, glm::vec3(-1, 0, 0), 0.0001f)) ? 0 : 1;
+
+ glm::quat const q4(glm::vec3(0, 1, 0), glm::vec3(0, -1, 0));
+ glm::vec3 const v4 = q4 * glm::vec3(0, 1, 0);
+ Error += glm::all(glm::equal(v4, glm::vec3(0, -1, 0), 0.0001f)) ? 0 : 1;
+
+ glm::quat const q5(glm::vec3(0, 0, 1), glm::vec3(0, 0, -1));
+ glm::vec3 const v5 = q5 * glm::vec3(0, 0, 1);
+ Error += glm::all(glm::equal(v5, glm::vec3(0, 0, -1), 0.0001f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_size()
+{
+ int Error = 0;
+
+ std::size_t const A = sizeof(glm::quat);
+ Error += 16 == A ? 0 : 1;
+ std::size_t const B = sizeof(glm::dquat);
+ Error += 32 == B ? 0 : 1;
+ Error += glm::quat().length() == 4 ? 0 : 1;
+ Error += glm::dquat().length() == 4 ? 0 : 1;
+ Error += glm::quat::length() == 4 ? 0 : 1;
+ Error += glm::dquat::length() == 4 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_precision()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::lowp_quat) <= sizeof(glm::mediump_quat) ? 0 : 1;
+ Error += sizeof(glm::mediump_quat) <= sizeof(glm::highp_quat) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::quat::length() == 4, "GLM: Failed constexpr");
+ static_assert(glm::quat(1.0f, glm::vec3(0.0f)).w > 0.0f, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_ctr();
+ Error += test_two_axis_ctr();
+ Error += test_size();
+ Error += test_precision();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_common.cpp b/3rdparty/glm/source/test/ext/ext_scalar_common.cpp
new file mode 100644
index 0000000..917a242
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_common.cpp
@@ -0,0 +1,360 @@
+#include <glm/ext/scalar_common.hpp>
+#include <glm/ext/scalar_constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/common.hpp>
+
+#if ((GLM_LANG & GLM_LANG_CXX11_FLAG) || (GLM_COMPILER & GLM_COMPILER_VC))
+# define GLM_NAN(T) NAN
+#else
+# define GLM_NAN(T) (static_cast<T>(0.0f) / static_cast<T>(0.0f))
+#endif
+
+template <typename T>
+static int test_min()
+{
+ int Error = 0;
+
+ T const N = static_cast<T>(0);
+ T const B = static_cast<T>(1);
+ Error += glm::equal(glm::min(N, B), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(B, N), N, glm::epsilon<T>()) ? 0 : 1;
+
+ T const C = static_cast<T>(2);
+ Error += glm::equal(glm::min(N, B, C), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(B, N, C), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(C, N, B), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(C, B, N), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(B, C, N), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(N, C, B), N, glm::epsilon<T>()) ? 0 : 1;
+
+ T const D = static_cast<T>(3);
+ Error += glm::equal(glm::min(D, N, B, C), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(B, D, N, C), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(C, N, D, B), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(C, B, D, N), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(B, C, N, D), N, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::min(N, C, B, D), N, glm::epsilon<T>()) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_min_nan()
+{
+ int Error = 0;
+
+ T const B = static_cast<T>(1);
+ T const N = static_cast<T>(GLM_NAN(T));
+ Error += glm::isnan(glm::min(N, B)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(B, N)) ? 0 : 1;
+
+ T const C = static_cast<T>(2);
+ Error += glm::isnan(glm::min(N, B, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(B, N, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(C, N, B)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(C, B, N)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(B, C, N)) ? 0 : 1;
+ Error += glm::isnan(glm::min(N, C, B)) ? 0 : 1;
+
+ T const D = static_cast<T>(3);
+ Error += !glm::isnan(glm::min(D, N, B, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(B, D, N, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(C, N, D, B)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(C, B, D, N)) ? 0 : 1;
+ Error += !glm::isnan(glm::min(B, C, N, D)) ? 0 : 1;
+ Error += glm::isnan(glm::min(N, C, B, D)) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_max()
+{
+ int Error = 0;
+
+ T const N = static_cast<T>(0);
+ T const B = static_cast<T>(1);
+ Error += glm::equal(glm::max(N, B), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(B, N), B, glm::epsilon<T>()) ? 0 : 1;
+
+ T const C = static_cast<T>(2);
+ Error += glm::equal(glm::max(N, B, C), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(B, N, C), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(C, N, B), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(C, B, N), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(B, C, N), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(N, C, B), C, glm::epsilon<T>()) ? 0 : 1;
+
+ T const D = static_cast<T>(3);
+ Error += glm::equal(glm::max(D, N, B, C), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(B, D, N, C), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(C, N, D, B), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(C, B, D, N), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(B, C, N, D), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::max(N, C, B, D), D, glm::epsilon<T>()) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_max_nan()
+{
+ int Error = 0;
+
+ T const B = static_cast<T>(1);
+ T const N = static_cast<T>(GLM_NAN(T));
+ Error += glm::isnan(glm::max(N, B)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(B, N)) ? 0 : 1;
+
+ T const C = static_cast<T>(2);
+ Error += glm::isnan(glm::max(N, B, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(B, N, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(C, N, B)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(C, B, N)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(B, C, N)) ? 0 : 1;
+ Error += glm::isnan(glm::max(N, C, B)) ? 0 : 1;
+
+ T const D = static_cast<T>(3);
+ Error += !glm::isnan(glm::max(D, N, B, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(B, D, N, C)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(C, N, D, B)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(C, B, D, N)) ? 0 : 1;
+ Error += !glm::isnan(glm::max(B, C, N, D)) ? 0 : 1;
+ Error += glm::isnan(glm::max(N, C, B, D)) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_fmin()
+{
+ int Error = 0;
+
+ T const B = static_cast<T>(1);
+ T const N = static_cast<T>(GLM_NAN(T));
+ Error += glm::equal(glm::fmin(N, B), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(B, N), B, glm::epsilon<T>()) ? 0 : 1;
+
+ T const C = static_cast<T>(2);
+ Error += glm::equal(glm::fmin(N, B, C), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(B, N, C), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(C, N, B), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(C, B, N), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(B, C, N), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(N, C, B), B, glm::epsilon<T>()) ? 0 : 1;
+
+ T const D = static_cast<T>(3);
+ Error += glm::equal(glm::fmin(D, N, B, C), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(B, D, N, C), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(C, N, D, B), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(C, B, D, N), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(B, C, N, D), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmin(N, C, B, D), B, glm::epsilon<T>()) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_fmax()
+{
+ int Error = 0;
+
+ T const B = static_cast<T>(1);
+ T const N = static_cast<T>(GLM_NAN(T));
+ Error += glm::equal(glm::fmax(N, B), B, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(B, N), B, glm::epsilon<T>()) ? 0 : 1;
+
+ T const C = static_cast<T>(2);
+ Error += glm::equal(glm::fmax(N, B, C), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(B, N, C), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(C, N, B), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(C, B, N), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(B, C, N), C, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(N, C, B), C, glm::epsilon<T>()) ? 0 : 1;
+
+ T const D = static_cast<T>(3);
+ Error += glm::equal(glm::fmax(D, N, B, C), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(B, D, N, C), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(C, N, D, B), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(C, B, D, N), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(B, C, N, D), D, glm::epsilon<T>()) ? 0 : 1;
+ Error += glm::equal(glm::fmax(N, C, B, D), D, glm::epsilon<T>()) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_clamp()
+{
+ int Error = 0;
+
+ float A = glm::clamp(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::clamp(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::clamp(1.0f);
+ Error += glm::equal(C, 1.0f, 0.00001f) ? 0 : 1;
+
+ float D = glm::clamp(-0.5f);
+ Error += glm::equal(D, 0.0f, 0.00001f) ? 0 : 1;
+
+ float E = glm::clamp(1.5f);
+ Error += glm::equal(E, 1.0f, 0.00001f) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_repeat()
+{
+ int Error = 0;
+
+ float A = glm::repeat(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::repeat(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::repeat(1.0f);
+ Error += glm::equal(C, 0.0f, 0.00001f) ? 0 : 1;
+
+ float D = glm::repeat(-0.5f);
+ Error += glm::equal(D, 0.5f, 0.00001f) ? 0 : 1;
+
+ float E = glm::repeat(1.5f);
+ Error += glm::equal(E, 0.5f, 0.00001f) ? 0 : 1;
+
+ float F = glm::repeat(0.9f);
+ Error += glm::equal(F, 0.9f, 0.00001f) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mirrorClamp()
+{
+ int Error = 0;
+
+ float A = glm::mirrorClamp(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::mirrorClamp(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::mirrorClamp(1.1f);
+ Error += glm::equal(C, 0.1f, 0.00001f) ? 0 : 1;
+
+ float D = glm::mirrorClamp(-0.5f);
+ Error += glm::equal(D, 0.5f, 0.00001f) ? 0 : 1;
+
+ float E = glm::mirrorClamp(1.5f);
+ Error += glm::equal(E, 0.5f, 0.00001f) ? 0 : 1;
+
+ float F = glm::mirrorClamp(0.9f);
+ Error += glm::equal(F, 0.9f, 0.00001f) ? 0 : 1;
+
+ float G = glm::mirrorClamp(3.1f);
+ Error += glm::equal(G, 0.1f, 0.00001f) ? 0 : 1;
+
+ float H = glm::mirrorClamp(-3.1f);
+ Error += glm::equal(H, 0.1f, 0.00001f) ? 0 : 1;
+
+ float I = glm::mirrorClamp(-0.9f);
+ Error += glm::equal(I, 0.9f, 0.00001f) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mirrorRepeat()
+{
+ int Error = 0;
+
+ float A = glm::mirrorRepeat(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::mirrorRepeat(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::mirrorRepeat(1.0f);
+ Error += glm::equal(C, 1.0f, 0.00001f) ? 0 : 1;
+
+ float D = glm::mirrorRepeat(-0.5f);
+ Error += glm::equal(D, 0.5f, 0.00001f) ? 0 : 1;
+
+ float E = glm::mirrorRepeat(1.5f);
+ Error += glm::equal(E, 0.5f, 0.00001f) ? 0 : 1;
+
+ float F = glm::mirrorRepeat(0.9f);
+ Error += glm::equal(F, 0.9f, 0.00001f) ? 0 : 1;
+
+ float G = glm::mirrorRepeat(3.0f);
+ Error += glm::equal(G, 1.0f, 0.00001f) ? 0 : 1;
+
+ float H = glm::mirrorRepeat(-3.0f);
+ Error += glm::equal(H, 1.0f, 0.00001f) ? 0 : 1;
+
+ float I = glm::mirrorRepeat(-1.0f);
+ Error += glm::equal(I, 1.0f, 0.00001f) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_iround()
+{
+ int Error = 0;
+
+ for(float f = 0.0f; f < 3.1f; f += 0.05f)
+ {
+ int RoundFast = static_cast<int>(glm::iround(f));
+ int RoundSTD = static_cast<int>(glm::round(f));
+ Error += RoundFast == RoundSTD ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+static int test_uround()
+{
+ int Error = 0;
+
+ for(float f = 0.0f; f < 3.1f; f += 0.05f)
+ {
+ int RoundFast = static_cast<int>(glm::uround(f));
+ int RoundSTD = static_cast<int>(glm::round(f));
+ Error += RoundFast == RoundSTD ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_min<float>();
+ Error += test_min<double>();
+ Error += test_min_nan<float>();
+ Error += test_min_nan<double>();
+
+ Error += test_max<float>();
+ Error += test_max<double>();
+ Error += test_max_nan<float>();
+ Error += test_max_nan<double>();
+
+ Error += test_fmin<float>();
+ Error += test_fmin<double>();
+
+ Error += test_fmax<float>();
+ Error += test_fmax<double>();
+
+ Error += test_clamp();
+ Error += test_repeat();
+ Error += test_mirrorClamp();
+ Error += test_mirrorRepeat();
+
+ Error += test_iround();
+ Error += test_uround();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_constants.cpp b/3rdparty/glm/source/test/ext/ext_scalar_constants.cpp
new file mode 100644
index 0000000..3af7099
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_constants.cpp
@@ -0,0 +1,36 @@
+#include <glm/ext/scalar_constants.hpp>
+
+template <typename valType>
+static int test_epsilon()
+{
+ int Error = 0;
+
+ valType const Test = glm::epsilon<valType>();
+ Error += Test > static_cast<valType>(0) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename valType>
+static int test_pi()
+{
+ int Error = 0;
+
+ valType const Test = glm::pi<valType>();
+ Error += Test > static_cast<valType>(3.14) ? 0 : 1;
+ Error += Test < static_cast<valType>(3.15) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_epsilon<float>();
+ Error += test_epsilon<double>();
+ Error += test_pi<float>();
+ Error += test_pi<double>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_int_sized.cpp b/3rdparty/glm/source/test/ext/ext_scalar_int_sized.cpp
new file mode 100644
index 0000000..b55c6ca
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_int_sized.cpp
@@ -0,0 +1,43 @@
+#include <glm/ext/scalar_int_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+ static_assert(sizeof(glm::int8) == 1, "int8 size isn't 1 byte on this platform");
+ static_assert(sizeof(glm::int16) == 2, "int16 size isn't 2 bytes on this platform");
+ static_assert(sizeof(glm::int32) == 4, "int32 size isn't 4 bytes on this platform");
+ static_assert(sizeof(glm::int64) == 8, "int64 size isn't 8 bytes on this platform");
+ static_assert(sizeof(glm::int16) == sizeof(short), "signed short size isn't 4 bytes on this platform");
+ static_assert(sizeof(glm::int32) == sizeof(int), "signed int size isn't 4 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::int8) == 1 ? 0 : 1;
+ Error += sizeof(glm::int16) == 2 ? 0 : 1;
+ Error += sizeof(glm::int32) == 4 ? 0 : 1;
+ Error += sizeof(glm::int64) == 8 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::int8) < sizeof(glm::int16) ? 0 : 1;
+ Error += sizeof(glm::int16) < sizeof(glm::int32) ? 0 : 1;
+ Error += sizeof(glm::int32) < sizeof(glm::int64) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_integer.cpp b/3rdparty/glm/source/test/ext/ext_scalar_integer.cpp
new file mode 100644
index 0000000..f169e8a
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_integer.cpp
@@ -0,0 +1,686 @@
+#include <glm/ext/scalar_integer.hpp>
+#include <glm/ext/scalar_int_sized.hpp>
+#include <glm/ext/scalar_uint_sized.hpp>
+#include <vector>
+#include <ctime>
+#include <cstdio>
+
+#if GLM_LANG & GLM_LANG_CXX11_FLAG
+#include <chrono>
+
+namespace isPowerOfTwo
+{
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ bool Return;
+ };
+
+ int test_int16()
+ {
+ type<glm::int16> const Data[] =
+ {
+ {0x0001, true},
+ {0x0002, true},
+ {0x0004, true},
+ {0x0080, true},
+ {0x0000, true},
+ {0x0003, false}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::int16>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uint16()
+ {
+ type<glm::uint16> const Data[] =
+ {
+ {0x0001, true},
+ {0x0002, true},
+ {0x0004, true},
+ {0x0000, true},
+ {0x0000, true},
+ {0x0003, false}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint16>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_int32()
+ {
+ type<int> const Data[] =
+ {
+ {0x00000001, true},
+ {0x00000002, true},
+ {0x00000004, true},
+ {0x0000000f, false},
+ {0x00000000, true},
+ {0x00000003, false}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uint32()
+ {
+ type<glm::uint> const Data[] =
+ {
+ {0x00000001, true},
+ {0x00000002, true},
+ {0x00000004, true},
+ {0x80000000, true},
+ {0x00000000, true},
+ {0x00000003, false}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += test_int16();
+ Error += test_uint16();
+ Error += test_int32();
+ Error += test_uint32();
+
+ return Error;
+ }
+}//isPowerOfTwo
+
+namespace nextPowerOfTwo_advanced
+{
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value)
+ {
+ genIUType tmp = Value;
+ genIUType result = genIUType(0);
+ while(tmp)
+ {
+ result = (tmp & (~tmp + 1)); // grab lowest bit
+ tmp &= ~result; // clear lowest bit
+ }
+ return result;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType nextPowerOfTwo_loop(genType value)
+ {
+ return glm::isPowerOfTwo(value) ? value : highestBitValue(value) << 1;
+ }
+
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ genType Return;
+ };
+
+ int test_int32()
+ {
+ type<glm::int32> const Data[] =
+ {
+ {0x0000ffff, 0x00010000},
+ {-3, -4},
+ {-8, -8},
+ {0x00000001, 0x00000001},
+ {0x00000002, 0x00000002},
+ {0x00000004, 0x00000004},
+ {0x00000007, 0x00000008},
+ {0x0000fff0, 0x00010000},
+ {0x0000f000, 0x00010000},
+ {0x08000000, 0x08000000},
+ {0x00000000, 0x00000000},
+ {0x00000003, 0x00000004}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::int32>); i < n; ++i)
+ {
+ glm::int32 Result = glm::nextPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uint32()
+ {
+ type<glm::uint32> const Data[] =
+ {
+ {0x00000001, 0x00000001},
+ {0x00000002, 0x00000002},
+ {0x00000004, 0x00000004},
+ {0x00000007, 0x00000008},
+ {0x0000ffff, 0x00010000},
+ {0x0000fff0, 0x00010000},
+ {0x0000f000, 0x00010000},
+ {0x80000000, 0x80000000},
+ {0x00000000, 0x00000000},
+ {0x00000003, 0x00000004}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint32>); i < n; ++i)
+ {
+ glm::uint32 Result = glm::nextPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int perf()
+ {
+ int Error(0);
+
+ std::vector<glm::uint> v;
+ v.resize(100000000);
+
+		std::clock_t Timestamp0 = std::clock();
+
+		for(glm::uint32 i = 0, n = static_cast<glm::uint>(v.size()); i < n; ++i)
+			v[i] = nextPowerOfTwo_loop(i);
+
+		std::clock_t Timestamp1 = std::clock();
+
+		for(glm::uint32 i = 0, n = static_cast<glm::uint>(v.size()); i < n; ++i)
+			v[i] = glm::nextPowerOfTwo(i);
+
+		std::clock_t Timestamp2 = std::clock();
+
+		std::printf("nextPowerOfTwo_loop: %d clocks\n", static_cast<int>(Timestamp1 - Timestamp0));
+		std::printf("glm::nextPowerOfTwo: %d clocks\n", static_cast<int>(Timestamp2 - Timestamp1));
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ Error += test_int32();
+ Error += test_uint32();
+
+ return Error;
+ }
+}//namespace nextPowerOfTwo_advanced
+
+namespace prevPowerOfTwo
+{
+ template <typename T>
+ int run()
+ {
+ int Error = 0;
+
+ T const A = glm::prevPowerOfTwo(static_cast<T>(7));
+ Error += A == static_cast<T>(4) ? 0 : 1;
+
+ T const B = glm::prevPowerOfTwo(static_cast<T>(15));
+ Error += B == static_cast<T>(8) ? 0 : 1;
+
+ T const C = glm::prevPowerOfTwo(static_cast<T>(31));
+ Error += C == static_cast<T>(16) ? 0 : 1;
+
+ T const D = glm::prevPowerOfTwo(static_cast<T>(32));
+ Error += D == static_cast<T>(32) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<glm::int8>();
+ Error += run<glm::int16>();
+ Error += run<glm::int32>();
+ Error += run<glm::int64>();
+
+ Error += run<glm::uint8>();
+ Error += run<glm::uint16>();
+ Error += run<glm::uint32>();
+ Error += run<glm::uint64>();
+
+ return Error;
+ }
+}//namespace prevPowerOfTwo
+
+namespace nextPowerOfTwo
+{
+ template <typename T>
+ int run()
+ {
+ int Error = 0;
+
+ T const A = glm::nextPowerOfTwo(static_cast<T>(7));
+ Error += A == static_cast<T>(8) ? 0 : 1;
+
+ T const B = glm::nextPowerOfTwo(static_cast<T>(15));
+ Error += B == static_cast<T>(16) ? 0 : 1;
+
+ T const C = glm::nextPowerOfTwo(static_cast<T>(31));
+ Error += C == static_cast<T>(32) ? 0 : 1;
+
+ T const D = glm::nextPowerOfTwo(static_cast<T>(32));
+ Error += D == static_cast<T>(32) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<glm::int8>();
+ Error += run<glm::int16>();
+ Error += run<glm::int32>();
+ Error += run<glm::int64>();
+
+ Error += run<glm::uint8>();
+ Error += run<glm::uint16>();
+ Error += run<glm::uint32>();
+ Error += run<glm::uint64>();
+
+ return Error;
+ }
+}//namespace nextPowerOfTwo
+
+namespace prevMultiple
+{
+ template<typename genIUType>
+ struct type
+ {
+ genIUType Source;
+ genIUType Multiple;
+ genIUType Return;
+ };
+
+ template <typename T>
+ int run()
+ {
+ type<T> const Data[] =
+ {
+ {8, 3, 6},
+ {7, 7, 7}
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<T>); i < n; ++i)
+ {
+ T const Result = glm::prevMultiple(Data[i].Source, Data[i].Multiple);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<glm::int8>();
+ Error += run<glm::int16>();
+ Error += run<glm::int32>();
+ Error += run<glm::int64>();
+
+ Error += run<glm::uint8>();
+ Error += run<glm::uint16>();
+ Error += run<glm::uint32>();
+ Error += run<glm::uint64>();
+
+ return Error;
+ }
+}//namespace prevMultiple
+
+namespace nextMultiple
+{
+ static glm::uint const Multiples = 128;
+
+ int perf_nextMultiple(glm::uint Samples)
+ {
+ std::vector<glm::uint> Results(Samples * Multiples);
+
+ std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now();
+
+ for(glm::uint Source = 0; Source < Samples; ++Source)
+ for(glm::uint Multiple = 0; Multiple < Multiples; ++Multiple)
+ {
+ Results[Source * Multiples + Multiple] = glm::nextMultiple(Source, Multiples);
+ }
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+
+ std::printf("- glm::nextMultiple Time %d microseconds\n", static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count()));
+
+ glm::uint Result = 0;
+ for(std::size_t i = 0, n = Results.size(); i < n; ++i)
+ Result += Results[i];
+
+ return Result > 0 ? 0 : 1;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER T nextMultipleMod(T Source, T Multiple)
+ {
+ T const Tmp = Source - static_cast<T>(1);
+ return Tmp + (Multiple - (Tmp % Multiple));
+ }
+
+ int perf_nextMultipleMod(glm::uint Samples)
+ {
+ std::vector<glm::uint> Results(Samples * Multiples);
+
+ std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now();
+
+ for(glm::uint Multiple = 0; Multiple < Multiples; ++Multiple)
+ for (glm::uint Source = 0; Source < Samples; ++Source)
+ {
+ Results[Source * Multiples + Multiple] = nextMultipleMod(Source, Multiples);
+ }
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+
+ std::printf("- nextMultipleMod Time %d microseconds\n", static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count()));
+
+ glm::uint Result = 0;
+ for(std::size_t i = 0, n = Results.size(); i < n; ++i)
+ Result += Results[i];
+
+ return Result > 0 ? 0 : 1;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER T nextMultipleNeg(T Source, T Multiple)
+ {
+ if(Source > static_cast<T>(0))
+ {
+ T const Tmp = Source - static_cast<T>(1);
+ return Tmp + (Multiple - (Tmp % Multiple));
+ }
+ else
+ return Source + (-Source % Multiple);
+ }
+
+ int perf_nextMultipleNeg(glm::uint Samples)
+ {
+ std::vector<glm::uint> Results(Samples * Multiples);
+
+ std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now();
+
+ for(glm::uint Source = 0; Source < Samples; ++Source)
+ for(glm::uint Multiple = 0; Multiple < Multiples; ++Multiple)
+ {
+ Results[Source * Multiples + Multiple] = nextMultipleNeg(Source, Multiples);
+ }
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+
+ std::printf("- nextMultipleNeg Time %d microseconds\n", static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count()));
+
+ glm::uint Result = 0;
+ for (std::size_t i = 0, n = Results.size(); i < n; ++i)
+ Result += Results[i];
+
+ return Result > 0 ? 0 : 1;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER T nextMultipleUFloat(T Source, T Multiple)
+ {
+ return Source + (Multiple - std::fmod(Source, Multiple));
+ }
+
+ int perf_nextMultipleUFloat(glm::uint Samples)
+ {
+ std::vector<float> Results(Samples * Multiples);
+
+ std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now();
+
+ for(glm::uint Source = 0; Source < Samples; ++Source)
+ for(glm::uint Multiple = 0; Multiple < Multiples; ++Multiple)
+ {
+ Results[Source * Multiples + Multiple] = nextMultipleUFloat(static_cast<float>(Source), static_cast<float>(Multiples));
+ }
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+
+ std::printf("- nextMultipleUFloat Time %d microseconds\n", static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count()));
+
+ float Result = 0;
+ for (std::size_t i = 0, n = Results.size(); i < n; ++i)
+ Result += Results[i];
+
+ return Result > 0.0f ? 0 : 1;
+ }
+
+ template <typename T>
+ GLM_FUNC_QUALIFIER T nextMultipleFloat(T Source, T Multiple)
+ {
+ if(Source > static_cast<float>(0))
+ return Source + (Multiple - std::fmod(Source, Multiple));
+ else
+ return Source + std::fmod(-Source, Multiple);
+ }
+
+ int perf_nextMultipleFloat(glm::uint Samples)
+ {
+ std::vector<float> Results(Samples * Multiples);
+
+ std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now();
+
+ for(glm::uint Source = 0; Source < Samples; ++Source)
+ for(glm::uint Multiple = 0; Multiple < Multiples; ++Multiple)
+ {
+ Results[Source * Multiples + Multiple] = nextMultipleFloat(static_cast<float>(Source), static_cast<float>(Multiples));
+ }
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+
+ std::printf("- nextMultipleFloat Time %d microseconds\n", static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count()));
+
+ float Result = 0;
+ for (std::size_t i = 0, n = Results.size(); i < n; ++i)
+ Result += Results[i];
+
+ return Result > 0.0f ? 0 : 1;
+ }
+
+ template<typename genIUType>
+ struct type
+ {
+ genIUType Source;
+ genIUType Multiple;
+ genIUType Return;
+ };
+
+ template <typename T>
+ int test_uint()
+ {
+ type<T> const Data[] =
+ {
+ { 3, 4, 4 },
+ { 6, 3, 6 },
+ { 5, 3, 6 },
+ { 7, 7, 7 },
+ { 0, 1, 0 },
+ { 8, 3, 9 }
+ };
+
+ int Error = 0;
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<T>); i < n; ++i)
+ {
+ T const Result0 = glm::nextMultiple(Data[i].Source, Data[i].Multiple);
+ Error += Data[i].Return == Result0 ? 0 : 1;
+ assert(!Error);
+
+ T const Result1 = nextMultipleMod(Data[i].Source, Data[i].Multiple);
+ Error += Data[i].Return == Result1 ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+ }
+
+ int perf()
+ {
+ int Error = 0;
+
+ glm::uint const Samples = 10000;
+
+ for(int i = 0; i < 4; ++i)
+ {
+ std::printf("Run %d :\n", i);
+ Error += perf_nextMultiple(Samples);
+ Error += perf_nextMultipleMod(Samples);
+ Error += perf_nextMultipleNeg(Samples);
+ Error += perf_nextMultipleUFloat(Samples);
+ Error += perf_nextMultipleFloat(Samples);
+ std::printf("\n");
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += test_uint<glm::int8>();
+ Error += test_uint<glm::int16>();
+ Error += test_uint<glm::int32>();
+ Error += test_uint<glm::int64>();
+
+ Error += test_uint<glm::uint8>();
+ Error += test_uint<glm::uint16>();
+ Error += test_uint<glm::uint32>();
+ Error += test_uint<glm::uint64>();
+
+ return Error;
+ }
+}//namespace nextMultiple
+
+namespace findNSB
+{
+ template<typename T>
+ struct type
+ {
+ T Source;
+ int SignificantBitCount;
+ int Return;
+ };
+
+ template <typename T>
+ int run()
+ {
+ type<T> const Data[] =
+ {
+ { 0x00, 1,-1 },
+ { 0x01, 2,-1 },
+ { 0x02, 2,-1 },
+ { 0x06, 3,-1 },
+ { 0x01, 1, 0 },
+ { 0x03, 1, 0 },
+ { 0x03, 2, 1 },
+ { 0x07, 2, 1 },
+ { 0x05, 2, 2 },
+ { 0x0D, 2, 2 }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<T>); i < n; ++i)
+ {
+ int const Result0 = glm::findNSB(Data[i].Source, Data[i].SignificantBitCount);
+ Error += Data[i].Return == Result0 ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<glm::uint8>();
+ Error += run<glm::uint16>();
+ Error += run<glm::uint32>();
+ Error += run<glm::uint64>();
+
+ Error += run<glm::int8>();
+ Error += run<glm::int16>();
+ Error += run<glm::int32>();
+ Error += run<glm::int64>();
+
+ return Error;
+ }
+}//namespace findNSB
+
+int main()
+{
+ int Error = 0;
+
+ Error += findNSB::test();
+
+ Error += isPowerOfTwo::test();
+ Error += prevPowerOfTwo::test();
+ Error += nextPowerOfTwo::test();
+ Error += nextPowerOfTwo_advanced::test();
+ Error += prevMultiple::test();
+ Error += nextMultiple::test();
+
+# ifdef NDEBUG
+ Error += nextPowerOfTwo_advanced::perf();
+ Error += nextMultiple::perf();
+# endif//NDEBUG
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_packing.cpp b/3rdparty/glm/source/test/ext/ext_scalar_packing.cpp
new file mode 100644
index 0000000..77616e3
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_packing.cpp
@@ -0,0 +1,28 @@
+#include <glm/ext/scalar_packing.hpp>
+#include <glm/ext/scalar_relational.hpp>
+
+int test_packUnorm()
+{
+ int Error = 0;
+
+
+ return Error;
+}
+
+int test_packSnorm()
+{
+ int Error = 0;
+
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_packUnorm();
+ Error += test_packSnorm();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_reciprocal.cpp b/3rdparty/glm/source/test/ext/ext_scalar_reciprocal.cpp
new file mode 100644
index 0000000..ebba10d
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_reciprocal.cpp
@@ -0,0 +1,171 @@
+#include <glm/ext/scalar_reciprocal.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/scalar_constants.hpp>
+
+static int test_sec()
+{
+ int Error = 0;
+
+ Error += glm::equal(glm::sec(0.0), 1.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::sec(glm::pi<double>() * 2.0), 1.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::sec(glm::pi<double>() * -2.0), 1.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::sec(glm::pi<double>() * 1.0), -1.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::sec(glm::pi<double>() * -1.0), -1.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_csc()
+{
+ int Error = 0;
+
+ double const a = glm::csc(glm::pi<double>() * 0.5);
+ Error += glm::equal(a, 1.0, 0.01) ? 0 : 1;
+ double const b = glm::csc(glm::pi<double>() * -0.5);
+ Error += glm::equal(b, -1.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_cot()
+{
+ int Error = 0;
+
+ double const a = glm::cot(glm::pi<double>() * 0.5);
+ Error += glm::equal(a, 0.0, 0.01) ? 0 : 1;
+ double const b = glm::cot(glm::pi<double>() * -0.5);
+ Error += glm::equal(b, 0.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_asec()
+{
+ int Error = 0;
+
+ Error += glm::equal(glm::asec(100000.0), glm::pi<double>() * 0.5, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::asec(-100000.0), glm::pi<double>() * 0.5, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acsc()
+{
+ int Error = 0;
+
+ Error += glm::equal(glm::acsc(100000.0), 0.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::acsc(-100000.0), 0.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acot()
+{
+ int Error = 0;
+
+ Error += glm::equal(glm::acot(100000.0), 0.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::acot(-100000.0), glm::pi<double>(), 0.01) ? 0 : 1;
+ Error += glm::equal(glm::acot(0.0), glm::pi<double>() * 0.5, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_sech()
+{
+ int Error = 0;
+
+ Error += glm::equal(glm::sech(100000.0), 0.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::sech(-100000.0), 0.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::sech(0.0), 1.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_csch()
+{
+ int Error = 0;
+
+ Error += glm::equal(glm::csch(100000.0), 0.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::csch(-100000.0), 0.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_coth()
+{
+ int Error = 0;
+
+ double const a = glm::coth(100.0);
+ Error += glm::equal(a, 1.0, 0.01) ? 0 : 1;
+
+ double const b = glm::coth(-100.0);
+ Error += glm::equal(b, -1.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_asech()
+{
+ int Error = 0;
+
+ double const a = glm::asech(1.0);
+ Error += glm::equal(a, 0.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acsch()
+{
+ int Error = 0;
+
+ Error += glm::acsch(0.01) > 1.0 ? 0 : 1;
+ Error += glm::acsch(-0.01) < -1.0 ? 0 : 1;
+
+ Error += glm::equal(glm::acsch(100.0), 0.0, 0.01) ? 0 : 1;
+ Error += glm::equal(glm::acsch(-100.0), 0.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acoth()
+{
+ int Error = 0;
+
+ double const a = glm::acoth(1.00001);
+ Error += a > 6.0 ? 0 : 1;
+
+ double const b = glm::acoth(-1.00001);
+ Error += b < -6.0 ? 0 : 1;
+
+ double const c = glm::acoth(10000.0);
+ Error += glm::equal(c, 0.0, 0.01) ? 0 : 1;
+
+ double const d = glm::acoth(-10000.0);
+ Error += glm::equal(d, 0.0, 0.01) ? 0 : 1;
+
+ return Error;
+}
+
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_sec();
+ Error += test_csc();
+ Error += test_cot();
+
+ Error += test_asec();
+ Error += test_acsc();
+ Error += test_acot();
+
+ Error += test_sech();
+ Error += test_csch();
+ Error += test_coth();
+
+ Error += test_asech();
+ Error += test_acsch();
+ Error += test_acoth();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_relational.cpp b/3rdparty/glm/source/test/ext/ext_scalar_relational.cpp
new file mode 100644
index 0000000..61f1999
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_relational.cpp
@@ -0,0 +1,106 @@
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/scalar_integer.hpp>
+#include <glm/ext/scalar_ulp.hpp>
+#include <cmath>
+
+static int test_equal_epsilon()
+{
+# if GLM_CONFIG_CONSTEXP == GLM_ENABLE
+ static_assert(glm::equal(1.01f, 1.02f, 0.1f), "GLM: Failed constexpr");
+ static_assert(!glm::equal(1.01f, 1.02f, 0.001f), "GLM: Failed constexpr");
+# endif
+
+ int Error = 0;
+
+ Error += glm::equal(1.01f, 1.02f, 0.1f) ? 0 : 1;
+ Error += !glm::equal(1.01f, 1.02f, 0.001f) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_notEqual_epsilon()
+{
+# if GLM_CONFIG_CONSTEXP == GLM_ENABLE
+ static_assert(glm::notEqual(1.01f, 1.02f, 0.001f), "GLM: Failed constexpr");
+ static_assert(!glm::notEqual(1.01f, 1.02f, 0.1f), "GLM: Failed constexpr");
+# endif
+
+ int Error = 0;
+
+ Error += glm::notEqual(1.01f, 1.02f, 0.001f) ? 0 : 1;
+ Error += !glm::notEqual(1.01f, 1.02f, 0.1f) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_equal_ulps()
+{
+ int Error = 0;
+
+ float const ULP1Plus = glm::nextFloat(1.0f);
+ Error += glm::equal(1.0f, ULP1Plus, 1) ? 0 : 1;
+
+ float const ULP2Plus = glm::nextFloat(ULP1Plus);
+ Error += !glm::equal(1.0f, ULP2Plus, 1) ? 0 : 1;
+
+ float const ULP1Minus = glm::prevFloat(1.0f);
+ Error += glm::equal(1.0f, ULP1Minus, 1) ? 0 : 1;
+
+ float const ULP2Minus = glm::prevFloat(ULP1Minus);
+ Error += !glm::equal(1.0f, ULP2Minus, 1) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_notEqual_ulps()
+{
+ int Error = 0;
+
+ float const ULP1Plus = glm::nextFloat(1.0f);
+ Error += !glm::notEqual(1.0f, ULP1Plus, 1) ? 0 : 1;
+
+ float const ULP2Plus = glm::nextFloat(ULP1Plus);
+ Error += glm::notEqual(1.0f, ULP2Plus, 1) ? 0 : 1;
+
+ float const ULP1Minus = glm::prevFloat(1.0f);
+ Error += !glm::notEqual(1.0f, ULP1Minus, 1) ? 0 : 1;
+
+ float const ULP2Minus = glm::prevFloat(ULP1Minus);
+ Error += glm::notEqual(1.0f, ULP2Minus, 1) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_equal_sign()
+{
+ int Error = 0;
+
+ Error += !glm::equal(-0.0f, 0.0f, 2) ? 0 : 1;
+ Error += !glm::equal(-0.0, 0.0, 2) ? 0 : 1;
+
+ Error += !glm::equal(-1.0f, 2.0f, 2) ? 0 : 1;
+ Error += !glm::equal(-1.0, 2.0, 2) ? 0 : 1;
+
+ Error += !glm::equal(-0.00001f, 1.00000f, 2) ? 0 : 1;
+ Error += !glm::equal(-0.00001, 1.00000, 2) ? 0 : 1;
+
+ Error += !glm::equal(-1.0f, 1.0f, 2) ? 0 : 1;
+ Error += !glm::equal(-1.0, 1.0, 2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_equal_epsilon();
+ Error += test_notEqual_epsilon();
+
+ Error += test_equal_ulps();
+ Error += test_notEqual_ulps();
+
+ Error += test_equal_sign();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_uint_sized.cpp b/3rdparty/glm/source/test/ext/ext_scalar_uint_sized.cpp
new file mode 100644
index 0000000..1122947
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_uint_sized.cpp
@@ -0,0 +1,43 @@
+#include <glm/ext/scalar_uint_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+ static_assert(sizeof(glm::uint8) == 1, "uint8 size isn't 1 byte on this platform");
+ static_assert(sizeof(glm::uint16) == 2, "uint16 size isn't 2 bytes on this platform");
+ static_assert(sizeof(glm::uint32) == 4, "uint32 size isn't 4 bytes on this platform");
+ static_assert(sizeof(glm::uint64) == 8, "uint64 size isn't 8 bytes on this platform");
+	static_assert(sizeof(glm::uint16) == sizeof(unsigned short), "unsigned short size isn't 2 bytes on this platform");
+ static_assert(sizeof(glm::uint32) == sizeof(unsigned int), "unsigned int size isn't 4 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::uint8) == 1 ? 0 : 1;
+ Error += sizeof(glm::uint16) == 2 ? 0 : 1;
+ Error += sizeof(glm::uint32) == 4 ? 0 : 1;
+ Error += sizeof(glm::uint64) == 8 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::uint8) < sizeof(glm::uint16) ? 0 : 1;
+ Error += sizeof(glm::uint16) < sizeof(glm::uint32) ? 0 : 1;
+ Error += sizeof(glm::uint32) < sizeof(glm::uint64) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_scalar_ulp.cpp b/3rdparty/glm/source/test/ext/ext_scalar_ulp.cpp
new file mode 100644
index 0000000..a19b774
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_scalar_ulp.cpp
@@ -0,0 +1,96 @@
+#include <glm/ext/scalar_ulp.hpp>
+#include <glm/ext/scalar_relational.hpp>
+
+static int test_ulp_float_dist()
+{
+ int Error = 0;
+
+ float A = 1.0f;
+
+ float B = glm::nextFloat(A);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ float C = glm::prevFloat(B);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ int D = glm::floatDistance(A, B);
+ Error += D == 1 ? 0 : 1;
+ int E = glm::floatDistance(A, C);
+ Error += E == 0 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_ulp_float_step()
+{
+ int Error = 0;
+
+ float A = 1.0f;
+
+ for(int i = 10; i < 1000; i *= 10)
+ {
+ float B = glm::nextFloat(A, i);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ float C = glm::prevFloat(B, i);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ int D = glm::floatDistance(A, B);
+ Error += D == i ? 0 : 1;
+ int E = glm::floatDistance(A, C);
+ Error += E == 0 ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_ulp_double_dist()
+{
+ int Error = 0;
+
+ double A = 1.0;
+
+ double B = glm::nextFloat(A);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ double C = glm::prevFloat(B);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ glm::int64 const D = glm::floatDistance(A, B);
+ Error += D == 1 ? 0 : 1;
+ glm::int64 const E = glm::floatDistance(A, C);
+ Error += E == 0 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_ulp_double_step()
+{
+ int Error = 0;
+
+ double A = 1.0;
+
+ for(int i = 10; i < 1000; i *= 10)
+ {
+ double B = glm::nextFloat(A, i);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ double C = glm::prevFloat(B, i);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ glm::int64 const D = glm::floatDistance(A, B);
+ Error += D == i ? 0 : 1;
+ glm::int64 const E = glm::floatDistance(A, C);
+ Error += E == 0 ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_ulp_float_dist();
+ Error += test_ulp_float_step();
+ Error += test_ulp_double_dist();
+ Error += test_ulp_double_step();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vec1.cpp b/3rdparty/glm/source/test/ext/ext_vec1.cpp
new file mode 100644
index 0000000..fc0b931
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vec1.cpp
@@ -0,0 +1,157 @@
+#define GLM_FORCE_SWIZZLE
+#include <glm/vector_relational.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <vector>
+
+static glm::vec1 g1;
+static glm::vec1 g2(1);
+
+static int test_vec1_operators()
+{
+ int Error(0);
+
+ glm::ivec1 A(1);
+ glm::ivec1 B(1);
+ {
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ {
+ A *= 1;
+ B *= 1;
+ A += 1;
+ B += 1;
+
+ bool R = A != B;
+ bool S = A == B;
+
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_vec1_ctor()
+{
+ int Error = 0;
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::vec1>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::vec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::vec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dvec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ivec1>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::uvec1>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::vec1>::value ? 0 : 1;
+# endif
+
+
+ {
+ glm::ivec1 A = glm::vec1(2.0f);
+
+ glm::ivec1 E(glm::dvec1(2.0));
+ Error += A == E ? 0 : 1;
+
+ glm::ivec1 F(glm::ivec1(2));
+ Error += A == F ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_vec1_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::vec1) == sizeof(glm::mediump_vec1) ? 0 : 1;
+ Error += 4 == sizeof(glm::mediump_vec1) ? 0 : 1;
+ Error += sizeof(glm::dvec1) == sizeof(glm::highp_dvec1) ? 0 : 1;
+ Error += 8 == sizeof(glm::highp_dvec1) ? 0 : 1;
+ Error += glm::vec1().length() == 1 ? 0 : 1;
+ Error += glm::dvec1().length() == 1 ? 0 : 1;
+ Error += glm::vec1::length() == 1 ? 0 : 1;
+ Error += glm::dvec1::length() == 1 ? 0 : 1;
+
+ GLM_CONSTEXPR std::size_t Length = glm::vec1::length();
+ Error += Length == 1 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_vec1_operator_increment()
+{
+ int Error(0);
+
+ glm::ivec1 v0(1);
+ glm::ivec1 v1(v0);
+ glm::ivec1 v2(v0);
+ glm::ivec1 v3 = ++v1;
+ glm::ivec1 v4 = v2++;
+
+ Error += glm::all(glm::equal(v0, v4)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v2)) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, v3)) ? 0 : 1;
+
+ int i0(1);
+ int i1(i0);
+ int i2(i0);
+ int i3 = ++i1;
+ int i4 = i2++;
+
+ Error += i0 == i4 ? 0 : 1;
+ Error += i1 == i2 ? 0 : 1;
+ Error += i1 == i3 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_bvec1_ctor()
+{
+ int Error = 0;
+
+ glm::bvec1 const A(true);
+ glm::bvec1 const B(true);
+ glm::bvec1 const C(false);
+ glm::bvec1 const D = A && B;
+ glm::bvec1 const E = A && C;
+ glm::bvec1 const F = A || C;
+
+ Error += D == glm::bvec1(true) ? 0 : 1;
+ Error += E == glm::bvec1(false) ? 0 : 1;
+ Error += F == glm::bvec1(true) ? 0 : 1;
+
+ bool const G = A == C;
+ bool const H = A != C;
+ Error += !G ? 0 : 1;
+ Error += H ? 0 : 1;
+
+ return Error;
+}
+
+static int test_constexpr()
+{
+#if GLM_HAS_CONSTEXPR
+ static_assert(glm::vec1::length() == 1, "GLM: Failed constexpr");
+ static_assert(glm::vec1(1.0f).x > 0.0f, "GLM: Failed constexpr");
+#endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_vec1_size();
+ Error += test_vec1_ctor();
+ Error += test_bvec1_ctor();
+ Error += test_vec1_operators();
+ Error += test_vec1_operator_increment();
+ Error += test_constexpr();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_bool1.cpp b/3rdparty/glm/source/test/ext/ext_vector_bool1.cpp
new file mode 100644
index 0000000..43eed57
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_bool1.cpp
@@ -0,0 +1,104 @@
+#include <glm/ext/vector_bool1.hpp>
+#include <glm/ext/vector_bool1_precision.hpp>
+
+template <typename genType>
+static int test_operators()
+{
+ int Error = 0;
+
+ genType const A(true);
+ genType const B(true);
+ {
+ bool const R = A != B;
+ bool const S = A == B;
+ Error += (S && !R) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename genType>
+static int test_ctor()
+{
+ int Error = 0;
+
+ glm::bvec1 const A = genType(true);
+
+ glm::bvec1 const E(genType(true));
+ Error += A == E ? 0 : 1;
+
+ glm::bvec1 const F(E);
+ Error += A == F ? 0 : 1;
+
+ return Error;
+}
+
+template <typename genType>
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::bvec1) == sizeof(genType) ? 0 : 1;
+ Error += genType().length() == 1 ? 0 : 1;
+ Error += genType::length() == 1 ? 0 : 1;
+
+ return Error;
+}
+
+template <typename genType>
+static int test_relational()
+{
+ int Error = 0;
+
+ genType const A(true);
+ genType const B(true);
+ genType const C(false);
+
+ Error += A == B ? 0 : 1;
+ Error += (A && B) == A ? 0 : 1;
+ Error += (A || C) == A ? 0 : 1;
+
+ return Error;
+}
+
+template <typename genType>
+static int test_constexpr()
+{
+# if GLM_HAS_CONSTEXPR
+ static_assert(genType::length() == 1, "GLM: Failed constexpr");
+# endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_operators<glm::bvec1>();
+ Error += test_operators<glm::lowp_bvec1>();
+ Error += test_operators<glm::mediump_bvec1>();
+ Error += test_operators<glm::highp_bvec1>();
+
+ Error += test_ctor<glm::bvec1>();
+ Error += test_ctor<glm::lowp_bvec1>();
+ Error += test_ctor<glm::mediump_bvec1>();
+ Error += test_ctor<glm::highp_bvec1>();
+
+ Error += test_size<glm::bvec1>();
+ Error += test_size<glm::lowp_bvec1>();
+ Error += test_size<glm::mediump_bvec1>();
+ Error += test_size<glm::highp_bvec1>();
+
+ Error += test_relational<glm::bvec1>();
+ Error += test_relational<glm::lowp_bvec1>();
+ Error += test_relational<glm::mediump_bvec1>();
+ Error += test_relational<glm::highp_bvec1>();
+
+ Error += test_constexpr<glm::bvec1>();
+ Error += test_constexpr<glm::lowp_bvec1>();
+ Error += test_constexpr<glm::mediump_bvec1>();
+ Error += test_constexpr<glm::highp_bvec1>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_common.cpp b/3rdparty/glm/source/test/ext/ext_vector_common.cpp
new file mode 100644
index 0000000..211003f
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_common.cpp
@@ -0,0 +1,365 @@
+#include <glm/ext/vector_common.hpp>
+
+#include <glm/ext/vector_bool1.hpp>
+#include <glm/ext/vector_bool1_precision.hpp>
+#include <glm/ext/vector_bool2.hpp>
+#include <glm/ext/vector_bool2_precision.hpp>
+#include <glm/ext/vector_bool3.hpp>
+#include <glm/ext/vector_bool3_precision.hpp>
+#include <glm/ext/vector_bool4.hpp>
+#include <glm/ext/vector_bool4_precision.hpp>
+
+#include <glm/ext/vector_float1.hpp>
+#include <glm/ext/vector_float1_precision.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <glm/ext/vector_float2_precision.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float3_precision.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_float4_precision.hpp>
+#include <glm/ext/vector_double1.hpp>
+#include <glm/ext/vector_double1_precision.hpp>
+#include <glm/ext/vector_double2.hpp>
+#include <glm/ext/vector_double2_precision.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double3_precision.hpp>
+#include <glm/ext/vector_double4.hpp>
+#include <glm/ext/vector_double4_precision.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/scalar_constants.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/common.hpp>
+
+#if ((GLM_LANG & GLM_LANG_CXX11_FLAG) || (GLM_COMPILER & GLM_COMPILER_VC))
+# define GLM_NAN(T) NAN
+#else
+# define GLM_NAN(T) (static_cast<T>(0.0f) / static_cast<T>(0.0f))
+#endif
+
+template <typename vecType>
+static int test_min()
+{
+ typedef typename vecType::value_type T;
+
+ int Error = 0;
+
+ vecType const N(static_cast<T>(0));
+ vecType const B(static_cast<T>(1));
+
+ Error += glm::all(glm::equal(glm::min(N, B), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(B, N), N, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const C(static_cast<T>(2));
+ Error += glm::all(glm::equal(glm::min(N, B, C), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(B, N, C), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(C, N, B), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(C, B, N), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(B, C, N), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(N, C, B), N, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const D(static_cast<T>(3));
+ Error += glm::all(glm::equal(glm::min(D, N, B, C), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(B, D, N, C), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(C, N, D, B), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(C, B, D, N), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(B, C, N, D), N, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::min(N, C, B, D), N, glm::epsilon<T>())) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename vecType>
+static int test_min_nan()
+{
+ typedef typename vecType::value_type T;
+
+ int Error = 0;
+
+ vecType const B(static_cast<T>(1));
+ vecType const N(GLM_NAN(T));
+
+ Error += glm::all(glm::isnan(glm::min(N, B))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(B, N))) ? 0 : 1;
+
+ vecType const C(static_cast<T>(2));
+ Error += glm::all(glm::isnan(glm::min(N, B, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(B, N, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(C, N, B))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(C, B, N))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(B, C, N))) ? 0 : 1;
+ Error += glm::all(glm::isnan(glm::min(N, C, B))) ? 0 : 1;
+
+ vecType const D(static_cast<T>(3));
+ Error += !glm::all(glm::isnan(glm::min(D, N, B, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(B, D, N, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(C, N, D, B))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(C, B, D, N))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::min(B, C, N, D))) ? 0 : 1;
+ Error += glm::all(glm::isnan(glm::min(N, C, B, D))) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename vecType>
+static int test_max()
+{
+ typedef typename vecType::value_type T;
+
+ int Error = 0;
+
+ vecType const N(static_cast<T>(0));
+ vecType const B(static_cast<T>(1));
+ Error += glm::all(glm::equal(glm::max(N, B), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(B, N), B, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const C(static_cast<T>(2));
+ Error += glm::all(glm::equal(glm::max(N, B, C), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(B, N, C), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(C, N, B), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(C, B, N), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(B, C, N), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(N, C, B), C, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const D(static_cast<T>(3));
+ Error += glm::all(glm::equal(glm::max(D, N, B, C), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(B, D, N, C), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(C, N, D, B), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(C, B, D, N), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(B, C, N, D), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::max(N, C, B, D), D, glm::epsilon<T>())) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename vecType>
+static int test_max_nan()
+{
+ typedef typename vecType::value_type T;
+
+ int Error = 0;
+
+ vecType const B(static_cast<T>(1));
+ vecType const N(GLM_NAN(T));
+
+ Error += glm::all(glm::isnan(glm::max(N, B))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(B, N))) ? 0 : 1;
+
+ vecType const C(static_cast<T>(2));
+ Error += glm::all(glm::isnan(glm::max(N, B, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(B, N, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(C, N, B))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(C, B, N))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(B, C, N))) ? 0 : 1;
+ Error += glm::all(glm::isnan(glm::max(N, C, B))) ? 0 : 1;
+
+ vecType const D(static_cast<T>(3));
+ Error += !glm::all(glm::isnan(glm::max(D, N, B, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(B, D, N, C))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(C, N, D, B))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(C, B, D, N))) ? 0 : 1;
+ Error += !glm::all(glm::isnan(glm::max(B, C, N, D))) ? 0 : 1;
+ Error += glm::all(glm::isnan(glm::max(N, C, B, D))) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename vecType>
+static int test_fmin()
+{
+ typedef typename vecType::value_type T;
+
+ int Error = 0;
+
+ vecType const B(static_cast<T>(1));
+ vecType const N(GLM_NAN(T));
+
+ Error += glm::all(glm::equal(glm::fmin(N, B), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(B, N), B, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const C(static_cast<T>(2));
+ Error += glm::all(glm::equal(glm::fmin(N, B, C), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(B, N, C), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(C, N, B), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(C, B, N), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(B, C, N), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(N, C, B), B, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const D(static_cast<T>(3));
+ Error += glm::all(glm::equal(glm::fmin(D, N, B, C), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(B, D, N, C), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(C, N, D, B), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(C, B, D, N), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(B, C, N, D), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmin(N, C, B, D), B, glm::epsilon<T>())) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename vecType>
+static int test_fmax()
+{
+ typedef typename vecType::value_type T;
+
+ int Error = 0;
+
+ vecType const B(static_cast<T>(1));
+ vecType const N(GLM_NAN(T));
+
+ Error += glm::all(glm::equal(glm::fmax(N, B), B, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(B, N), B, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const C(static_cast<T>(2));
+ Error += glm::all(glm::equal(glm::fmax(N, B, C), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(B, N, C), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(C, N, B), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(C, B, N), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(B, C, N), C, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(N, C, B), C, glm::epsilon<T>())) ? 0 : 1;
+
+ vecType const D(static_cast<T>(3));
+ Error += glm::all(glm::equal(glm::fmax(D, N, B, C), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(B, D, N, C), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(C, N, D, B), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(C, B, D, N), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(B, C, N, D), D, glm::epsilon<T>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::fmax(N, C, B, D), D, glm::epsilon<T>())) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_clamp()
+{
+ int Error = 0;
+
+ glm::vec2 K = glm::clamp(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::clamp(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::clamp(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::clamp(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_repeat()
+{
+ int Error = 0;
+
+ glm::vec2 K = glm::repeat(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::repeat(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::repeat(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::repeat(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mirrorClamp()
+{
+ int Error = 0;
+
+ glm::vec2 K = glm::mirrorClamp(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::mirrorClamp(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::mirrorClamp(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::mirrorClamp(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mirrorRepeat()
+{
+ int Error = 0;
+
+ glm::vec2 K = glm::mirrorRepeat(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::mirrorRepeat(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::mirrorRepeat(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::mirrorRepeat(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_iround()
+{
+ int Error = 0;
+
+ for(float f = 0.0f; f < 3.1f; f += 0.05f)
+ {
+ int RoundFast = static_cast<int>(glm::iround(f));
+ int RoundSTD = static_cast<int>(glm::round(f));
+ Error += RoundFast == RoundSTD ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+static int test_uround()
+{
+ int Error = 0;
+
+ for(float f = 0.0f; f < 3.1f; f += 0.05f)
+ {
+ int RoundFast = static_cast<int>(glm::uround(f));
+ int RoundSTD = static_cast<int>(glm::round(f));
+ Error += RoundFast == RoundSTD ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_min<glm::vec3>();
+ Error += test_min<glm::vec2>();
+ Error += test_min_nan<glm::vec3>();
+ Error += test_min_nan<glm::vec2>();
+
+ Error += test_max<glm::vec3>();
+ Error += test_max<glm::vec2>();
+ Error += test_max_nan<glm::vec3>();
+ Error += test_max_nan<glm::vec2>();
+
+ Error += test_fmin<glm::vec3>();
+ Error += test_fmin<glm::vec2>();
+
+ Error += test_fmax<glm::vec3>();
+ Error += test_fmax<glm::vec2>();
+
+ Error += test_clamp();
+ Error += test_repeat();
+ Error += test_mirrorClamp();
+ Error += test_mirrorRepeat();
+
+ Error += test_iround();
+ Error += test_uround();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_iec559.cpp b/3rdparty/glm/source/test/ext/ext_vector_iec559.cpp
new file mode 100644
index 0000000..5a9da50
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_iec559.cpp
@@ -0,0 +1,166 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_double1.hpp>
+#include <glm/ext/vector_double1_precision.hpp>
+#include <glm/ext/vector_double2.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double4.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/ext/vector_float1_precision.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float4.hpp>
+
+template <typename genType>
+static int test_operators()
+{
+ typedef typename genType::value_type valType;
+
+ int Error = 0;
+
+ {
+ genType const A(1);
+ genType const B(1);
+
+ genType const C = A + B;
+ Error += glm::all(glm::equal(C, genType(2), glm::epsilon<valType>())) ? 0 : 1;
+
+ genType const D = A - B;
+ Error += glm::all(glm::equal(D, genType(0), glm::epsilon<valType>())) ? 0 : 1;
+
+ genType const E = A * B;
+ Error += glm::all(glm::equal(E, genType(1), glm::epsilon<valType>())) ? 0 : 1;
+
+ genType const F = A / B;
+ Error += glm::all(glm::equal(F, genType(1), glm::epsilon<valType>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename genType>
+static int test_ctor()
+{
+ typedef typename genType::value_type T;
+
+ int Error = 0;
+
+ glm::vec<1, T> const A = genType(1);
+
+ glm::vec<1, T> const E(genType(1));
+ Error += glm::all(glm::equal(A, E, glm::epsilon<T>())) ? 0 : 1;
+
+ glm::vec<1, T> const F(E);
+ Error += glm::all(glm::equal(A, F, glm::epsilon<T>())) ? 0 : 1;
+
+ genType const B = genType(1);
+ genType const G(glm::vec<2, T>(1));
+ Error += glm::all(glm::equal(B, G, glm::epsilon<T>())) ? 0 : 1;
+
+ genType const H(glm::vec<3, T>(1));
+ Error += glm::all(glm::equal(B, H, glm::epsilon<T>())) ? 0 : 1;
+
+ genType const I(glm::vec<4, T>(1));
+ Error += glm::all(glm::equal(B, I, glm::epsilon<T>())) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename genType>
+static int test_size()
+{
+ typedef typename genType::value_type T;
+
+ int Error = 0;
+
+ Error += sizeof(glm::vec<1, T>) == sizeof(genType) ? 0 : 1;
+ Error += genType().length() == 1 ? 0 : 1;
+ Error += genType::length() == 1 ? 0 : 1;
+
+ return Error;
+}
+
+template <typename genType>
+static int test_relational()
+{
+ typedef typename genType::value_type valType;
+
+ int Error = 0;
+
+ genType const A(1);
+ genType const B(1);
+ genType const C(0);
+
+ Error += all(equal(A, B, glm::epsilon<valType>())) ? 0 : 1;
+ Error += any(notEqual(A, C, glm::epsilon<valType>())) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename genType>
+static int test_constexpr()
+{
+# if GLM_CONFIG_CONSTEXP == GLM_ENABLE
+ static_assert(genType::length() == 1, "GLM: Failed constexpr");
+# endif
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_operators<glm::dvec1>();
+ Error += test_operators<glm::lowp_dvec1>();
+ Error += test_operators<glm::mediump_dvec1>();
+ Error += test_operators<glm::highp_dvec1>();
+
+ Error += test_ctor<glm::dvec1>();
+ Error += test_ctor<glm::lowp_dvec1>();
+ Error += test_ctor<glm::mediump_dvec1>();
+ Error += test_ctor<glm::highp_dvec1>();
+
+ Error += test_size<glm::dvec1>();
+ Error += test_size<glm::lowp_dvec1>();
+ Error += test_size<glm::mediump_dvec1>();
+ Error += test_size<glm::highp_dvec1>();
+
+ Error += test_relational<glm::dvec1>();
+ Error += test_relational<glm::lowp_dvec1>();
+ Error += test_relational<glm::mediump_dvec1>();
+ Error += test_relational<glm::highp_dvec1>();
+
+ Error += test_constexpr<glm::dvec1>();
+ Error += test_constexpr<glm::lowp_dvec1>();
+ Error += test_constexpr<glm::mediump_dvec1>();
+ Error += test_constexpr<glm::highp_dvec1>();
+
+ Error += test_operators<glm::vec1>();
+ Error += test_operators<glm::lowp_vec1>();
+ Error += test_operators<glm::mediump_vec1>();
+ Error += test_operators<glm::highp_vec1>();
+
+ Error += test_ctor<glm::vec1>();
+ Error += test_ctor<glm::lowp_vec1>();
+ Error += test_ctor<glm::mediump_vec1>();
+ Error += test_ctor<glm::highp_vec1>();
+
+ Error += test_size<glm::vec1>();
+ Error += test_size<glm::lowp_vec1>();
+ Error += test_size<glm::mediump_vec1>();
+ Error += test_size<glm::highp_vec1>();
+
+ Error += test_relational<glm::vec1>();
+ Error += test_relational<glm::lowp_vec1>();
+ Error += test_relational<glm::mediump_vec1>();
+ Error += test_relational<glm::highp_vec1>();
+
+ Error += test_constexpr<glm::vec1>();
+ Error += test_constexpr<glm::lowp_vec1>();
+ Error += test_constexpr<glm::mediump_vec1>();
+ Error += test_constexpr<glm::highp_vec1>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_int1_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_int1_sized.cpp
new file mode 100644
index 0000000..c262f49
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_int1_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_int1_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+ static_assert(sizeof(glm::i8vec1) == 1, "int8 size isn't 1 byte on this platform");
+ static_assert(sizeof(glm::i16vec1) == 2, "int16 size isn't 2 bytes on this platform");
+ static_assert(sizeof(glm::i32vec1) == 4, "int32 size isn't 4 bytes on this platform");
+ static_assert(sizeof(glm::i64vec1) == 8, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec1) == 1 ? 0 : 1;
+ Error += sizeof(glm::i16vec1) == 2 ? 0 : 1;
+ Error += sizeof(glm::i32vec1) == 4 ? 0 : 1;
+ Error += sizeof(glm::i64vec1) == 8 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec1) < sizeof(glm::i16vec1) ? 0 : 1;
+ Error += sizeof(glm::i16vec1) < sizeof(glm::i32vec1) ? 0 : 1;
+ Error += sizeof(glm::i32vec1) < sizeof(glm::i64vec1) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_int2_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_int2_sized.cpp
new file mode 100644
index 0000000..f4ad8b6
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_int2_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_int2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+	static_assert(sizeof(glm::i8vec2) == 2, "i8vec2 size isn't 2 bytes on this platform");
+	static_assert(sizeof(glm::i16vec2) == 4, "i16vec2 size isn't 4 bytes on this platform");
+	static_assert(sizeof(glm::i32vec2) == 8, "i32vec2 size isn't 8 bytes on this platform");
+	static_assert(sizeof(glm::i64vec2) == 16, "i64vec2 size isn't 16 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec2) == 2 ? 0 : 1;
+ Error += sizeof(glm::i16vec2) == 4 ? 0 : 1;
+ Error += sizeof(glm::i32vec2) == 8 ? 0 : 1;
+ Error += sizeof(glm::i64vec2) == 16 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec2) < sizeof(glm::i16vec2) ? 0 : 1;
+ Error += sizeof(glm::i16vec2) < sizeof(glm::i32vec2) ? 0 : 1;
+ Error += sizeof(glm::i32vec2) < sizeof(glm::i64vec2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_int3_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_int3_sized.cpp
new file mode 100644
index 0000000..c51bfe7
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_int3_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_int3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+	static_assert(sizeof(glm::i8vec3) == 3, "i8vec3 size isn't 3 bytes on this platform");
+	static_assert(sizeof(glm::i16vec3) == 6, "i16vec3 size isn't 6 bytes on this platform");
+	static_assert(sizeof(glm::i32vec3) == 12, "i32vec3 size isn't 12 bytes on this platform");
+	static_assert(sizeof(glm::i64vec3) == 24, "i64vec3 size isn't 24 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec3) == 3 ? 0 : 1;
+ Error += sizeof(glm::i16vec3) == 6 ? 0 : 1;
+ Error += sizeof(glm::i32vec3) == 12 ? 0 : 1;
+ Error += sizeof(glm::i64vec3) == 24 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec3) < sizeof(glm::i16vec3) ? 0 : 1;
+ Error += sizeof(glm::i16vec3) < sizeof(glm::i32vec3) ? 0 : 1;
+ Error += sizeof(glm::i32vec3) < sizeof(glm::i64vec3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_int4_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_int4_sized.cpp
new file mode 100644
index 0000000..93fd9ed
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_int4_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_int4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::i8vec4) == 4, "i8vec4 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::i16vec4) == 8, "i16vec4 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::i32vec4) == 16, "i32vec4 size isn't 16 bytes on this platform");
+static_assert(sizeof(glm::i64vec4) == 32, "i64vec4 size isn't 32 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec4) == 4 ? 0 : 1;
+ Error += sizeof(glm::i16vec4) == 8 ? 0 : 1;
+ Error += sizeof(glm::i32vec4) == 16 ? 0 : 1;
+ Error += sizeof(glm::i64vec4) == 32 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec4) < sizeof(glm::i16vec4) ? 0 : 1;
+ Error += sizeof(glm::i16vec4) < sizeof(glm::i32vec4) ? 0 : 1;
+ Error += sizeof(glm::i32vec4) < sizeof(glm::i64vec4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_integer.cpp b/3rdparty/glm/source/test/ext/ext_vector_integer.cpp
new file mode 100644
index 0000000..d7278d3
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_integer.cpp
@@ -0,0 +1,547 @@
+#include <glm/ext/vector_integer.hpp>
+#include <glm/ext/scalar_int_sized.hpp>
+#include <glm/ext/scalar_uint_sized.hpp>
+#include <vector>
+#include <ctime>
+#include <cstdio>
+
+namespace isPowerOfTwo
+{
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ bool Return;
+ };
+
+ template <glm::length_t L>
+ int test_int16()
+ {
+ type<glm::int16> const Data[] =
+ {
+ { 0x0001, true },
+ { 0x0002, true },
+ { 0x0004, true },
+ { 0x0080, true },
+ { 0x0000, true },
+ { 0x0003, false }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::int16>); i < n; ++i)
+ {
+ glm::vec<L, bool> const Result = glm::isPowerOfTwo(glm::vec<L, glm::int16>(Data[i].Value));
+ Error += glm::vec<L, bool>(Data[i].Return) == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ template <glm::length_t L>
+ int test_uint16()
+ {
+ type<glm::uint16> const Data[] =
+ {
+ { 0x0001, true },
+ { 0x0002, true },
+ { 0x0004, true },
+ { 0x0000, true },
+ { 0x0000, true },
+ { 0x0003, false }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint16>); i < n; ++i)
+ {
+ glm::vec<L, bool> const Result = glm::isPowerOfTwo(glm::vec<L, glm::uint16>(Data[i].Value));
+ Error += glm::vec<L, bool>(Data[i].Return) == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ template <glm::length_t L>
+ int test_int32()
+ {
+ type<int> const Data[] =
+ {
+ { 0x00000001, true },
+ { 0x00000002, true },
+ { 0x00000004, true },
+ { 0x0000000f, false },
+ { 0x00000000, true },
+ { 0x00000003, false }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ glm::vec<L, bool> const Result = glm::isPowerOfTwo(glm::vec<L, glm::int32>(Data[i].Value));
+ Error += glm::vec<L, bool>(Data[i].Return) == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ template <glm::length_t L>
+ int test_uint32()
+ {
+ type<glm::uint> const Data[] =
+ {
+ { 0x00000001, true },
+ { 0x00000002, true },
+ { 0x00000004, true },
+ { 0x80000000, true },
+ { 0x00000000, true },
+ { 0x00000003, false }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint>); i < n; ++i)
+ {
+ glm::vec<L, bool> const Result = glm::isPowerOfTwo(glm::vec<L, glm::uint32>(Data[i].Value));
+ Error += glm::vec<L, bool>(Data[i].Return) == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += test_int16<1>();
+ Error += test_int16<2>();
+ Error += test_int16<3>();
+ Error += test_int16<4>();
+
+ Error += test_uint16<1>();
+ Error += test_uint16<2>();
+ Error += test_uint16<3>();
+ Error += test_uint16<4>();
+
+ Error += test_int32<1>();
+ Error += test_int32<2>();
+ Error += test_int32<3>();
+ Error += test_int32<4>();
+
+ Error += test_uint32<1>();
+ Error += test_uint32<2>();
+ Error += test_uint32<3>();
+ Error += test_uint32<4>();
+
+ return Error;
+ }
+}//isPowerOfTwo
+
+namespace prevPowerOfTwo
+{
+ template <glm::length_t L, typename T>
+ int run()
+ {
+ int Error = 0;
+
+ glm::vec<L, T> const A = glm::prevPowerOfTwo(glm::vec<L, T>(7));
+ Error += A == glm::vec<L, T>(4) ? 0 : 1;
+
+ glm::vec<L, T> const B = glm::prevPowerOfTwo(glm::vec<L, T>(15));
+ Error += B == glm::vec<L, T>(8) ? 0 : 1;
+
+ glm::vec<L, T> const C = glm::prevPowerOfTwo(glm::vec<L, T>(31));
+ Error += C == glm::vec<L, T>(16) ? 0 : 1;
+
+ glm::vec<L, T> const D = glm::prevPowerOfTwo(glm::vec<L, T>(32));
+ Error += D == glm::vec<L, T>(32) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<1, glm::int8>();
+ Error += run<2, glm::int8>();
+ Error += run<3, glm::int8>();
+ Error += run<4, glm::int8>();
+
+ Error += run<1, glm::int16>();
+ Error += run<2, glm::int16>();
+ Error += run<3, glm::int16>();
+ Error += run<4, glm::int16>();
+
+ Error += run<1, glm::int32>();
+ Error += run<2, glm::int32>();
+ Error += run<3, glm::int32>();
+ Error += run<4, glm::int32>();
+
+ Error += run<1, glm::int64>();
+ Error += run<2, glm::int64>();
+ Error += run<3, glm::int64>();
+ Error += run<4, glm::int64>();
+
+ Error += run<1, glm::uint8>();
+ Error += run<2, glm::uint8>();
+ Error += run<3, glm::uint8>();
+ Error += run<4, glm::uint8>();
+
+ Error += run<1, glm::uint16>();
+ Error += run<2, glm::uint16>();
+ Error += run<3, glm::uint16>();
+ Error += run<4, glm::uint16>();
+
+ Error += run<1, glm::uint32>();
+ Error += run<2, glm::uint32>();
+ Error += run<3, glm::uint32>();
+ Error += run<4, glm::uint32>();
+
+ Error += run<1, glm::uint64>();
+ Error += run<2, glm::uint64>();
+ Error += run<3, glm::uint64>();
+ Error += run<4, glm::uint64>();
+
+ return Error;
+ }
+}//namespace prevPowerOfTwo
+
+namespace nextPowerOfTwo
+{
+ template <glm::length_t L, typename T>
+ int run()
+ {
+ int Error = 0;
+
+ glm::vec<L, T> const A = glm::nextPowerOfTwo(glm::vec<L, T>(7));
+ Error += A == glm::vec<L, T>(8) ? 0 : 1;
+
+ glm::vec<L, T> const B = glm::nextPowerOfTwo(glm::vec<L, T>(15));
+ Error += B == glm::vec<L, T>(16) ? 0 : 1;
+
+ glm::vec<L, T> const C = glm::nextPowerOfTwo(glm::vec<L, T>(31));
+ Error += C == glm::vec<L, T>(32) ? 0 : 1;
+
+ glm::vec<L, T> const D = glm::nextPowerOfTwo(glm::vec<L, T>(32));
+ Error += D == glm::vec<L, T>(32) ? 0 : 1;
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<1, glm::int8>();
+ Error += run<2, glm::int8>();
+ Error += run<3, glm::int8>();
+ Error += run<4, glm::int8>();
+
+ Error += run<1, glm::int16>();
+ Error += run<2, glm::int16>();
+ Error += run<3, glm::int16>();
+ Error += run<4, glm::int16>();
+
+ Error += run<1, glm::int32>();
+ Error += run<2, glm::int32>();
+ Error += run<3, glm::int32>();
+ Error += run<4, glm::int32>();
+
+ Error += run<1, glm::int64>();
+ Error += run<2, glm::int64>();
+ Error += run<3, glm::int64>();
+ Error += run<4, glm::int64>();
+
+ Error += run<1, glm::uint8>();
+ Error += run<2, glm::uint8>();
+ Error += run<3, glm::uint8>();
+ Error += run<4, glm::uint8>();
+
+ Error += run<1, glm::uint16>();
+ Error += run<2, glm::uint16>();
+ Error += run<3, glm::uint16>();
+ Error += run<4, glm::uint16>();
+
+ Error += run<1, glm::uint32>();
+ Error += run<2, glm::uint32>();
+ Error += run<3, glm::uint32>();
+ Error += run<4, glm::uint32>();
+
+ Error += run<1, glm::uint64>();
+ Error += run<2, glm::uint64>();
+ Error += run<3, glm::uint64>();
+ Error += run<4, glm::uint64>();
+
+ return Error;
+ }
+}//namespace nextPowerOfTwo
+
+namespace prevMultiple
+{
+ template<typename genIUType>
+ struct type
+ {
+ genIUType Source;
+ genIUType Multiple;
+ genIUType Return;
+ };
+
+ template <glm::length_t L, typename T>
+ int run()
+ {
+ type<T> const Data[] =
+ {
+ { 8, 3, 6 },
+ { 7, 7, 7 }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<T>); i < n; ++i)
+ {
+ glm::vec<L, T> const Result0 = glm::prevMultiple(glm::vec<L, T>(Data[i].Source), Data[i].Multiple);
+ Error += glm::vec<L, T>(Data[i].Return) == Result0 ? 0 : 1;
+
+ glm::vec<L, T> const Result1 = glm::prevMultiple(glm::vec<L, T>(Data[i].Source), glm::vec<L, T>(Data[i].Multiple));
+ Error += glm::vec<L, T>(Data[i].Return) == Result1 ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<1, glm::int8>();
+ Error += run<2, glm::int8>();
+ Error += run<3, glm::int8>();
+ Error += run<4, glm::int8>();
+
+ Error += run<1, glm::int16>();
+ Error += run<2, glm::int16>();
+ Error += run<3, glm::int16>();
+ Error += run<4, glm::int16>();
+
+ Error += run<1, glm::int32>();
+ Error += run<2, glm::int32>();
+ Error += run<3, glm::int32>();
+ Error += run<4, glm::int32>();
+
+ Error += run<1, glm::int64>();
+ Error += run<2, glm::int64>();
+ Error += run<3, glm::int64>();
+ Error += run<4, glm::int64>();
+
+ Error += run<1, glm::uint8>();
+ Error += run<2, glm::uint8>();
+ Error += run<3, glm::uint8>();
+ Error += run<4, glm::uint8>();
+
+ Error += run<1, glm::uint16>();
+ Error += run<2, glm::uint16>();
+ Error += run<3, glm::uint16>();
+ Error += run<4, glm::uint16>();
+
+ Error += run<1, glm::uint32>();
+ Error += run<2, glm::uint32>();
+ Error += run<3, glm::uint32>();
+ Error += run<4, glm::uint32>();
+
+ Error += run<1, glm::uint64>();
+ Error += run<2, glm::uint64>();
+ Error += run<3, glm::uint64>();
+ Error += run<4, glm::uint64>();
+
+ return Error;
+ }
+}//namespace prevMultiple
+
+namespace nextMultiple
+{
+ template<typename genIUType>
+ struct type
+ {
+ genIUType Source;
+ genIUType Multiple;
+ genIUType Return;
+ };
+
+ template <glm::length_t L, typename T>
+ int run()
+ {
+ type<T> const Data[] =
+ {
+ { 3, 4, 4 },
+ { 6, 3, 6 },
+ { 5, 3, 6 },
+ { 7, 7, 7 },
+ { 0, 1, 0 },
+ { 8, 3, 9 }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<T>); i < n; ++i)
+ {
+ glm::vec<L, T> const Result0 = glm::nextMultiple(glm::vec<L, T>(Data[i].Source), glm::vec<L, T>(Data[i].Multiple));
+ Error += glm::vec<L, T>(Data[i].Return) == Result0 ? 0 : 1;
+
+ glm::vec<L, T> const Result1 = glm::nextMultiple(glm::vec<L, T>(Data[i].Source), Data[i].Multiple);
+ Error += glm::vec<L, T>(Data[i].Return) == Result1 ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<1, glm::int8>();
+ Error += run<2, glm::int8>();
+ Error += run<3, glm::int8>();
+ Error += run<4, glm::int8>();
+
+ Error += run<1, glm::int16>();
+ Error += run<2, glm::int16>();
+ Error += run<3, glm::int16>();
+ Error += run<4, glm::int16>();
+
+ Error += run<1, glm::int32>();
+ Error += run<2, glm::int32>();
+ Error += run<3, glm::int32>();
+ Error += run<4, glm::int32>();
+
+ Error += run<1, glm::int64>();
+ Error += run<2, glm::int64>();
+ Error += run<3, glm::int64>();
+ Error += run<4, glm::int64>();
+
+ Error += run<1, glm::uint8>();
+ Error += run<2, glm::uint8>();
+ Error += run<3, glm::uint8>();
+ Error += run<4, glm::uint8>();
+
+ Error += run<1, glm::uint16>();
+ Error += run<2, glm::uint16>();
+ Error += run<3, glm::uint16>();
+ Error += run<4, glm::uint16>();
+
+ Error += run<1, glm::uint32>();
+ Error += run<2, glm::uint32>();
+ Error += run<3, glm::uint32>();
+ Error += run<4, glm::uint32>();
+
+ Error += run<1, glm::uint64>();
+ Error += run<2, glm::uint64>();
+ Error += run<3, glm::uint64>();
+ Error += run<4, glm::uint64>();
+
+ return Error;
+ }
+}//namespace nextMultiple
+
+namespace findNSB
+{
	// One test case: findNSB(Source, SignificantBitCount) is expected to
	// return the bit index in Return (-1 when Source does not contain
	// SignificantBitCount set bits).
	template<typename T>
	struct type
	{
		T Source;
		int SignificantBitCount;
		int Return;
	};
+
+ template <glm::length_t L, typename T>
+ int run()
+ {
+ type<T> const Data[] =
+ {
+ { 0x00, 1,-1 },
+ { 0x01, 2,-1 },
+ { 0x02, 2,-1 },
+ { 0x06, 3,-1 },
+ { 0x01, 1, 0 },
+ { 0x03, 1, 0 },
+ { 0x03, 2, 1 },
+ { 0x07, 2, 1 },
+ { 0x05, 2, 2 },
+ { 0x0D, 2, 2 }
+ };
+
+ int Error = 0;
+
+ for (std::size_t i = 0, n = sizeof(Data) / sizeof(type<T>); i < n; ++i)
+ {
+ glm::vec<L, int> const Result0 = glm::findNSB<L, T, glm::defaultp>(glm::vec<L, T>(Data[i].Source), glm::vec<L, int>(Data[i].SignificantBitCount));
+ Error += glm::vec<L, int>(Data[i].Return) == Result0 ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ Error += run<1, glm::uint8>();
+ Error += run<2, glm::uint8>();
+ Error += run<3, glm::uint8>();
+ Error += run<4, glm::uint8>();
+
+ Error += run<1, glm::uint16>();
+ Error += run<2, glm::uint16>();
+ Error += run<3, glm::uint16>();
+ Error += run<4, glm::uint16>();
+
+ Error += run<1, glm::uint32>();
+ Error += run<2, glm::uint32>();
+ Error += run<3, glm::uint32>();
+ Error += run<4, glm::uint32>();
+
+ Error += run<1, glm::uint64>();
+ Error += run<2, glm::uint64>();
+ Error += run<3, glm::uint64>();
+ Error += run<4, glm::uint64>();
+
+ Error += run<1, glm::int8>();
+ Error += run<2, glm::int8>();
+ Error += run<3, glm::int8>();
+ Error += run<4, glm::int8>();
+
+ Error += run<1, glm::int16>();
+ Error += run<2, glm::int16>();
+ Error += run<3, glm::int16>();
+ Error += run<4, glm::int16>();
+
+ Error += run<1, glm::int32>();
+ Error += run<2, glm::int32>();
+ Error += run<3, glm::int32>();
+ Error += run<4, glm::int32>();
+
+ Error += run<1, glm::int64>();
+ Error += run<2, glm::int64>();
+ Error += run<3, glm::int64>();
+ Error += run<4, glm::int64>();
+
+
+ return Error;
+ }
+}//namespace findNSB
+
// Entry point: sums the failure counts of all test namespaces; the process
// exit status is therefore 0 only when every check passed.
int main()
{
	int Error = 0;

	Error += isPowerOfTwo::test();
	Error += prevPowerOfTwo::test();
	Error += nextPowerOfTwo::test();
	Error += prevMultiple::test();
	Error += nextMultiple::test();
	Error += findNSB::test();

	return Error;
}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_integer_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_integer_sized.cpp
new file mode 100644
index 0000000..52b3f4e
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_integer_sized.cpp
@@ -0,0 +1,216 @@
+#include <glm/ext/vector_integer.hpp>
+#include <glm/ext/vector_int1.hpp>
+#include <glm/ext/vector_int1_sized.hpp>
+#include <glm/ext/vector_uint1.hpp>
+#include <glm/ext/vector_uint1_sized.hpp>
+
// Exercises comparison, arithmetic, modulo, bitwise and shift operators of
// a one-component integer vector type; returns the number of failed checks.
template <typename genType>
static int test_operators()
{
	int Error = 0;

	// operator== and operator!= must be consistent with each other.
	{
		genType const A(1);
		genType const B(1);

		bool const R = A != B;
		bool const S = A == B;
		Error += (S && !R) ? 0 : 1;
	}

	// Basic arithmetic on equal operands.
	{
		genType const A(1);
		genType const B(1);

		genType const C = A + B;
		Error += C == genType(2) ? 0 : 1;

		genType const D = A - B;
		Error += D == genType(0) ? 0 : 1;

		genType const E = A * B;
		Error += E == genType(1) ? 0 : 1;

		genType const F = A / B;
		Error += F == genType(1) ? 0 : 1;
	}

	// Modulo.
	{
		genType const A(3);
		genType const B(2);

		genType const C = A % B;
		Error += C == genType(1) ? 0 : 1;
	}

	// Bitwise AND / OR / XOR.
	{
		genType const A(1);
		genType const B(1);
		genType const C(0);

		genType const I = A & B;
		Error += I == genType(1) ? 0 : 1;
		genType const D = A & C;
		Error += D == genType(0) ? 0 : 1;

		genType const E = A | B;
		Error += E == genType(1) ? 0 : 1;
		genType const F = A | C;
		Error += F == genType(1) ? 0 : 1;

		genType const G = A ^ B;
		Error += G == genType(0) ? 0 : 1;
		genType const H = A ^ C;
		Error += H == genType(1) ? 0 : 1;
	}

	// Shifts. (Fix: the original declared an unused 'genType const A(0);'
	// here, which took part in no check and triggered unused-variable
	// warnings; it has been removed.)
	{
		genType const B(1);
		genType const C(2);

		genType const D = B << B;
		Error += D == genType(2) ? 0 : 1;
		genType const E = C >> B;
		Error += E == genType(1) ? 0 : 1;
	}

	return Error;
}
+
+template <typename genType>
+static int test_ctor()
+{
+ typedef typename genType::value_type T;
+
+ int Error = 0;
+
+ genType const A = genType(1);
+
+ genType const E(genType(1));
+ Error += A == E ? 0 : 1;
+
+ genType const F(E);
+ Error += A == F ? 0 : 1;
+
+ genType const B = genType(1);
+ genType const G(glm::vec<2, T>(1));
+ Error += B == G ? 0 : 1;
+
+ genType const H(glm::vec<3, T>(1));
+ Error += B == H ? 0 : 1;
+
+ genType const I(glm::vec<4, T>(1));
+ Error += B == I ? 0 : 1;
+
+ return Error;
+}
+
// A one-component vector must be exactly as large as its component type and
// report a length of 1 both statically and on an instance.
template <typename genType>
static int test_size()
{
	int Error = 0;

	if(sizeof(genType) != sizeof(typename genType::value_type))
		++Error;
	if(genType().length() != 1)
		++Error;
	if(genType::length() != 1)
		++Error;

	return Error;
}
+
// Checks operator==, operator!= and the component-wise equal/notEqual
// relational functions (found via ADL) on one-component vectors.
template <typename genType>
static int test_relational()
{
	genType const One(1);
	genType const AlsoOne(1);
	genType const Zero(0);

	int Error = 0;

	if(!(One == AlsoOne))
		++Error;
	if(!(One != Zero))
		++Error;
	if(!all(equal(One, AlsoOne)))
		++Error;
	if(!any(notEqual(One, Zero)))
		++Error;

	return Error;
}
+
// Compile-time checks: when GLM is built with constexpr support, validates
// length(), indexing and comparison of one-component vectors at compile time.
// NOTE(review): 'GLM_CONFIG_CONSTEXP' appears to be the macro's actual
// spelling in the GLM configuration headers — confirm before "fixing" it.
template <typename genType>
static int test_constexpr()
{
#	if GLM_CONFIG_CONSTEXP == GLM_ENABLE
	static_assert(genType::length() == 1, "GLM: Failed constexpr");
	static_assert(genType(1)[0] == 1, "GLM: Failed constexpr");
	static_assert(genType(1) == genType(1), "GLM: Failed constexpr");
	static_assert(genType(1) != genType(0), "GLM: Failed constexpr");
#	endif

	return 0;
}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_operators<glm::ivec1>();
+ Error += test_operators<glm::i8vec1>();
+ Error += test_operators<glm::i16vec1>();
+ Error += test_operators<glm::i32vec1>();
+ Error += test_operators<glm::i64vec1>();
+
+ Error += test_ctor<glm::ivec1>();
+ Error += test_ctor<glm::i8vec1>();
+ Error += test_ctor<glm::i16vec1>();
+ Error += test_ctor<glm::i32vec1>();
+ Error += test_ctor<glm::i64vec1>();
+
+ Error += test_size<glm::ivec1>();
+ Error += test_size<glm::i8vec1>();
+ Error += test_size<glm::i16vec1>();
+ Error += test_size<glm::i32vec1>();
+ Error += test_size<glm::i64vec1>();
+
+ Error += test_relational<glm::ivec1>();
+ Error += test_relational<glm::i8vec1>();
+ Error += test_relational<glm::i16vec1>();
+ Error += test_relational<glm::i32vec1>();
+ Error += test_relational<glm::i64vec1>();
+
+ Error += test_constexpr<glm::ivec1>();
+ Error += test_constexpr<glm::i8vec1>();
+ Error += test_constexpr<glm::i16vec1>();
+ Error += test_constexpr<glm::i32vec1>();
+ Error += test_constexpr<glm::i64vec1>();
+
+ Error += test_operators<glm::uvec1>();
+ Error += test_operators<glm::u8vec1>();
+ Error += test_operators<glm::u16vec1>();
+ Error += test_operators<glm::u32vec1>();
+ Error += test_operators<glm::u64vec1>();
+
+ Error += test_ctor<glm::uvec1>();
+ Error += test_ctor<glm::u8vec1>();
+ Error += test_ctor<glm::u16vec1>();
+ Error += test_ctor<glm::u32vec1>();
+ Error += test_ctor<glm::u64vec1>();
+
+ Error += test_size<glm::uvec1>();
+ Error += test_size<glm::u8vec1>();
+ Error += test_size<glm::u16vec1>();
+ Error += test_size<glm::u32vec1>();
+ Error += test_size<glm::u64vec1>();
+
+ Error += test_relational<glm::uvec1>();
+ Error += test_relational<glm::u8vec1>();
+ Error += test_relational<glm::u16vec1>();
+ Error += test_relational<glm::u32vec1>();
+ Error += test_relational<glm::u64vec1>();
+
+ Error += test_constexpr<glm::uvec1>();
+ Error += test_constexpr<glm::u8vec1>();
+ Error += test_constexpr<glm::u16vec1>();
+ Error += test_constexpr<glm::u32vec1>();
+ Error += test_constexpr<glm::u64vec1>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_packing.cpp b/3rdparty/glm/source/test/ext/ext_vector_packing.cpp
new file mode 100644
index 0000000..d7cbce2
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_packing.cpp
@@ -0,0 +1,58 @@
#include <glm/ext/vector_packing.hpp>
#include <glm/ext/vector_relational.hpp>
#include <glm/ext/vector_uint2_sized.hpp>
#include <glm/ext/vector_int2_sized.hpp>
#include <glm/gtc/packing.hpp>
#include <glm/vec2.hpp>
#include <cassert>
#include <vector>
+
+int test_packUnorm()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.7f));
+ A.push_back(glm::vec2(0.5f, 0.1f));
+
+ for (std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::u16vec2 C = glm::packUnorm<glm::uint16>(B);
+ glm::vec2 D = glm::unpackUnorm<float>(C);
+ Error += glm::all(glm::equal(B, D, 1.0f / 255.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.0f));
+ A.push_back(glm::vec2(-0.5f, -0.7f));
+ A.push_back(glm::vec2(-0.1f, 0.1f));
+
+ for (std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::i16vec2 C = glm::packSnorm<glm::int16>(B);
+ glm::vec2 D = glm::unpackSnorm<float>(C);
+ Error += glm::all(glm::equal(B, D, 1.0f / 32767.0f * 2.0f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
// Entry point: runs the unorm and snorm pack/unpack round-trip tests.
int main()
{
	int Error = 0;

	Error += test_packUnorm();
	Error += test_packSnorm();

	return Error;
}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_reciprocal.cpp b/3rdparty/glm/source/test/ext/ext_vector_reciprocal.cpp
new file mode 100644
index 0000000..f4b9f18
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_reciprocal.cpp
@@ -0,0 +1,186 @@
+#include <glm/ext/vector_reciprocal.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/scalar_constants.hpp>
+#include <glm/ext/vector_double1.hpp>
+
+static int test_sec()
+{
+ int Error = 0;
+
+ glm::dvec1 const a = glm::sec(glm::dvec1(0.0));
+ Error += glm::all(glm::equal(a, glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const b = glm::sec(glm::dvec1(glm::pi<double>() * 2.0));
+ Error += glm::all(glm::equal(b, glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const c = glm::sec(glm::dvec1(glm::pi<double>() * -2.0));
+ Error += glm::all(glm::equal(c, glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const d = glm::sec(glm::dvec1(glm::pi<double>() * 1.0));
+ Error += glm::all(glm::equal(d, -glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const e = glm::sec(glm::dvec1(glm::pi<double>() * -1.0));
+ Error += glm::all(glm::equal(e, -glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_csc()
+{
+ int Error = 0;
+
+ glm::dvec1 const a = glm::csc(glm::dvec1(glm::pi<double>() * 0.5));
+ Error += glm::all(glm::equal(a, glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const b = glm::csc(glm::dvec1(glm::pi<double>() * -0.5));
+ Error += glm::all(glm::equal(b, glm::dvec1(-1.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_cot()
+{
+ int Error = 0;
+
+ glm::dvec1 const a = glm::cot(glm::dvec1(glm::pi<double>() * 0.5));
+ Error += glm::all(glm::equal(a, glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const b = glm::cot(glm::dvec1(glm::pi<double>() * -0.5));
+ Error += glm::all(glm::equal(b, glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_asec()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::equal(glm::asec(glm::dvec1(100000.0)), glm::dvec1(glm::pi<double>() * 0.5), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::asec(glm::dvec1(-100000.0)), glm::dvec1(glm::pi<double>() * 0.5), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acsc()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::equal(glm::acsc(glm::dvec1(100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::acsc(glm::dvec1(-100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acot()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::equal(glm::acot(glm::dvec1(100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::acot(glm::dvec1(-100000.0)), glm::dvec1(glm::pi<double>()), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::acot(glm::dvec1(0.0)), glm::dvec1(glm::pi<double>() * 0.5), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_sech()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::equal(glm::sech(glm::dvec1(100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::sech(glm::dvec1(-100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::sech(glm::dvec1(0.0)), glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_csch()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::equal(glm::csch(glm::dvec1(100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::csch(glm::dvec1(-100000.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_coth()
+{
+ int Error = 0;
+
+ glm::dvec1 const a = glm::coth(glm::dvec1(100.0));
+ Error += glm::all(glm::equal(a, glm::dvec1(1.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const b = glm::coth(glm::dvec1(-100.0));
+ Error += glm::all(glm::equal(b, glm::dvec1(-1.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_asech()
+{
+ int Error = 0;
+
+ glm::dvec1 const a = glm::asech(glm::dvec1(1.0));
+ Error += glm::all(glm::equal(a, glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acsch()
+{
+ int Error = 0;
+
+ glm::dvec1 const a(glm::acsch(glm::dvec1(0.01)));
+ Error += a.x > 1.0 ? 0 : 1;
+
+ glm::dvec1 const b(glm::acsch(glm::dvec1(-0.01)));
+ Error += b.x < -1.0 ? 0 : 1;
+
+ Error += glm::all(glm::equal(glm::acsch(glm::dvec1(100.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::acsch(glm::dvec1(-100.0)), glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_acoth()
+{
+ int Error = 0;
+
+ glm::dvec1 const a = glm::acoth(glm::dvec1(1.00001));
+ Error += a.x > 6.0 ? 0 : 1;
+
+ glm::dvec1 const b = glm::acoth(glm::dvec1(-1.00001));
+ Error += b.x < -6.0 ? 0 : 1;
+
+ glm::dvec1 const c = glm::acoth(glm::dvec1(10000.0));
+ Error += glm::all(glm::equal(c, glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ glm::dvec1 const d = glm::acoth(glm::dvec1(-10000.0));
+ Error += glm::all(glm::equal(d, glm::dvec1(0.0), 0.01)) ? 0 : 1;
+
+ return Error;
+}
+
+
// Entry point: accumulates failures from the reciprocal trigonometric
// functions (sec/csc/cot), their inverses, and the hyperbolic variants.
int main()
{
	int Error = 0;

	// Reciprocal circular functions.
	Error += test_sec();
	Error += test_csc();
	Error += test_cot();

	// Their inverses.
	Error += test_asec();
	Error += test_acsc();
	Error += test_acot();

	// Reciprocal hyperbolic functions.
	Error += test_sech();
	Error += test_csch();
	Error += test_coth();

	// Their inverses.
	Error += test_asech();
	Error += test_acsch();
	Error += test_acoth();

	return Error;
}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_relational.cpp b/3rdparty/glm/source/test/ext/ext_vector_relational.cpp
new file mode 100644
index 0000000..f6cd307
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_relational.cpp
@@ -0,0 +1,205 @@
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float1.hpp>
+#include <glm/ext/vector_float1_precision.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <glm/ext/vector_float2_precision.hpp>
+#include <glm/ext/vector_float3.hpp>
+#include <glm/ext/vector_float3_precision.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_float4_precision.hpp>
+#include <glm/ext/vector_double1.hpp>
+#include <glm/ext/vector_double1_precision.hpp>
+#include <glm/ext/vector_double2.hpp>
+#include <glm/ext/vector_double2_precision.hpp>
+#include <glm/ext/vector_double3.hpp>
+#include <glm/ext/vector_double3_precision.hpp>
+#include <glm/ext/vector_double4.hpp>
+#include <glm/ext/vector_double4_precision.hpp>
+#include <glm/ext/vector_ulp.hpp>
+
+template <typename vecType>
+static int test_equal()
+{
+ typedef typename vecType::value_type valType;
+
+ valType const A = static_cast<valType>(1.01f);
+ valType const B = static_cast<valType>(1.02f);
+ valType const Epsilon1 = static_cast<valType>(0.1f);
+ valType const Epsilon2 = static_cast<valType>(0.001f);
+
+ int Error = 0;
+
+ Error += glm::all(glm::equal(vecType(A), vecType(B), Epsilon1)) ? 0 : 1;
+ Error += glm::all(glm::equal(vecType(A), vecType(B), vecType(Epsilon1))) ? 0 : 1;
+
+ Error += !glm::any(glm::equal(vecType(A), vecType(B), Epsilon2)) ? 0 : 1;
+ Error += !glm::any(glm::equal(vecType(A), vecType(B), vecType(Epsilon2))) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename vecType>
+static int test_notEqual()
+{
+ typedef typename vecType::value_type valType;
+
+ valType const A = static_cast<valType>(1.01f);
+ valType const B = static_cast<valType>(1.02f);
+ valType const Epsilon1 = static_cast<valType>(0.1f);
+ valType const Epsilon2 = static_cast<valType>(0.001f);
+
+ int Error = 0;
+
+ Error += glm::all(glm::notEqual(vecType(A), vecType(B), Epsilon2)) ? 0 : 1;
+ Error += glm::all(glm::notEqual(vecType(A), vecType(B), vecType(Epsilon2))) ? 0 : 1;
+
+ Error += !glm::any(glm::notEqual(vecType(A), vecType(B), Epsilon1)) ? 0 : 1;
+ Error += !glm::any(glm::notEqual(vecType(A), vecType(B), vecType(Epsilon1))) ? 0 : 1;
+
+ return Error;
+}
+
// Compile-time epsilon comparison check, enabled only when GLM exposes
// constexpr support. NOTE(review): 'GLM_CONFIG_CONSTEXP' appears to be the
// macro's actual spelling in the GLM configuration headers — confirm before
// "fixing" it.
template <typename genType, typename valType>
static int test_constexpr()
{
#	if GLM_CONFIG_CONSTEXP == GLM_ENABLE
	static_assert(glm::all(glm::equal(genType(static_cast<valType>(1.01f)), genType(static_cast<valType>(1.02f)), static_cast<valType>(0.1f))), "GLM: Failed constexpr");
#	endif

	return 0;
}
+
+template <typename T>
+static int test_equal_ulps()
+{
+ typedef glm::vec<4, T, glm::defaultp> vec4;
+
+ T const One(1);
+ vec4 const Ones(1);
+
+ int Error = 0;
+
+ T const ULP1Plus = glm::nextFloat(One);
+ Error += glm::all(glm::equal(Ones, vec4(ULP1Plus), 1)) ? 0 : 1;
+
+ T const ULP2Plus = glm::nextFloat(ULP1Plus);
+ Error += !glm::all(glm::equal(Ones, vec4(ULP2Plus), 1)) ? 0 : 1;
+
+ T const ULP1Minus = glm::prevFloat(One);
+ Error += glm::all(glm::equal(Ones, vec4(ULP1Minus), 1)) ? 0 : 1;
+
+ T const ULP2Minus = glm::prevFloat(ULP1Minus);
+ Error += !glm::all(glm::equal(Ones, vec4(ULP2Minus), 1)) ? 0 : 1;
+
+ return Error;
+}
+
+template <typename T>
+static int test_notEqual_ulps()
+{
+ typedef glm::vec<4, T, glm::defaultp> vec4;
+
+ T const One(1);
+ vec4 const Ones(1);
+
+ int Error = 0;
+
+ T const ULP1Plus = glm::nextFloat(One);
+ Error += !glm::all(glm::notEqual(Ones, vec4(ULP1Plus), 1)) ? 0 : 1;
+
+ T const ULP2Plus = glm::nextFloat(ULP1Plus);
+ Error += glm::all(glm::notEqual(Ones, vec4(ULP2Plus), 1)) ? 0 : 1;
+
+ T const ULP1Minus = glm::prevFloat(One);
+ Error += !glm::all(glm::notEqual(Ones, vec4(ULP1Minus), 1)) ? 0 : 1;
+
+ T const ULP2Minus = glm::prevFloat(ULP1Minus);
+ Error += glm::all(glm::notEqual(Ones, vec4(ULP2Minus), 1)) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_equal_ulps<float>();
+ Error += test_equal_ulps<double>();
+ Error += test_notEqual_ulps<float>();
+ Error += test_notEqual_ulps<double>();
+
+ Error += test_equal<glm::vec1>();
+ Error += test_equal<glm::lowp_vec1>();
+ Error += test_equal<glm::mediump_vec1>();
+ Error += test_equal<glm::highp_vec1>();
+ Error += test_equal<glm::vec2>();
+ Error += test_equal<glm::lowp_vec2>();
+ Error += test_equal<glm::mediump_vec2>();
+ Error += test_equal<glm::highp_vec2>();
+ Error += test_equal<glm::vec3>();
+ Error += test_equal<glm::lowp_vec3>();
+ Error += test_equal<glm::mediump_vec3>();
+ Error += test_equal<glm::highp_vec3>();
+ Error += test_equal<glm::vec4>();
+ Error += test_equal<glm::lowp_vec4>();
+ Error += test_equal<glm::mediump_vec4>();
+ Error += test_equal<glm::highp_vec4>();
+
+ Error += test_equal<glm::dvec1>();
+ Error += test_equal<glm::lowp_dvec1>();
+ Error += test_equal<glm::mediump_dvec1>();
+ Error += test_equal<glm::highp_dvec1>();
+ Error += test_equal<glm::dvec2>();
+ Error += test_equal<glm::lowp_dvec2>();
+ Error += test_equal<glm::mediump_dvec2>();
+ Error += test_equal<glm::highp_dvec2>();
+ Error += test_equal<glm::dvec3>();
+ Error += test_equal<glm::lowp_dvec3>();
+ Error += test_equal<glm::mediump_dvec3>();
+ Error += test_equal<glm::highp_dvec3>();
+ Error += test_equal<glm::dvec4>();
+ Error += test_equal<glm::lowp_dvec4>();
+ Error += test_equal<glm::mediump_dvec4>();
+ Error += test_equal<glm::highp_dvec4>();
+
+ Error += test_notEqual<glm::vec1>();
+ Error += test_notEqual<glm::lowp_vec1>();
+ Error += test_notEqual<glm::mediump_vec1>();
+ Error += test_notEqual<glm::highp_vec1>();
+ Error += test_notEqual<glm::vec2>();
+ Error += test_notEqual<glm::lowp_vec2>();
+ Error += test_notEqual<glm::mediump_vec2>();
+ Error += test_notEqual<glm::highp_vec2>();
+ Error += test_notEqual<glm::vec3>();
+ Error += test_notEqual<glm::lowp_vec3>();
+ Error += test_notEqual<glm::mediump_vec3>();
+ Error += test_notEqual<glm::highp_vec3>();
+ Error += test_notEqual<glm::vec4>();
+ Error += test_notEqual<glm::lowp_vec4>();
+ Error += test_notEqual<glm::mediump_vec4>();
+ Error += test_notEqual<glm::highp_vec4>();
+
+ Error += test_notEqual<glm::dvec1>();
+ Error += test_notEqual<glm::lowp_dvec1>();
+ Error += test_notEqual<glm::mediump_dvec1>();
+ Error += test_notEqual<glm::highp_dvec1>();
+ Error += test_notEqual<glm::dvec2>();
+ Error += test_notEqual<glm::lowp_dvec2>();
+ Error += test_notEqual<glm::mediump_dvec2>();
+ Error += test_notEqual<glm::highp_dvec2>();
+ Error += test_notEqual<glm::dvec3>();
+ Error += test_notEqual<glm::lowp_dvec3>();
+ Error += test_notEqual<glm::mediump_dvec3>();
+ Error += test_notEqual<glm::highp_dvec3>();
+ Error += test_notEqual<glm::dvec4>();
+ Error += test_notEqual<glm::lowp_dvec4>();
+ Error += test_notEqual<glm::mediump_dvec4>();
+ Error += test_notEqual<glm::highp_dvec4>();
+
+ Error += test_constexpr<glm::vec1, float>();
+ Error += test_constexpr<glm::vec2, float>();
+ Error += test_constexpr<glm::vec3, float>();
+ Error += test_constexpr<glm::vec4, float>();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_uint1_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_uint1_sized.cpp
new file mode 100644
index 0000000..f2e4624
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_uint1_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_uint1_sized.hpp>
+
#if GLM_HAS_STATIC_ASSERT
	// Compile-time guarantees: a one-component sized vector must be exactly
	// as large as its component type.
	static_assert(sizeof(glm::u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
	static_assert(sizeof(glm::u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
	static_assert(sizeof(glm::u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
	static_assert(sizeof(glm::u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
#endif

// Runtime duplicate of the compile-time size checks above, so failures are
// also reported on compilers without static_assert support.
static int test_size()
{
	int Error = 0;

	Error += sizeof(glm::u8vec1) == 1 ? 0 : 1;
	Error += sizeof(glm::u16vec1) == 2 ? 0 : 1;
	Error += sizeof(glm::u32vec1) == 4 ? 0 : 1;
	Error += sizeof(glm::u64vec1) == 8 ? 0 : 1;

	return Error;
}

// A wider component type must yield a strictly larger vector.
static int test_comp()
{
	int Error = 0;

	Error += sizeof(glm::u8vec1) < sizeof(glm::u16vec1) ? 0 : 1;
	Error += sizeof(glm::u16vec1) < sizeof(glm::u32vec1) ? 0 : 1;
	Error += sizeof(glm::u32vec1) < sizeof(glm::u64vec1) ? 0 : 1;

	return Error;
}

// Entry point: returns the total number of failed checks (0 on success).
int main()
{
	int Error = 0;

	Error += test_size();
	Error += test_comp();

	return Error;
}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_uint2_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_uint2_sized.cpp
new file mode 100644
index 0000000..9c0977e
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_uint2_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_uint2_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8vec2) == 2, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16vec2) == 4, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32vec2) == 8, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64vec2) == 16, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec2) == 2 ? 0 : 1;
+ Error += sizeof(glm::u16vec2) == 4 ? 0 : 1;
+ Error += sizeof(glm::u32vec2) == 8 ? 0 : 1;
+ Error += sizeof(glm::u64vec2) == 16 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec2) < sizeof(glm::u16vec2) ? 0 : 1;
+ Error += sizeof(glm::u16vec2) < sizeof(glm::u32vec2) ? 0 : 1;
+ Error += sizeof(glm::u32vec2) < sizeof(glm::u64vec2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_uint3_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_uint3_sized.cpp
new file mode 100644
index 0000000..4cc2e44
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_uint3_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_uint3_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8vec3) == 3, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16vec3) == 6, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32vec3) == 12, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64vec3) == 24, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec3) == 3 ? 0 : 1;
+ Error += sizeof(glm::u16vec3) == 6 ? 0 : 1;
+ Error += sizeof(glm::u32vec3) == 12 ? 0 : 1;
+ Error += sizeof(glm::u64vec3) == 24 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec3) < sizeof(glm::u16vec3) ? 0 : 1;
+ Error += sizeof(glm::u16vec3) < sizeof(glm::u32vec3) ? 0 : 1;
+ Error += sizeof(glm::u32vec3) < sizeof(glm::u64vec3) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_uint4_sized.cpp b/3rdparty/glm/source/test/ext/ext_vector_uint4_sized.cpp
new file mode 100644
index 0000000..9e7ffe7
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_uint4_sized.cpp
@@ -0,0 +1,41 @@
+#include <glm/ext/vector_uint4_sized.hpp>
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::u8vec4) == 4, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::u16vec4) == 8, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::u32vec4) == 16, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::u64vec4) == 32, "int64 size isn't 8 bytes on this platform");
+#endif
+
+static int test_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec4) == 4 ? 0 : 1;
+ Error += sizeof(glm::u16vec4) == 8 ? 0 : 1;
+ Error += sizeof(glm::u32vec4) == 16 ? 0 : 1;
+ Error += sizeof(glm::u64vec4) == 32 ? 0 : 1;
+
+ return Error;
+}
+
+static int test_comp()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec4) < sizeof(glm::u16vec4) ? 0 : 1;
+ Error += sizeof(glm::u16vec4) < sizeof(glm::u32vec4) ? 0 : 1;
+ Error += sizeof(glm::u32vec4) < sizeof(glm::u64vec4) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_size();
+ Error += test_comp();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/ext/ext_vector_ulp.cpp b/3rdparty/glm/source/test/ext/ext_vector_ulp.cpp
new file mode 100644
index 0000000..6ebd1a1
--- /dev/null
+++ b/3rdparty/glm/source/test/ext/ext_vector_ulp.cpp
@@ -0,0 +1,99 @@
+#include <glm/ext/vector_ulp.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#include <glm/ext/vector_double4.hpp>
+#include <glm/ext/vector_int4.hpp>
+
+static int test_ulp_float_dist()
+{
+ int Error = 0;
+
+ glm::vec4 const A(1.0f);
+
+ glm::vec4 const B = glm::nextFloat(A);
+ Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
+ glm::vec4 const C = glm::prevFloat(B);
+ Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
+
+ glm::ivec4 const D = glm::floatDistance(A, B);
+ Error += D == glm::ivec4(1) ? 0 : 1;
+ glm::ivec4 const E = glm::floatDistance(A, C);
+ Error += E == glm::ivec4(0) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_ulp_float_step()
+{
+ int Error = 0;
+
+ glm::vec4 const A(1.0f);
+
+ for(int i = 10; i < 1000; i *= 10)
+ {
+ glm::vec4 const B = glm::nextFloat(A, i);
+ Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
+ glm::vec4 const C = glm::prevFloat(B, i);
+ Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
+
+ glm::ivec4 const D = glm::floatDistance(A, B);
+ Error += D == glm::ivec4(i) ? 0 : 1;
+ glm::ivec4 const E = glm::floatDistance(A, C);
+ Error += E == glm::ivec4(0) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_ulp_double_dist()
+{
+ int Error = 0;
+
+ glm::dvec4 const A(1.0);
+
+ glm::dvec4 const B = glm::nextFloat(A);
+ Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
+ glm::dvec4 const C = glm::prevFloat(B);
+ Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
+
+ glm::ivec4 const D(glm::floatDistance(A, B));
+ Error += D == glm::ivec4(1) ? 0 : 1;
+ glm::ivec4 const E = glm::floatDistance(A, C);
+ Error += E == glm::ivec4(0) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_ulp_double_step()
+{
+ int Error = 0;
+
+ glm::dvec4 const A(1.0);
+
+ for(int i = 10; i < 1000; i *= 10)
+ {
+ glm::dvec4 const B = glm::nextFloat(A, i);
+ Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
+ glm::dvec4 const C = glm::prevFloat(B, i);
+ Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
+
+ glm::ivec4 const D(glm::floatDistance(A, B));
+ Error += D == glm::ivec4(i) ? 0 : 1;
+ glm::ivec4 const E(glm::floatDistance(A, C));
+ Error += E == glm::ivec4(0) ? 0 : 1;
+ }
+
+ return Error;
+}
+
// Entry point: runs the ULP stepping/distance tests for float and double
// vectors; returns 0 only when every check passed.
int main()
{
	int Error = 0;

	Error += test_ulp_float_dist();
	Error += test_ulp_float_step();
	Error += test_ulp_double_dist();
	Error += test_ulp_double_step();

	return Error;
}
diff --git a/3rdparty/glm/source/test/glm.cppcheck b/3rdparty/glm/source/test/glm.cppcheck
new file mode 100644
index 0000000..12081fe
--- /dev/null
+++ b/3rdparty/glm/source/test/glm.cppcheck
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="1">
+ <includedir>
+ <dir name="../glm"/>
+ </includedir>
+</project>
diff --git a/3rdparty/glm/source/test/gtc/CMakeLists.txt b/3rdparty/glm/source/test/gtc/CMakeLists.txt
new file mode 100644
index 0000000..4aef24a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/CMakeLists.txt
@@ -0,0 +1,20 @@
+glmCreateTestGTC(gtc_bitfield)
+glmCreateTestGTC(gtc_color_space)
+glmCreateTestGTC(gtc_constants)
+glmCreateTestGTC(gtc_epsilon)
+glmCreateTestGTC(gtc_integer)
+glmCreateTestGTC(gtc_matrix_access)
+glmCreateTestGTC(gtc_matrix_integer)
+glmCreateTestGTC(gtc_matrix_inverse)
+glmCreateTestGTC(gtc_matrix_transform)
+glmCreateTestGTC(gtc_noise)
+glmCreateTestGTC(gtc_packing)
+glmCreateTestGTC(gtc_quaternion)
+glmCreateTestGTC(gtc_random)
+glmCreateTestGTC(gtc_round)
+glmCreateTestGTC(gtc_reciprocal)
+glmCreateTestGTC(gtc_type_aligned)
+glmCreateTestGTC(gtc_type_precision)
+glmCreateTestGTC(gtc_type_ptr)
+glmCreateTestGTC(gtc_ulp)
+glmCreateTestGTC(gtc_vec1)
diff --git a/3rdparty/glm/source/test/gtc/gtc_bitfield.cpp b/3rdparty/glm/source/test/gtc/gtc_bitfield.cpp
new file mode 100644
index 0000000..95c41f1
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_bitfield.cpp
@@ -0,0 +1,936 @@
+#include <glm/gtc/bitfield.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/integer.hpp>
+#include <ctime>
+#include <cstdio>
+#include <vector>
+
+namespace mask
+{
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ genType Return;
+ };
+
+ inline int mask_zero(int Bits)
+ {
+ return ~((~0) << Bits);
+ }
+
+ inline int mask_mix(int Bits)
+ {
+ return Bits >= sizeof(int) * 8 ? 0xffffffff : (static_cast<int>(1) << Bits) - static_cast<int>(1);
+ }
+
+ inline int mask_half(int Bits)
+ {
+ // We do the shift in two steps because 1 << 32 on an int is undefined.
+
+ int const Half = Bits >> 1;
+ int const Fill = ~0;
+ int const ShiftHaft = (Fill << Half);
+ int const Rest = Bits - Half;
+ int const Reversed = ShiftHaft << Rest;
+
+ return ~Reversed;
+ }
+
+ inline int mask_loop(int Bits)
+ {
+ int Mask = 0;
+ for(int Bit = 0; Bit < Bits; ++Bit)
+ Mask |= (static_cast<int>(1) << Bit);
+ return Mask;
+ }
+
+ int perf()
+ {
+ int const Count = 100000000;
+
+ std::clock_t Timestamp1 = std::clock();
+
+ {
+ std::vector<int> Mask;
+ Mask.resize(Count);
+ for(int i = 0; i < Count; ++i)
+ Mask[i] = mask_mix(i % 32);
+ }
+
+ std::clock_t Timestamp2 = std::clock();
+
+ {
+ std::vector<int> Mask;
+ Mask.resize(Count);
+ for(int i = 0; i < Count; ++i)
+ Mask[i] = mask_loop(i % 32);
+ }
+
+ std::clock_t Timestamp3 = std::clock();
+
+ {
+ std::vector<int> Mask;
+ Mask.resize(Count);
+ for(int i = 0; i < Count; ++i)
+ Mask[i] = glm::mask(i % 32);
+ }
+
+ std::clock_t Timestamp4 = std::clock();
+
+ {
+ std::vector<int> Mask;
+ Mask.resize(Count);
+ for(int i = 0; i < Count; ++i)
+ Mask[i] = mask_zero(i % 32);
+ }
+
+ std::clock_t Timestamp5 = std::clock();
+
+ {
+ std::vector<int> Mask;
+ Mask.resize(Count);
+ for(int i = 0; i < Count; ++i)
+ Mask[i] = mask_half(i % 32);
+ }
+
+ std::clock_t Timestamp6 = std::clock();
+
+ std::clock_t TimeMix = Timestamp2 - Timestamp1;
+ std::clock_t TimeLoop = Timestamp3 - Timestamp2;
+ std::clock_t TimeDefault = Timestamp4 - Timestamp3;
+ std::clock_t TimeZero = Timestamp5 - Timestamp4;
+ std::clock_t TimeHalf = Timestamp6 - Timestamp5;
+
+ std::printf("mask[mix]: %d\n", static_cast<unsigned int>(TimeMix));
+ std::printf("mask[loop]: %d\n", static_cast<unsigned int>(TimeLoop));
+ std::printf("mask[default]: %d\n", static_cast<unsigned int>(TimeDefault));
+ std::printf("mask[zero]: %d\n", static_cast<unsigned int>(TimeZero));
+ std::printf("mask[half]: %d\n", static_cast<unsigned int>(TimeHalf));
+
+ return TimeDefault < TimeLoop ? 0 : 1;
+ }
+
+ int test_uint()
+ {
+ type<glm::uint> const Data[] =
+ {
+ { 0, 0x00000000},
+ { 1, 0x00000001},
+ { 2, 0x00000003},
+ { 3, 0x00000007},
+ {31, 0x7fffffff},
+ {32, 0xffffffff}
+ };
+
+ int Error = 0;
+/* mask_zero is sadly not correct code
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int>); ++i)
+ {
+ int Result = mask_zero(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+*/
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int>); ++i)
+ {
+ int Result = mask_mix(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int>); ++i)
+ {
+ int Result = mask_half(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int>); ++i)
+ {
+ int Result = mask_loop(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<int>); ++i)
+ {
+ int Result = glm::mask(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uvec4()
+ {
+ type<glm::ivec4> const Data[] =
+ {
+ {glm::ivec4( 0), glm::ivec4(0x00000000)},
+ {glm::ivec4( 1), glm::ivec4(0x00000001)},
+ {glm::ivec4( 2), glm::ivec4(0x00000003)},
+ {glm::ivec4( 3), glm::ivec4(0x00000007)},
+ {glm::ivec4(31), glm::ivec4(0x7fffffff)},
+ {glm::ivec4(32), glm::ivec4(0xffffffff)}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::ivec4>); i < n; ++i)
+ {
+ glm::ivec4 Result = glm::mask(Data[i].Value);
+ Error += glm::all(glm::equal(Data[i].Return, Result)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ Error += test_uint();
+ Error += test_uvec4();
+
+ return Error;
+ }
+}//namespace mask
+
+namespace bitfieldInterleave3
+{
+ template<typename PARAM, typename RET>
+ inline RET refBitfieldInterleave(PARAM x, PARAM y, PARAM z)
+ {
+ RET Result = 0;
+ for(RET i = 0; i < sizeof(PARAM) * 8; ++i)
+ {
+ Result |= ((RET(x) & (RET(1U) << i)) << ((i << 1) + 0));
+ Result |= ((RET(y) & (RET(1U) << i)) << ((i << 1) + 1));
+ Result |= ((RET(z) & (RET(1U) << i)) << ((i << 1) + 2));
+ }
+ return Result;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ glm::uint16 x_max = 1 << 11;
+ glm::uint16 y_max = 1 << 11;
+ glm::uint16 z_max = 1 << 11;
+
+ for(glm::uint16 z = 0; z < z_max; z += 27)
+ for(glm::uint16 y = 0; y < y_max; y += 27)
+ for(glm::uint16 x = 0; x < x_max; x += 27)
+ {
+ glm::uint64 ResultA = refBitfieldInterleave<glm::uint16, glm::uint64>(x, y, z);
+ glm::uint64 ResultB = glm::bitfieldInterleave(x, y, z);
+ Error += ResultA == ResultB ? 0 : 1;
+ }
+
+ return Error;
+ }
+}
+
+namespace bitfieldInterleave4
+{
+ template<typename PARAM, typename RET>
+ inline RET loopBitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w)
+ {
+ RET const v[4] = {x, y, z, w};
+ RET Result = 0;
+ for(RET i = 0; i < sizeof(PARAM) * 8; i++)
+ {
+ Result |= ((((v[0] >> i) & 1U)) << ((i << 2) + 0));
+ Result |= ((((v[1] >> i) & 1U)) << ((i << 2) + 1));
+ Result |= ((((v[2] >> i) & 1U)) << ((i << 2) + 2));
+ Result |= ((((v[3] >> i) & 1U)) << ((i << 2) + 3));
+ }
+ return Result;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ glm::uint16 x_max = 1 << 11;
+ glm::uint16 y_max = 1 << 11;
+ glm::uint16 z_max = 1 << 11;
+ glm::uint16 w_max = 1 << 11;
+
+ for(glm::uint16 w = 0; w < w_max; w += 27)
+ for(glm::uint16 z = 0; z < z_max; z += 27)
+ for(glm::uint16 y = 0; y < y_max; y += 27)
+ for(glm::uint16 x = 0; x < x_max; x += 27)
+ {
+ glm::uint64 ResultA = loopBitfieldInterleave<glm::uint16, glm::uint64>(x, y, z, w);
+ glm::uint64 ResultB = glm::bitfieldInterleave(x, y, z, w);
+ Error += ResultA == ResultB ? 0 : 1;
+ }
+
+ return Error;
+ }
+}
+
+namespace bitfieldInterleave
+{
+ inline glm::uint64 fastBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+ {
+ glm::uint64 REG1;
+ glm::uint64 REG2;
+
+ REG1 = x;
+ REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+ REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+ REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+ REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+
+ REG2 = y;
+ REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+ REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+ REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+ REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+
+ return REG1 | (REG2 << 1);
+ }
+
+ inline glm::uint64 interleaveBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+ {
+ glm::uint64 REG1;
+ glm::uint64 REG2;
+
+ REG1 = x;
+ REG2 = y;
+
+ REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+ REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+
+ REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+ REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+
+ REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+
+ REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+ REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+
+ REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+ REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+
+ return REG1 | (REG2 << 1);
+ }
+/*
+ inline glm::uint64 loopBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+ {
+ static glm::uint64 const Mask[5] =
+ {
+ 0x5555555555555555,
+ 0x3333333333333333,
+ 0x0F0F0F0F0F0F0F0F,
+ 0x00FF00FF00FF00FF,
+ 0x0000FFFF0000FFFF
+ };
+
+ glm::uint64 REG1 = x;
+ glm::uint64 REG2 = y;
+ for(int i = 4; i >= 0; --i)
+ {
+ REG1 = ((REG1 << (1 << i)) | REG1) & Mask[i];
+ REG2 = ((REG2 << (1 << i)) | REG2) & Mask[i];
+ }
+
+ return REG1 | (REG2 << 1);
+ }
+*/
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+ inline glm::uint64 sseBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+ {
+ __m128i const Array = _mm_set_epi32(0, y, 0, x);
+
+ __m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
+ __m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
+ __m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+ __m128i const Mask1 = _mm_set1_epi32(0x33333333);
+ __m128i const Mask0 = _mm_set1_epi32(0x55555555);
+
+ __m128i Reg1;
+ __m128i Reg2;
+
+ // REG1 = x;
+ // REG2 = y;
+ Reg1 = _mm_load_si128(&Array);
+
+ //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+ //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+ Reg2 = _mm_slli_si128(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask4);
+
+ //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+ //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+ Reg2 = _mm_slli_si128(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask3);
+
+ //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ Reg2 = _mm_slli_epi32(Reg1, 4);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask2);
+
+ //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+ //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+ Reg2 = _mm_slli_epi32(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask1);
+
+ //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+ //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask0);
+
+ //return REG1 | (REG2 << 1);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg2 = _mm_srli_si128(Reg2, 8);
+ Reg1 = _mm_or_si128(Reg1, Reg2);
+
+ __m128i Result;
+ _mm_store_si128(&Result, Reg1);
+ return *reinterpret_cast<glm::uint64*>(&Result);
+ }
+
+ inline glm::uint64 sseUnalignedBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+ {
+ __m128i const Array = _mm_set_epi32(0, y, 0, x);
+
+ __m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
+ __m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
+ __m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+ __m128i const Mask1 = _mm_set1_epi32(0x33333333);
+ __m128i const Mask0 = _mm_set1_epi32(0x55555555);
+
+ __m128i Reg1;
+ __m128i Reg2;
+
+ // REG1 = x;
+ // REG2 = y;
+ Reg1 = _mm_loadu_si128(&Array);
+
+ //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+ //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+ Reg2 = _mm_slli_si128(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask4);
+
+ //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+ //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+ Reg2 = _mm_slli_si128(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask3);
+
+ //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ Reg2 = _mm_slli_epi32(Reg1, 4);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask2);
+
+ //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+ //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+ Reg2 = _mm_slli_epi32(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask1);
+
+ //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+ //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask0);
+
+ //return REG1 | (REG2 << 1);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg2 = _mm_srli_si128(Reg2, 8);
+ Reg1 = _mm_or_si128(Reg1, Reg2);
+
+ __m128i Result;
+ _mm_store_si128(&Result, Reg1);
+ return *reinterpret_cast<glm::uint64*>(&Result);
+ }
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+ int test()
+ {
+ int Error = 0;
+
+/*
+ {
+ for(glm::uint32 y = 0; y < (1 << 10); ++y)
+ for(glm::uint32 x = 0; x < (1 << 10); ++x)
+ {
+ glm::uint64 A = glm::bitfieldInterleave(x, y);
+ glm::uint64 B = fastBitfieldInterleave(x, y);
+ //glm::uint64 C = loopBitfieldInterleave(x, y);
+ glm::uint64 D = interleaveBitfieldInterleave(x, y);
+
+ assert(A == B);
+ //assert(A == C);
+ assert(A == D);
+
+# if GLM_ARCH & GLM_ARCH_SSE2_BIT
+ glm::uint64 E = sseBitfieldInterleave(x, y);
+ glm::uint64 F = sseUnalignedBitfieldInterleave(x, y);
+ assert(A == E);
+ assert(A == F);
+
+ __m128i G = glm_i128_interleave(_mm_set_epi32(0, y, 0, x));
+ glm::uint64 Result[2];
+ _mm_storeu_si128((__m128i*)Result, G);
+ assert(A == Result[0]);
+# endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+ }
+ }
+*/
+ {
+ for(glm::uint8 y = 0; y < 127; ++y)
+ for(glm::uint8 x = 0; x < 127; ++x)
+ {
+ glm::uint64 A(glm::bitfieldInterleave(glm::u8vec2(x, y)));
+ glm::uint64 B(glm::bitfieldInterleave(glm::u16vec2(x, y)));
+ glm::uint64 C(glm::bitfieldInterleave(glm::u32vec2(x, y)));
+
+ Error += A == B ? 0 : 1;
+ Error += A == C ? 0 : 1;
+
+ glm::u32vec2 const& D = glm::bitfieldDeinterleave(C);
+ Error += D.x == x ? 0 : 1;
+ Error += D.y == y ? 0 : 1;
+ }
+ }
+
+ {
+ for(glm::uint8 y = 0; y < 127; ++y)
+ for(glm::uint8 x = 0; x < 127; ++x)
+ {
+ glm::int64 A(glm::bitfieldInterleave(glm::int8(x), glm::int8(y)));
+ glm::int64 B(glm::bitfieldInterleave(glm::int16(x), glm::int16(y)));
+ glm::int64 C(glm::bitfieldInterleave(glm::int32(x), glm::int32(y)));
+
+ Error += A == B ? 0 : 1;
+ Error += A == C ? 0 : 1;
+ }
+ }
+
+ return Error;
+ }
+
+ int perf()
+ {
+ glm::uint32 x_max = 1 << 11;
+ glm::uint32 y_max = 1 << 10;
+
+ // ALU
+ std::vector<glm::uint64> Data(x_max * y_max);
+ std::vector<glm::u32vec2> Param(x_max * y_max);
+ for(glm::uint32 i = 0; i < Param.size(); ++i)
+ Param[i] = glm::u32vec2(i % x_max, i / y_max);
+
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = glm::bitfieldInterleave(Param[i].x, Param[i].y);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("glm::bitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = fastBitfieldInterleave(Param[i].x, Param[i].y);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("fastBitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+/*
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = loopBitfieldInterleave(Param[i].x, Param[i].y);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("loopBitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+*/
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = interleaveBitfieldInterleave(Param[i].x, Param[i].y);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("interleaveBitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+
+# if GLM_ARCH & GLM_ARCH_SSE2_BIT
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = sseBitfieldInterleave(Param[i].x, Param[i].y);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("sseBitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = sseUnalignedBitfieldInterleave(Param[i].x, Param[i].y);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("sseUnalignedBitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+# endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+ {
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = glm::bitfieldInterleave(Param[i].x, Param[i].y, Param[i].x);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("glm::detail::bitfieldInterleave Time %d clocks\n", static_cast<int>(Time));
+ }
+
+# if(GLM_ARCH & GLM_ARCH_SSE2_BIT && !(GLM_COMPILER & GLM_COMPILER_GCC))
+ {
+ // SIMD
+ std::vector<__m128i> SimdData;
+ SimdData.resize(static_cast<std::size_t>(x_max * y_max));
+ std::vector<__m128i> SimdParam;
+ SimdParam.resize(static_cast<std::size_t>(x_max * y_max));
+ for(std::size_t i = 0; i < SimdParam.size(); ++i)
+ SimdParam[i] = _mm_set_epi32(static_cast<int>(i % static_cast<std::size_t>(x_max)), 0, static_cast<int>(i / static_cast<std::size_t>(y_max)), 0);
+
+ std::clock_t LastTime = std::clock();
+
+ for(std::size_t i = 0; i < SimdData.size(); ++i)
+ SimdData[i] = glm_i128_interleave(SimdParam[i]);
+
+ std::clock_t Time = std::clock() - LastTime;
+
+ std::printf("_mm_bit_interleave_si128 Time %d clocks\n", static_cast<int>(Time));
+ }
+# endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+ return 0;
+ }
+}//namespace bitfieldInterleave
+
+namespace bitfieldInterleave5
+{
+ GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave_u8vec2(glm::uint8 x, glm::uint8 y)
+ {
+ glm::uint32 Result = (glm::uint32(y) << 16) | glm::uint32(x);
+ Result = ((Result << 4) | Result) & 0x0F0F0F0F;
+ Result = ((Result << 2) | Result) & 0x33333333;
+ Result = ((Result << 1) | Result) & 0x55555555;
+ return static_cast<glm::uint16>((Result & 0x0000FFFF) | (Result >> 15));
+ }
+
+ GLM_FUNC_QUALIFIER glm::u8vec2 bitfieldDeinterleave_u8vec2(glm::uint16 InterleavedBitfield)
+ {
+ glm::uint32 Result(InterleavedBitfield);
+ Result = ((Result << 15) | Result) & 0x55555555;
+ Result = ((Result >> 1) | Result) & 0x33333333;
+ Result = ((Result >> 2) | Result) & 0x0F0F0F0F;
+ Result = ((Result >> 4) | Result) & 0x00FF00FF;
+ return glm::u8vec2(Result & 0x0000FFFF, Result >> 16);
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave_u8vec4(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w)
+ {
+ glm::uint64 Result = (glm::uint64(w) << 48) | (glm::uint64(z) << 32) | (glm::uint64(y) << 16) | glm::uint64(x);
+ Result = ((Result << 12) | Result) & 0x000F000F000F000Full;
+ Result = ((Result << 6) | Result) & 0x0303030303030303ull;
+ Result = ((Result << 3) | Result) & 0x1111111111111111ull;
+
+ const glm::uint32 a = static_cast<glm::uint32>((Result & 0x000000000000FFFF) >> ( 0 - 0));
+ const glm::uint32 b = static_cast<glm::uint32>((Result & 0x00000000FFFF0000) >> (16 - 3));
+ const glm::uint32 c = static_cast<glm::uint32>((Result & 0x0000FFFF00000000) >> (32 - 6));
+ const glm::uint32 d = static_cast<glm::uint32>((Result & 0xFFFF000000000000) >> (48 - 12));
+
+ return a | b | c | d;
+ }
+
+ GLM_FUNC_QUALIFIER glm::u8vec4 bitfieldDeinterleave_u8vec4(glm::uint32 InterleavedBitfield)
+ {
+ glm::uint64 Result(InterleavedBitfield);
+ Result = ((Result << 15) | Result) & 0x9249249249249249ull;
+ Result = ((Result >> 1) | Result) & 0x30C30C30C30C30C3ull;
+ Result = ((Result >> 2) | Result) & 0xF00F00F00F00F00Full;
+ Result = ((Result >> 4) | Result) & 0x00FF0000FF0000FFull;
+ return glm::u8vec4(
+ (Result >> 0) & 0x000000000000FFFFull,
+ (Result >> 16) & 0x00000000FFFF0000ull,
+ (Result >> 32) & 0x0000FFFF00000000ull,
+ (Result >> 48) & 0xFFFF000000000000ull);
+ }
+
+ GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave_u16vec2(glm::uint16 x, glm::uint16 y)
+ {
+ glm::uint64 Result = (glm::uint64(y) << 32) | glm::uint64(x);
+ Result = ((Result << 8) | Result) & static_cast<glm::uint32>(0x00FF00FF00FF00FFull);
+ Result = ((Result << 4) | Result) & static_cast<glm::uint32>(0x0F0F0F0F0F0F0F0Full);
+ Result = ((Result << 2) | Result) & static_cast<glm::uint32>(0x3333333333333333ull);
+ Result = ((Result << 1) | Result) & static_cast<glm::uint32>(0x5555555555555555ull);
+ return static_cast<glm::uint32>((Result & 0x00000000FFFFFFFFull) | (Result >> 31));
+ }
+
+ GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave_u16vec2(glm::uint32 InterleavedBitfield)
+ {
+ glm::uint64 Result(InterleavedBitfield);
+ Result = ((Result << 31) | Result) & 0x5555555555555555ull;
+ Result = ((Result >> 1) | Result) & 0x3333333333333333ull;
+ Result = ((Result >> 2) | Result) & 0x0F0F0F0F0F0F0F0Full;
+ Result = ((Result >> 4) | Result) & 0x00FF00FF00FF00FFull;
+ Result = ((Result >> 8) | Result) & 0x0000FFFF0000FFFFull;
+ return glm::u16vec2(Result & 0x00000000FFFFFFFFull, Result >> 32);
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ {
+ glm::uint16 A = bitfieldInterleave_u8vec2(glm::uint8(i), glm::uint8(j));
+ glm::uint16 B = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j));
+ Error += A == B ? 0 : 1;
+
+ glm::u8vec2 C = bitfieldDeinterleave_u8vec2(A);
+ Error += C.x == glm::uint8(i) ? 0 : 1;
+ Error += C.y == glm::uint8(j) ? 0 : 1;
+ }
+
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ {
+ glm::uint32 A = bitfieldInterleave_u8vec4(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j));
+ glm::uint32 B = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j));
+ Error += A == B ? 0 : 1;
+/*
+ glm::u8vec4 C = bitfieldDeinterleave_u8vec4(A);
+ Error += C.x == glm::uint8(i) ? 0 : 1;
+ Error += C.y == glm::uint8(j) ? 0 : 1;
+ Error += C.z == glm::uint8(i) ? 0 : 1;
+ Error += C.w == glm::uint8(j) ? 0 : 1;
+*/
+ }
+
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ {
+ glm::uint32 A = bitfieldInterleave_u16vec2(glm::uint16(i), glm::uint16(j));
+ glm::uint32 B = glm::bitfieldInterleave(glm::uint16(i), glm::uint16(j));
+ Error += A == B ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int perf_old_u8vec2(std::vector<glm::uint16>& Result)
+ {
+ int Error = 0;
+
+ const std::clock_t BeginTime = std::clock();
+
+ for(glm::size_t k = 0; k < 10000; ++k)
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Error += Result[j * 256 + i] == glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j)) ? 0 : 1;
+
+ const std::clock_t EndTime = std::clock();
+
+ std::printf("glm::bitfieldInterleave<u8vec2> Time %d clocks\n", static_cast<int>(EndTime - BeginTime));
+
+ return Error;
+ }
+
+ int perf_new_u8vec2(std::vector<glm::uint16>& Result)
+ {
+ int Error = 0;
+
+ const std::clock_t BeginTime = std::clock();
+
+ for(glm::size_t k = 0; k < 10000; ++k)
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Error += Result[j * 256 + i] == bitfieldInterleave_u8vec2(glm::uint8(i), glm::uint8(j)) ? 0 : 1;
+
+ const std::clock_t EndTime = std::clock();
+
+ std::printf("bitfieldInterleave_u8vec2 Time %d clocks\n", static_cast<int>(EndTime - BeginTime));
+
+ return Error;
+ }
+
+ int perf_old_u8vec4(std::vector<glm::uint32>& Result)
+ {
+ int Error = 0;
+
+ const std::clock_t BeginTime = std::clock();
+
+ for(glm::size_t k = 0; k < 10000; ++k)
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Error += Result[j * 256 + i] == glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j)) ? 0 : 1;
+
+ const std::clock_t EndTime = std::clock();
+
+ std::printf("glm::bitfieldInterleave<u8vec4> Time %d clocks\n", static_cast<int>(EndTime - BeginTime));
+
+ return Error;
+ }
+
+ int perf_new_u8vec4(std::vector<glm::uint32>& Result)
+ {
+ int Error = 0;
+
+ const std::clock_t BeginTime = std::clock();
+
+ for(glm::size_t k = 0; k < 10000; ++k)
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Error += Result[j * 256 + i] == bitfieldInterleave_u8vec4(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j)) ? 0 : 1;
+
+ const std::clock_t EndTime = std::clock();
+
+ std::printf("bitfieldInterleave_u8vec4 Time %d clocks\n", static_cast<int>(EndTime - BeginTime));
+
+ return Error;
+ }
+
+ int perf_old_u16vec2(std::vector<glm::uint32>& Result)
+ {
+ int Error = 0;
+
+ const std::clock_t BeginTime = std::clock();
+
+ for(glm::size_t k = 0; k < 10000; ++k)
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Error += Result[j * 256 + i] == glm::bitfieldInterleave(glm::uint16(i), glm::uint16(j)) ? 0 : 1;
+
+ const std::clock_t EndTime = std::clock();
+
+ std::printf("glm::bitfieldInterleave<u16vec2> Time %d clocks\n", static_cast<int>(EndTime - BeginTime));
+
+ return Error;
+ }
+
+ int perf_new_u16vec2(std::vector<glm::uint32>& Result)
+ {
+ int Error = 0;
+
+ const std::clock_t BeginTime = std::clock();
+
+ for(glm::size_t k = 0; k < 10000; ++k)
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Error += Result[j * 256 + i] == bitfieldInterleave_u16vec2(glm::uint16(i), glm::uint16(j)) ? 0 : 1;
+
+ const std::clock_t EndTime = std::clock();
+
+ std::printf("bitfieldInterleave_u16vec2 Time %d clocks\n", static_cast<int>(EndTime - BeginTime));
+
+ return Error;
+ }
+
+ int perf()
+ {
+ int Error = 0;
+
+ std::printf("bitfieldInterleave perf: init\r");
+
+ std::vector<glm::uint16> Result_u8vec2(256 * 256, 0);
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Result_u8vec2[j * 256 + i] = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j));
+
+ Error += perf_old_u8vec2(Result_u8vec2);
+ Error += perf_new_u8vec2(Result_u8vec2);
+
+ std::vector<glm::uint32> Result_u8vec4(256 * 256, 0);
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Result_u8vec4[j * 256 + i] = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j));
+
+ Error += perf_old_u8vec4(Result_u8vec4);
+ Error += perf_new_u8vec4(Result_u8vec4);
+
+ std::vector<glm::uint32> Result_u16vec2(256 * 256, 0);
+ for(glm::size_t j = 0; j < 256; ++j)
+ for(glm::size_t i = 0; i < 256; ++i)
+ Result_u16vec2[j * 256 + i] = glm::bitfieldInterleave(glm::uint16(i), glm::uint16(j));
+
+ Error += perf_old_u16vec2(Result_u16vec2);
+ Error += perf_new_u16vec2(Result_u16vec2);
+
+ std::printf("bitfieldInterleave perf: %d Errors\n", Error);
+
+ return Error;
+ }
+
+}//namespace bitfieldInterleave5
+
+static int test_bitfieldRotateRight()
+{
+ glm::ivec4 const A = glm::bitfieldRotateRight(glm::ivec4(2), 1);
+ glm::ivec4 const B = glm::ivec4(2) >> 1;
+
+ return A == B;
+}
+
+static int test_bitfieldRotateLeft()
+{
+ glm::ivec4 const A = glm::bitfieldRotateLeft(glm::ivec4(2), 1);
+ glm::ivec4 const B = glm::ivec4(2) << 1;
+
+ return A == B;
+}
+
+int main()
+{
+ int Error = 0;
+
+/* Tests for a faster and reversible (deinterleavable) bitfieldInterleave
+ Error += ::bitfieldInterleave5::test();
+ Error += ::bitfieldInterleave5::perf();
+*/
+ Error += ::mask::test();
+ Error += ::bitfieldInterleave3::test();
+ Error += ::bitfieldInterleave4::test();
+ Error += ::bitfieldInterleave::test();
+
+ Error += test_bitfieldRotateRight();
+ Error += test_bitfieldRotateLeft();
+
+# ifdef NDEBUG
+ Error += ::mask::perf();
+ Error += ::bitfieldInterleave::perf();
+# endif//NDEBUG
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_color_space.cpp b/3rdparty/glm/source/test/gtc/gtc_color_space.cpp
new file mode 100644
index 0000000..67650c5
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_color_space.cpp
@@ -0,0 +1,78 @@
+#include <glm/gtc/color_space.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/constants.hpp>
+
+namespace srgb
+{
+ int test()
+ {
+ int Error(0);
+
+ glm::vec3 const ColorSourceRGB(1.0, 0.5, 0.0);
+
+ {
+ glm::vec3 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGB);
+ glm::vec3 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGB, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGB, 2.8f);
+ glm::vec3 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB, 2.8f);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGB, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ glm::vec4 const ColorSourceRGBA(1.0, 0.5, 0.0, 1.0);
+
+ {
+ glm::vec4 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGBA);
+ glm::vec4 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGBA, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGBA, 2.8f);
+ glm::vec4 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB, 2.8f);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGBA, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ glm::vec4 const ColorSourceGNI = glm::vec4(107, 107, 104, 131) / glm::vec4(255);
+
+ {
+ glm::vec4 const ColorGNA = glm::convertSRGBToLinear(ColorSourceGNI) * glm::vec4(255);
+ glm::vec4 const ColorGNE = glm::convertLinearToSRGB(ColorSourceGNI) * glm::vec4(255);
+ glm::vec4 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceGNI);
+ glm::vec4 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB);
+ Error += glm::all(glm::epsilonEqual(ColorSourceGNI, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace srgb
+
+namespace srgb_lowp
+{
+ int test()
+ {
+ int Error(0);
+
+ for(float Color = 0.0f; Color < 1.0f; Color += 0.01f)
+ {
+ glm::highp_vec3 const HighpSRGB = glm::convertLinearToSRGB(glm::highp_vec3(Color));
+ glm::lowp_vec3 const LowpSRGB = glm::convertLinearToSRGB(glm::lowp_vec3(Color));
+ Error += glm::all(glm::epsilonEqual(glm::abs(HighpSRGB - glm::highp_vec3(LowpSRGB)), glm::highp_vec3(0), 0.1f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace srgb_lowp
+
+int main()
+{
+ int Error(0);
+
+ Error += srgb::test();
+ Error += srgb_lowp::test();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_constants.cpp b/3rdparty/glm/source/test/gtc/gtc_constants.cpp
new file mode 100644
index 0000000..3897cd0
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_constants.cpp
@@ -0,0 +1,30 @@
+#include <glm/gtc/constants.hpp>
+
+int test_epsilon()
+{
+ int Error = 0;
+
+ {
+ float Test = glm::epsilon<float>();
+ Error += Test > 0.0f ? 0 : 1;
+ }
+
+ {
+ double Test = glm::epsilon<double>();
+ Error += Test > 0.0 ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ //float MinHalf = 0.0f;
+ //while (glm::half(MinHalf) == glm::half(0.0f))
+ // MinHalf += std::numeric_limits<float>::epsilon();
+ Error += test_epsilon();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_epsilon.cpp b/3rdparty/glm/source/test/gtc/gtc_epsilon.cpp
new file mode 100644
index 0000000..f0e6c8a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_epsilon.cpp
@@ -0,0 +1,78 @@
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/vector_relational.hpp>
+
+int test_defined()
+{
+ glm::epsilonEqual(glm::vec2(), glm::vec2(), glm::vec2());
+ glm::epsilonEqual(glm::vec3(), glm::vec3(), glm::vec3());
+ glm::epsilonEqual(glm::vec4(), glm::vec4(), glm::vec4());
+
+ glm::epsilonNotEqual(glm::vec2(), glm::vec2(), glm::vec2());
+ glm::epsilonNotEqual(glm::vec3(), glm::vec3(), glm::vec3());
+ glm::epsilonNotEqual(glm::vec4(), glm::vec4(), glm::vec4());
+
+ glm::epsilonEqual(glm::vec2(), glm::vec2(), 0.0f);
+ glm::epsilonEqual(glm::vec3(), glm::vec3(), 0.0f);
+ glm::epsilonEqual(glm::vec4(), glm::vec4(), 0.0f);
+ glm::epsilonEqual(glm::quat(), glm::quat(), 0.0f);
+
+ glm::epsilonNotEqual(glm::vec2(), glm::vec2(), 0.0f);
+ glm::epsilonNotEqual(glm::vec3(), glm::vec3(), 0.0f);
+ glm::epsilonNotEqual(glm::vec4(), glm::vec4(), 0.0f);
+ glm::epsilonNotEqual(glm::quat(), glm::quat(), 0.0f);
+
+ return 0;
+}
+
+template<typename T>
+int test_equal()
+{
+ int Error(0);
+
+ {
+ T A = glm::epsilon<T>();
+ T B = glm::epsilon<T>();
+ Error += glm::epsilonEqual(A, B, glm::epsilon<T>() * T(2)) ? 0 : 1;
+ }
+
+ {
+ T A(0);
+ T B = static_cast<T>(0) + glm::epsilon<T>();
+ Error += glm::epsilonEqual(A, B, glm::epsilon<T>() * T(2)) ? 0 : 1;
+ }
+
+ {
+ T A(0);
+ T B = static_cast<T>(0) - glm::epsilon<T>();
+ Error += glm::epsilonEqual(A, B, glm::epsilon<T>() * T(2)) ? 0 : 1;
+ }
+
+ {
+ T A = static_cast<T>(0) + glm::epsilon<T>();
+ T B = static_cast<T>(0);
+ Error += glm::epsilonEqual(A, B, glm::epsilon<T>() * T(2)) ? 0 : 1;
+ }
+
+ {
+ T A = static_cast<T>(0) - glm::epsilon<T>();
+ T B = static_cast<T>(0);
+ Error += glm::epsilonEqual(A, B, glm::epsilon<T>() * T(2)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_defined();
+ Error += test_equal<float>();
+ Error += test_equal<double>();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtc/gtc_integer.cpp b/3rdparty/glm/source/test/gtc/gtc_integer.cpp
new file mode 100644
index 0000000..769d969
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_integer.cpp
@@ -0,0 +1,233 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#define GLM_FORCE_INLINE
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/integer.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtx/type_aligned.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <ctime>
+#include <cstdio>
+#include <vector>
+#include <cmath>
+
+namespace log2_
+{
+ int test()
+ {
+ int Error = 0;
+
+ int A0 = static_cast<int>(glm::log2(16.f));
+ glm::ivec1 B0(glm::log2(glm::vec1(16.f)));
+ glm::ivec2 C0(glm::log2(glm::vec2(16.f)));
+ glm::ivec3 D0(glm::log2(glm::vec3(16.f)));
+ glm::ivec4 E0(glm::log2(glm::vec4(16.f)));
+
+ int A1 = glm::log2(int(16));
+ glm::ivec1 B1 = glm::log2(glm::ivec1(16));
+ glm::ivec2 C1 = glm::log2(glm::ivec2(16));
+ glm::ivec3 D1 = glm::log2(glm::ivec3(16));
+ glm::ivec4 E1 = glm::log2(glm::ivec4(16));
+
+ Error += A0 == A1 ? 0 : 1;
+ Error += glm::all(glm::equal(B0, B1)) ? 0 : 1;
+ Error += glm::all(glm::equal(C0, C1)) ? 0 : 1;
+ Error += glm::all(glm::equal(D0, D1)) ? 0 : 1;
+ Error += glm::all(glm::equal(E0, E1)) ? 0 : 1;
+
+ glm::uint64 A2 = glm::log2(glm::uint64(16));
+ glm::u64vec1 B2 = glm::log2(glm::u64vec1(16));
+ glm::u64vec2 C2 = glm::log2(glm::u64vec2(16));
+ glm::u64vec3 D2 = glm::log2(glm::u64vec3(16));
+ glm::u64vec4 E2 = glm::log2(glm::u64vec4(16));
+
+ Error += A2 == glm::uint64(4) ? 0 : 1;
+ Error += glm::all(glm::equal(B2, glm::u64vec1(4))) ? 0 : 1;
+ Error += glm::all(glm::equal(C2, glm::u64vec2(4))) ? 0 : 1;
+ Error += glm::all(glm::equal(D2, glm::u64vec3(4))) ? 0 : 1;
+ Error += glm::all(glm::equal(E2, glm::u64vec4(4))) ? 0 : 1;
+
+ return Error;
+ }
+
+ int perf(std::size_t Count)
+ {
+ int Error = 0;
+
+ {
+ std::vector<int> Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(int i = 0; i < static_cast<int>(Count); ++i)
+ Result[i] = glm::log2(static_cast<int>(i));
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<int>: %d clocks\n", static_cast<int>(End - Begin));
+ }
+
+ {
+ std::vector<glm::ivec4> Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(int i = 0; i < static_cast<int>(Count); ++i)
+ Result[i] = glm::log2(glm::ivec4(i));
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<ivec4>: %d clocks\n", static_cast<int>(End - Begin));
+ }
+
+# if GLM_HAS_BITSCAN_WINDOWS
+ {
+ std::vector<glm::ivec4> Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ glm::vec<4, unsigned long, glm::defaultp> Tmp;
+ _BitScanReverse(&Tmp.x, i);
+ _BitScanReverse(&Tmp.y, i);
+ _BitScanReverse(&Tmp.z, i);
+ _BitScanReverse(&Tmp.w, i);
+ Result[i] = glm::ivec4(Tmp);
+ }
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<ivec4> inlined: %d clocks\n", static_cast<int>(End - Begin));
+ }
+
+
+ {
+ std::vector<glm::vec<4, unsigned long, glm::defaultp> > Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ _BitScanReverse(&Result[i].x, i);
+ _BitScanReverse(&Result[i].y, i);
+ _BitScanReverse(&Result[i].z, i);
+ _BitScanReverse(&Result[i].w, i);
+ }
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<ivec4> inlined no cast: %d clocks\n", static_cast<int>(End - Begin));
+ }
+
+
+ {
+ std::vector<glm::ivec4> Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ {
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result[i].x), i);
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result[i].y), i);
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result[i].z), i);
+ _BitScanReverse(reinterpret_cast<unsigned long*>(&Result[i].w), i);
+ }
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<ivec4> reinterpret: %d clocks\n", static_cast<int>(End - Begin));
+ }
+# endif//GLM_HAS_BITSCAN_WINDOWS
+
+ {
+ std::vector<float> Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(std::size_t i = 0; i < Count; ++i)
+ Result[i] = glm::log2(static_cast<float>(i));
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<float>: %d clocks\n", static_cast<int>(End - Begin));
+ }
+
+ {
+ std::vector<glm::vec4> Result;
+ Result.resize(Count);
+
+ std::clock_t Begin = clock();
+
+ for(int i = 0; i < static_cast<int>(Count); ++i)
+ Result[i] = glm::log2(glm::vec4(static_cast<float>(i)));
+
+ std::clock_t End = clock();
+
+ std::printf("glm::log2<vec4>: %d clocks\n", static_cast<int>(End - Begin));
+ }
+
+ return Error;
+ }
+}//namespace log2_
+
+namespace iround
+{
+ int test()
+ {
+ int Error = 0;
+
+ for(float f = 0.0f; f < 3.1f; f += 0.05f)
+ {
+ int RoundFast = static_cast<int>(glm::iround(f));
+ int RoundSTD = static_cast<int>(glm::round(f));
+ Error += RoundFast == RoundSTD ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+ }
+}//namespace iround
+
+namespace uround
+{
+ int test()
+ {
+ int Error = 0;
+
+ for(float f = 0.0f; f < 3.1f; f += 0.05f)
+ {
+ int RoundFast = static_cast<int>(glm::uround(f));
+ int RoundSTD = static_cast<int>(glm::round(f));
+ Error += RoundFast == RoundSTD ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+ }
+}//namespace uround
+
+int main()
+{
+ int Error(0);
+
+ Error += ::log2_::test();
+ Error += ::iround::test();
+ Error += ::uround::test();
+
+# ifdef NDEBUG
+ std::size_t const Samples(1000);
+ Error += ::log2_::perf(Samples);
+# endif//NDEBUG
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_matrix_access.cpp b/3rdparty/glm/source/test/gtc/gtc_matrix_access.cpp
new file mode 100644
index 0000000..1b966e2
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_matrix_access.cpp
@@ -0,0 +1,383 @@
+#include <glm/ext/vector_relational.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/matrix_access.hpp>
+#include <glm/mat2x2.hpp>
+#include <glm/mat2x3.hpp>
+#include <glm/mat2x4.hpp>
+#include <glm/mat3x2.hpp>
+#include <glm/mat3x3.hpp>
+#include <glm/mat3x4.hpp>
+#include <glm/mat4x2.hpp>
+#include <glm/mat4x3.hpp>
+#include <glm/mat4x4.hpp>
+
+int test_mat2x2_row_set()
+{
+ int Error = 0;
+
+ glm::mat2x2 m(1);
+
+ m = glm::row(m, 0, glm::vec2( 0, 1));
+ m = glm::row(m, 1, glm::vec2( 4, 5));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec2( 0, 1), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec2( 4, 5), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat2x2_col_set()
+{
+ int Error = 0;
+
+ glm::mat2x2 m(1);
+
+ m = glm::column(m, 0, glm::vec2( 0, 1));
+ m = glm::column(m, 1, glm::vec2( 4, 5));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec2( 0, 1), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec2( 4, 5), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat2x3_row_set()
+{
+ int Error = 0;
+
+ glm::mat2x3 m(1);
+
+ m = glm::row(m, 0, glm::vec2( 0, 1));
+ m = glm::row(m, 1, glm::vec2( 4, 5));
+ m = glm::row(m, 2, glm::vec2( 8, 9));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec2( 0, 1), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec2( 4, 5), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 2), glm::vec2( 8, 9), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat2x3_col_set()
+{
+ int Error = 0;
+
+ glm::mat2x3 m(1);
+
+ m = glm::column(m, 0, glm::vec3( 0, 1, 2));
+ m = glm::column(m, 1, glm::vec3( 4, 5, 6));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec3( 0, 1, 2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec3( 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat2x4_row_set()
+{
+ int Error = 0;
+
+ glm::mat2x4 m(1);
+
+ m = glm::row(m, 0, glm::vec2( 0, 1));
+ m = glm::row(m, 1, glm::vec2( 4, 5));
+ m = glm::row(m, 2, glm::vec2( 8, 9));
+ m = glm::row(m, 3, glm::vec2(12, 13));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec2( 0, 1), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec2( 4, 5), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 2), glm::vec2( 8, 9), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 3), glm::vec2(12, 13), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat2x4_col_set()
+{
+ int Error = 0;
+
+ glm::mat2x4 m(1);
+
+ m = glm::column(m, 0, glm::vec4( 0, 1, 2, 3));
+ m = glm::column(m, 1, glm::vec4( 4, 5, 6, 7));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec4( 0, 1, 2, 3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec4( 4, 5, 6, 7), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat3x2_row_set()
+{
+ int Error = 0;
+
+ glm::mat3x2 m(1);
+
+ m = glm::row(m, 0, glm::vec3( 0, 1, 2));
+ m = glm::row(m, 1, glm::vec3( 4, 5, 6));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec3( 0, 1, 2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec3( 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat3x2_col_set()
+{
+ int Error = 0;
+
+ glm::mat3x2 m(1);
+
+ m = glm::column(m, 0, glm::vec2( 0, 1));
+ m = glm::column(m, 1, glm::vec2( 4, 5));
+ m = glm::column(m, 2, glm::vec2( 8, 9));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec2( 0, 1), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec2( 4, 5), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 2), glm::vec2( 8, 9), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat3x3_row_set()
+{
+ int Error = 0;
+
+ glm::mat3x3 m(1);
+
+ m = glm::row(m, 0, glm::vec3( 0, 1, 2));
+ m = glm::row(m, 1, glm::vec3( 4, 5, 6));
+ m = glm::row(m, 2, glm::vec3( 8, 9, 10));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec3( 0, 1, 2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec3( 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 2), glm::vec3( 8, 9, 10), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat3x3_col_set()
+{
+ int Error = 0;
+
+ glm::mat3x3 m(1);
+
+ m = glm::column(m, 0, glm::vec3( 0, 1, 2));
+ m = glm::column(m, 1, glm::vec3( 4, 5, 6));
+ m = glm::column(m, 2, glm::vec3( 8, 9, 10));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec3( 0, 1, 2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec3( 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 2), glm::vec3( 8, 9, 10), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat3x4_row_set()
+{
+ int Error = 0;
+
+ glm::mat3x4 m(1);
+
+ m = glm::row(m, 0, glm::vec3( 0, 1, 2));
+ m = glm::row(m, 1, glm::vec3( 4, 5, 6));
+ m = glm::row(m, 2, glm::vec3( 8, 9, 10));
+ m = glm::row(m, 3, glm::vec3(12, 13, 14));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec3( 0, 1, 2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec3( 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 2), glm::vec3( 8, 9, 10), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 3), glm::vec3(12, 13, 14), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat3x4_col_set()
+{
+ int Error = 0;
+
+ glm::mat3x4 m(1);
+
+ m = glm::column(m, 0, glm::vec4( 0, 1, 2, 3));
+ m = glm::column(m, 1, glm::vec4( 4, 5, 6, 7));
+ m = glm::column(m, 2, glm::vec4( 8, 9, 10, 11));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec4( 0, 1, 2, 3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec4( 4, 5, 6, 7), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 2), glm::vec4( 8, 9, 10, 11), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x2_row_set()
+{
+ int Error = 0;
+
+ glm::mat4x2 m(1);
+
+ m = glm::row(m, 0, glm::vec4( 0, 1, 2, 3));
+ m = glm::row(m, 1, glm::vec4( 4, 5, 6, 7));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec4( 0, 1, 2, 3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec4( 4, 5, 6, 7), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x2_col_set()
+{
+ int Error = 0;
+
+ glm::mat4x2 m(1);
+
+ m = glm::column(m, 0, glm::vec2( 0, 1));
+ m = glm::column(m, 1, glm::vec2( 4, 5));
+ m = glm::column(m, 2, glm::vec2( 8, 9));
+ m = glm::column(m, 3, glm::vec2(12, 13));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec2( 0, 1), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec2( 4, 5), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 2), glm::vec2( 8, 9), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 3), glm::vec2(12, 13), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x3_row_set()
+{
+ int Error = 0;
+
+ glm::mat4x3 m(1);
+
+ m = glm::row(m, 0, glm::vec4( 0, 1, 2, 3));
+ m = glm::row(m, 1, glm::vec4( 4, 5, 6, 7));
+ m = glm::row(m, 2, glm::vec4( 8, 9, 10, 11));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec4( 0, 1, 2, 3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec4( 4, 5, 6, 7), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 2), glm::vec4( 8, 9, 10, 11), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x3_col_set()
+{
+ int Error = 0;
+
+ glm::mat4x3 m(1);
+
+ m = glm::column(m, 0, glm::vec3( 0, 1, 2));
+ m = glm::column(m, 1, glm::vec3( 4, 5, 6));
+ m = glm::column(m, 2, glm::vec3( 8, 9, 10));
+ m = glm::column(m, 3, glm::vec3(12, 13, 14));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec3( 0, 1, 2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec3( 4, 5, 6), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 2), glm::vec3( 8, 9, 10), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 3), glm::vec3(12, 13, 14), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x4_row_set()
+{
+ int Error = 0;
+
+ glm::mat4 m(1);
+
+ m = glm::row(m, 0, glm::vec4( 0, 1, 2, 3));
+ m = glm::row(m, 1, glm::vec4( 4, 5, 6, 7));
+ m = glm::row(m, 2, glm::vec4( 8, 9, 10, 11));
+ m = glm::row(m, 3, glm::vec4(12, 13, 14, 15));
+
+ Error += glm::all(glm::equal(glm::row(m, 0), glm::vec4( 0, 1, 2, 3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 1), glm::vec4( 4, 5, 6, 7), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 2), glm::vec4( 8, 9, 10, 11), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::row(m, 3), glm::vec4(12, 13, 14, 15), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x4_col_set()
+{
+ int Error = 0;
+
+ glm::mat4 m(1);
+
+ m = glm::column(m, 0, glm::vec4( 0, 1, 2, 3));
+ m = glm::column(m, 1, glm::vec4( 4, 5, 6, 7));
+ m = glm::column(m, 2, glm::vec4( 8, 9, 10, 11));
+ m = glm::column(m, 3, glm::vec4(12, 13, 14, 15));
+
+ Error += glm::all(glm::equal(glm::column(m, 0), glm::vec4( 0, 1, 2, 3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 1), glm::vec4( 4, 5, 6, 7), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 2), glm::vec4( 8, 9, 10, 11), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(glm::column(m, 3), glm::vec4(12, 13, 14, 15), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x4_row_get()
+{
+ int Error = 0;
+
+ glm::mat4 m(1);
+
+ glm::vec4 A = glm::row(m, 0);
+ Error += glm::all(glm::equal(A, glm::vec4(1, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ glm::vec4 B = glm::row(m, 1);
+ Error += glm::all(glm::equal(B, glm::vec4(0, 1, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ glm::vec4 C = glm::row(m, 2);
+ Error += glm::all(glm::equal(C, glm::vec4(0, 0, 1, 0), glm::epsilon<float>())) ? 0 : 1;
+ glm::vec4 D = glm::row(m, 3);
+ Error += glm::all(glm::equal(D, glm::vec4(0, 0, 0, 1), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int test_mat4x4_col_get()
+{
+ int Error = 0;
+
+ glm::mat4 m(1);
+
+ glm::vec4 A = glm::column(m, 0);
+ Error += glm::all(glm::equal(A, glm::vec4(1, 0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ glm::vec4 B = glm::column(m, 1);
+ Error += glm::all(glm::equal(B, glm::vec4(0, 1, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ glm::vec4 C = glm::column(m, 2);
+ Error += glm::all(glm::equal(C, glm::vec4(0, 0, 1, 0), glm::epsilon<float>())) ? 0 : 1;
+ glm::vec4 D = glm::column(m, 3);
+ Error += glm::all(glm::equal(D, glm::vec4(0, 0, 0, 1), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_mat2x2_row_set();
+ Error += test_mat2x2_col_set();
+ Error += test_mat2x3_row_set();
+ Error += test_mat2x3_col_set();
+ Error += test_mat2x4_row_set();
+ Error += test_mat2x4_col_set();
+ Error += test_mat3x2_row_set();
+ Error += test_mat3x2_col_set();
+ Error += test_mat3x3_row_set();
+ Error += test_mat3x3_col_set();
+ Error += test_mat3x4_row_set();
+ Error += test_mat3x4_col_set();
+ Error += test_mat4x2_row_set();
+ Error += test_mat4x2_col_set();
+ Error += test_mat4x3_row_set();
+ Error += test_mat4x3_col_set();
+ Error += test_mat4x4_row_set();
+ Error += test_mat4x4_col_set();
+
+ Error += test_mat4x4_row_get();
+ Error += test_mat4x4_col_get();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_matrix_integer.cpp b/3rdparty/glm/source/test/gtc/gtc_matrix_integer.cpp
new file mode 100644
index 0000000..108016a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_matrix_integer.cpp
@@ -0,0 +1,8 @@
+#include <glm/gtc/matrix_integer.hpp>
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_matrix_inverse.cpp b/3rdparty/glm/source/test/gtc/gtc_matrix_inverse.cpp
new file mode 100644
index 0000000..eaec6e1
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_matrix_inverse.cpp
@@ -0,0 +1,51 @@
+#include <glm/gtc/matrix_inverse.hpp>
+#include <glm/gtc/epsilon.hpp>
+
+int test_affine()
+{
+ int Error = 0;
+
+ {
+ glm::mat3 const M(
+ 2.f, 0.f, 0.f,
+ 0.f, 2.f, 0.f,
+ 0.f, 0.f, 1.f);
+ glm::mat3 const A = glm::affineInverse(M);
+ glm::mat3 const I = glm::inverse(M);
+ glm::mat3 const R = glm::affineInverse(A);
+
+ for(glm::length_t i = 0; i < A.length(); ++i)
+ {
+ Error += glm::all(glm::epsilonEqual(M[i], R[i], 0.01f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(A[i], I[i], 0.01f)) ? 0 : 1;
+ }
+ }
+
+ {
+ glm::mat4 const M(
+ 2.f, 0.f, 0.f, 0.f,
+ 0.f, 2.f, 0.f, 0.f,
+ 0.f, 0.f, 2.f, 0.f,
+ 0.f, 0.f, 0.f, 1.f);
+ glm::mat4 const A = glm::affineInverse(M);
+ glm::mat4 const I = glm::inverse(M);
+ glm::mat4 const R = glm::affineInverse(A);
+
+ for(glm::length_t i = 0; i < A.length(); ++i)
+ {
+ Error += glm::all(glm::epsilonEqual(M[i], R[i], 0.01f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(A[i], I[i], 0.01f)) ? 0 : 1;
+ }
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_affine();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_matrix_transform.cpp b/3rdparty/glm/source/test/gtc/gtc_matrix_transform.cpp
new file mode 100644
index 0000000..b50666e
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_matrix_transform.cpp
@@ -0,0 +1,55 @@
+#include <glm/gtc/matrix_transform.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/matrix_relational.hpp>
+
+int test_perspective()
+{
+ int Error = 0;
+
+ glm::mat4 Projection = glm::perspective(glm::pi<float>() * 0.25f, 4.0f / 3.0f, 0.1f, 100.0f);
+
+ return Error;
+}
+
+int test_pick()
+{
+ int Error = 0;
+
+ glm::mat4 Pick = glm::pickMatrix(glm::vec2(1, 2), glm::vec2(3, 4), glm::ivec4(0, 0, 320, 240));
+
+ return Error;
+}
+
+int test_tweakedInfinitePerspective()
+{
+ int Error = 0;
+
+ glm::mat4 ProjectionA = glm::tweakedInfinitePerspective(45.f, 640.f/480.f, 1.0f);
+ glm::mat4 ProjectionB = glm::tweakedInfinitePerspective(45.f, 640.f/480.f, 1.0f, 0.001f);
+
+
+ return Error;
+}
+
+int test_translate()
+{
+ int Error = 0;
+
+ glm::lowp_vec3 v(1.0);
+ glm::lowp_mat4 m(0);
+ glm::lowp_mat4 t = glm::translate(m, v);
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_translate();
+ Error += test_tweakedInfinitePerspective();
+ Error += test_pick();
+ Error += test_perspective();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_noise.cpp b/3rdparty/glm/source/test/gtc/gtc_noise.cpp
new file mode 100644
index 0000000..6ecec22
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_noise.cpp
@@ -0,0 +1,86 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/noise.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtx/raw_data.hpp>
+
+static int test_simplex_float()
+{
+ int Error = 0;
+
+ glm::u8vec4 const PixelSimplex2D(glm::byte(glm::abs(glm::simplex(glm::vec2(0.f, 0.f))) * 255.f));
+ glm::u8vec4 const PixelSimplex3D(glm::byte(glm::abs(glm::simplex(glm::vec3(0.f, 0.f, 0.f))) * 255.f));
+ glm::u8vec4 const PixelSimplex4D(glm::byte(glm::abs(glm::simplex(glm::vec4(0.f, 0.f, 0.f, 0.f))) * 255.f));
+
+ return Error;
+}
+
+static int test_simplex_double()
+{
+ int Error = 0;
+
+ glm::u8vec4 const PixelSimplex2D(glm::byte(glm::abs(glm::simplex(glm::dvec2(0.f, 0.f))) * 255.));
+ glm::u8vec4 const PixelSimplex3D(glm::byte(glm::abs(glm::simplex(glm::dvec3(0.f, 0.f, 0.f))) * 255.));
+ glm::u8vec4 const PixelSimplex4D(glm::byte(glm::abs(glm::simplex(glm::dvec4(0.f, 0.f, 0.f, 0.f))) * 255.));
+
+ return Error;
+}
+
+static int test_perlin_float()
+{
+ int Error = 0;
+
+ glm::u8vec4 const PixelPerlin2D(glm::byte(glm::abs(glm::perlin(glm::vec2(0.f, 0.f))) * 255.f));
+ glm::u8vec4 const PixelPerlin3D(glm::byte(glm::abs(glm::perlin(glm::vec3(0.f, 0.f, 0.f))) * 255.f));
+ glm::u8vec4 const PixelPerlin4D(glm::byte(glm::abs(glm::perlin(glm::vec4(0.f, 0.f, 0.f, 0.f))) * 255.f));
+
+ return Error;
+}
+
+static int test_perlin_double()
+{
+ int Error = 0;
+
+ glm::u8vec4 const PixelPerlin2D(glm::byte(glm::abs(glm::perlin(glm::dvec2(0.f, 0.f))) * 255.));
+ glm::u8vec4 const PixelPerlin3D(glm::byte(glm::abs(glm::perlin(glm::dvec3(0.f, 0.f, 0.f))) * 255.));
+ glm::u8vec4 const PixelPerlin4D(glm::byte(glm::abs(glm::perlin(glm::dvec4(0.f, 0.f, 0.f, 0.f))) * 255.));
+
+ return Error;
+}
+
+static int test_perlin_pedioric_float()
+{
+ int Error = 0;
+
+ glm::u8vec4 const PixelPeriodic2D(glm::byte(glm::abs(glm::perlin(glm::vec2(0.f, 0.f), glm::vec2(2.0f))) * 255.f));
+ glm::u8vec4 const PixelPeriodic3D(glm::byte(glm::abs(glm::perlin(glm::vec3(0.f, 0.f, 0.f), glm::vec3(2.0f))) * 255.f));
+ glm::u8vec4 const PixelPeriodic4D(glm::byte(glm::abs(glm::perlin(glm::vec4(0.f, 0.f, 0.f, 0.f), glm::vec4(2.0f))) * 255.f));
+
+ return Error;
+}
+
+static int test_perlin_pedioric_double()
+{
+ int Error = 0;
+
+ glm::u8vec4 const PixelPeriodic2D(glm::byte(glm::abs(glm::perlin(glm::dvec2(0.f, 0.f), glm::dvec2(2.0))) * 255.));
+ glm::u8vec4 const PixelPeriodic3D(glm::byte(glm::abs(glm::perlin(glm::dvec3(0.f, 0.f, 0.f), glm::dvec3(2.0))) * 255.));
+ glm::u8vec4 const PixelPeriodic4D(glm::byte(glm::abs(glm::perlin(glm::dvec4(0.f, 0.f, 0.f, 0.f), glm::dvec4(2.0))) * 255.));
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_simplex_float();
+ Error += test_simplex_double();
+
+ Error += test_perlin_float();
+ Error += test_perlin_double();
+
+ Error += test_perlin_pedioric_float();
+ Error += test_perlin_pedioric_double();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_packing.cpp b/3rdparty/glm/source/test/gtc/gtc_packing.cpp
new file mode 100644
index 0000000..df5b3bb
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_packing.cpp
@@ -0,0 +1,878 @@
+#include <glm/packing.hpp>
+#include <glm/gtc/packing.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <cstdio>
+#include <vector>
+
+void print_bits(float const& s)
+{
+ union
+ {
+ float f;
+ unsigned int i;
+ } uif;
+
+ uif.f = s;
+
+ std::printf("f32: ");
+ for(std::size_t j = sizeof(s) * 8; j > 0; --j)
+ {
+ if(j == 23 || j == 31)
+ std::printf(" ");
+ std::printf("%d", (uif.i & (1 << (j - 1))) ? 1 : 0);
+ }
+}
+
+void print_10bits(glm::uint const& s)
+{
+ std::printf("10b: ");
+ for(std::size_t j = 10; j > 0; --j)
+ {
+ if(j == 5)
+ std::printf(" ");
+ std::printf("%d", (s & (1 << (j - 1))) ? 1 : 0);
+ }
+}
+
+void print_11bits(glm::uint const& s)
+{
+ std::printf("11b: ");
+ for(std::size_t j = 11; j > 0; --j)
+ {
+ if(j == 6)
+ std::printf(" ");
+ std::printf("%d", (s & (1 << (j - 1))) ? 1 : 0);
+ }
+}
+
+void print_value(float const& s)
+{
+ std::printf("%2.5f, ", static_cast<double>(s));
+ print_bits(s);
+ std::printf(", ");
+// print_11bits(detail::floatTo11bit(s));
+// std::printf(", ");
+// print_10bits(detail::floatTo10bit(s));
+ std::printf("\n");
+}
+
+int test_Half1x16()
+{
+ int Error = 0;
+
+ std::vector<float> Tests;
+ Tests.push_back(0.0f);
+ Tests.push_back(1.0f);
+ Tests.push_back(-1.0f);
+ Tests.push_back(2.0f);
+ Tests.push_back(-2.0f);
+ Tests.push_back(1.9f);
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint16 p0 = glm::packHalf1x16(Tests[i]);
+ float v0 = glm::unpackHalf1x16(p0);
+ glm::uint16 p1 = glm::packHalf1x16(v0);
+ float v1 = glm::unpackHalf1x16(p1);
+ Error += glm::epsilonEqual(v0, v1, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_Half4x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> Tests;
+ Tests.push_back(glm::vec4(1.0f));
+ Tests.push_back(glm::vec4(0.0f));
+ Tests.push_back(glm::vec4(2.0f));
+ Tests.push_back(glm::vec4(0.1f));
+ Tests.push_back(glm::vec4(0.5f));
+ Tests.push_back(glm::vec4(-0.9f));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint64 p0 = glm::packHalf4x16(Tests[i]);
+ glm::vec4 v0 = glm::unpackHalf4x16(p0);
+ glm::uint64 p1 = glm::packHalf4x16(v0);
+ glm::vec4 v1 = glm::unpackHalf4x16(p1);
+ glm::u16vec4 p2 = glm::packHalf(v0);
+ glm::vec4 v2 = glm::unpackHalf(p2);
+
+ Error += glm::all(glm::equal(v0, v1, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v0, v2, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_I3x10_1x2()
+{
+ int Error = 0;
+
+ std::vector<glm::ivec4> Tests;
+ Tests.push_back(glm::ivec4(0));
+ Tests.push_back(glm::ivec4(1));
+ Tests.push_back(glm::ivec4(-1));
+ Tests.push_back(glm::ivec4(2));
+ Tests.push_back(glm::ivec4(-2));
+ Tests.push_back(glm::ivec4(3));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint32 p0 = glm::packI3x10_1x2(Tests[i]);
+ glm::ivec4 v0 = glm::unpackI3x10_1x2(p0);
+ glm::uint32 p1 = glm::packI3x10_1x2(v0);
+ glm::ivec4 v1 = glm::unpackI3x10_1x2(p1);
+ Error += glm::all(glm::equal(v0, v1)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_U3x10_1x2()
+{
+ int Error = 0;
+
+ std::vector<glm::uvec4> Tests;
+ Tests.push_back(glm::uvec4(0));
+ Tests.push_back(glm::uvec4(1));
+ Tests.push_back(glm::uvec4(2));
+ Tests.push_back(glm::uvec4(3));
+ Tests.push_back(glm::uvec4(4));
+ Tests.push_back(glm::uvec4(5));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint32 p0 = glm::packU3x10_1x2(Tests[i]);
+ glm::uvec4 v0 = glm::unpackU3x10_1x2(p0);
+ glm::uint32 p1 = glm::packU3x10_1x2(v0);
+ glm::uvec4 v1 = glm::unpackU3x10_1x2(p1);
+ Error += glm::all(glm::equal(v0, v1)) ? 0 : 1;
+ }
+
+ glm::u8vec4 const v0(0xff, 0x77, 0x0, 0x33);
+ glm::uint32 const p0 = *reinterpret_cast<glm::uint32 const*>(&v0[0]);
+ glm::uint32 const r0 = 0x330077ff;
+
+ Error += p0 == r0 ? 0 : 1;
+
+ glm::uvec4 const v1(0xff, 0x77, 0x0, 0x33);
+ glm::uint32 const p1 = glm::packU3x10_1x2(v1);
+ glm::uint32 const r1 = 0xc001dcff;
+
+ Error += p1 == r1 ? 0 : 1;
+
+ return Error;
+}
+
+int test_Snorm3x10_1x2()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> Tests;
+ Tests.push_back(glm::vec4(1.0f));
+ Tests.push_back(glm::vec4(0.0f));
+ Tests.push_back(glm::vec4(2.0f));
+ Tests.push_back(glm::vec4(0.1f));
+ Tests.push_back(glm::vec4(0.5f));
+ Tests.push_back(glm::vec4(0.9f));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint32 p0 = glm::packSnorm3x10_1x2(Tests[i]);
+ glm::vec4 v0 = glm::unpackSnorm3x10_1x2(p0);
+ glm::uint32 p1 = glm::packSnorm3x10_1x2(v0);
+ glm::vec4 v1 = glm::unpackSnorm3x10_1x2(p1);
+
+ Error += glm::all(glm::epsilonEqual(v0, v1, 0.01f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_Unorm3x10_1x2()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> Tests;
+ Tests.push_back(glm::vec4(1.0f));
+ Tests.push_back(glm::vec4(0.0f));
+ Tests.push_back(glm::vec4(2.0f));
+ Tests.push_back(glm::vec4(0.1f));
+ Tests.push_back(glm::vec4(0.5f));
+ Tests.push_back(glm::vec4(0.9f));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint32 p0 = glm::packUnorm3x10_1x2(Tests[i]);
+ glm::vec4 v0 = glm::unpackUnorm3x10_1x2(p0);
+ glm::uint32 p1 = glm::packUnorm3x10_1x2(v0);
+ glm::vec4 v1 = glm::unpackUnorm3x10_1x2(p1);
+
+ Error += glm::all(glm::epsilonEqual(v0, v1, 0.001f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_F2x11_1x10()
+{
+ int Error = 0;
+
+ std::vector<glm::vec3> Tests;
+ Tests.push_back(glm::vec3(1.0f));
+ Tests.push_back(glm::vec3(0.0f));
+ Tests.push_back(glm::vec3(2.0f));
+ Tests.push_back(glm::vec3(0.1f));
+ Tests.push_back(glm::vec3(0.5f));
+ Tests.push_back(glm::vec3(0.9f));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint32 p0 = glm::packF2x11_1x10(Tests[i]);
+ glm::vec3 v0 = glm::unpackF2x11_1x10(p0);
+ glm::uint32 p1 = glm::packF2x11_1x10(v0);
+ glm::vec3 v1 = glm::unpackF2x11_1x10(p1);
+ Error += glm::all(glm::equal(v0, v1, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_F3x9_E1x5()
+{
+ int Error = 0;
+
+ std::vector<glm::vec3> Tests;
+ Tests.push_back(glm::vec3(1.0f));
+ Tests.push_back(glm::vec3(0.0f));
+ Tests.push_back(glm::vec3(2.0f));
+ Tests.push_back(glm::vec3(0.1f));
+ Tests.push_back(glm::vec3(0.5f));
+ Tests.push_back(glm::vec3(0.9f));
+
+ for(std::size_t i = 0; i < Tests.size(); ++i)
+ {
+ glm::uint32 p0 = glm::packF3x9_E1x5(Tests[i]);
+ glm::vec3 v0 = glm::unpackF3x9_E1x5(p0);
+ glm::uint32 p1 = glm::packF3x9_E1x5(v0);
+ glm::vec3 v1 = glm::unpackF3x9_E1x5(p1);
+ Error += glm::all(glm::equal(v0, v1, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_RGBM()
+{
+ int Error = 0;
+
+ for(std::size_t i = 0; i < 1024; ++i)
+ {
+ glm::vec3 const Color(static_cast<float>(i));
+ glm::vec4 const RGBM = glm::packRGBM(Color);
+ glm::vec3 const Result= glm::unpackRGBM(RGBM);
+
+ Error += glm::all(glm::equal(Color, Result, 0.01f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_packUnorm1x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec1> A;
+ A.push_back(glm::vec1(1.0f));
+ A.push_back(glm::vec1(0.5f));
+ A.push_back(glm::vec1(0.1f));
+ A.push_back(glm::vec1(0.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec1 B(A[i]);
+ glm::uint16 C = glm::packUnorm1x16(B.x);
+ glm::vec1 D(glm::unpackUnorm1x16(C));
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 65535.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm1x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec1> A;
+ A.push_back(glm::vec1( 1.0f));
+ A.push_back(glm::vec1( 0.0f));
+ A.push_back(glm::vec1(-0.5f));
+ A.push_back(glm::vec1(-0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec1 B(A[i]);
+ glm::uint16 C = glm::packSnorm1x16(B.x);
+ glm::vec1 D(glm::unpackSnorm1x16(C));
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 32767.0f * 2.0f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_packUnorm2x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.0f));
+ A.push_back(glm::vec2(0.5f, 0.7f));
+ A.push_back(glm::vec2(0.1f, 0.2f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint32 C = glm::packUnorm2x16(B);
+ glm::vec2 D = glm::unpackUnorm2x16(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 65535.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm2x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2( 1.0f, 0.0f));
+ A.push_back(glm::vec2(-0.5f,-0.7f));
+ A.push_back(glm::vec2(-0.1f, 0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint32 C = glm::packSnorm2x16(B);
+ glm::vec2 D = glm::unpackSnorm2x16(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 32767.0f * 2.0f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm4x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4(1.0f));
+ A.push_back(glm::vec4(0.5f));
+ A.push_back(glm::vec4(0.1f));
+ A.push_back(glm::vec4(0.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint64 C = glm::packUnorm4x16(B);
+ glm::vec4 D(glm::unpackUnorm4x16(C));
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 65535.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm4x16()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4( 1.0f, 0.0f, -0.5f, 0.5f));
+ A.push_back(glm::vec4(-0.3f,-0.7f, 0.3f, 0.7f));
+ A.push_back(glm::vec4(-0.1f, 0.1f, -0.2f, 0.2f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint64 C = glm::packSnorm4x16(B);
+ glm::vec4 D(glm::unpackSnorm4x16(C));
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 32767.0f * 2.0f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm1x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec1> A;
+ A.push_back(glm::vec1(1.0f));
+ A.push_back(glm::vec1(0.5f));
+ A.push_back(glm::vec1(0.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec1 B(A[i]);
+ glm::uint8 C = glm::packUnorm1x8(B.x);
+ glm::vec1 D(glm::unpackUnorm1x8(C));
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 255.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm1x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec1> A;
+ A.push_back(glm::vec1( 1.0f));
+ A.push_back(glm::vec1(-0.7f));
+ A.push_back(glm::vec1(-1.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec1 B(A[i]);
+ glm::uint8 C = glm::packSnorm1x8(B.x);
+ glm::vec1 D(glm::unpackSnorm1x8(C));
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 127.f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_packUnorm2x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.7f));
+ A.push_back(glm::vec2(0.5f, 0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint16 C = glm::packUnorm2x8(B);
+ glm::vec2 D = glm::unpackUnorm2x8(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 255.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm2x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2( 1.0f, 0.0f));
+ A.push_back(glm::vec2(-0.7f,-0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint16 C = glm::packSnorm2x8(B);
+ glm::vec2 D = glm::unpackSnorm2x8(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 127.f)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_packUnorm4x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4(1.0f, 0.7f, 0.3f, 0.0f));
+ A.push_back(glm::vec4(0.5f, 0.1f, 0.2f, 0.3f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint32 C = glm::packUnorm4x8(B);
+ glm::vec4 D = glm::unpackUnorm4x8(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 255.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm4x8()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4( 1.0f, 0.0f,-0.5f,-1.0f));
+ A.push_back(glm::vec4(-0.7f,-0.1f, 0.1f, 0.7f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint32 C = glm::packSnorm4x8(B);
+ glm::vec4 D = glm::unpackSnorm4x8(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 127.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.7f));
+ A.push_back(glm::vec2(0.5f, 0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::u16vec2 C = glm::packUnorm<glm::uint16>(B);
+ glm::vec2 D = glm::unpackUnorm<float>(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 255.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packSnorm()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2( 1.0f, 0.0f));
+ A.push_back(glm::vec2(-0.5f,-0.7f));
+ A.push_back(glm::vec2(-0.1f, 0.1f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::i16vec2 C = glm::packSnorm<glm::int16>(B);
+ glm::vec2 D = glm::unpackSnorm<float>(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 32767.0f * 2.0f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm2x4()
+{
+ int Error = 0;
+
+ std::vector<glm::vec2> A;
+ A.push_back(glm::vec2(1.0f, 0.7f));
+ A.push_back(glm::vec2(0.5f, 0.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec2 B(A[i]);
+ glm::uint8 C = glm::packUnorm2x4(B);
+ glm::vec2 D = glm::unpackUnorm2x4(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 15.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm4x4()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4(1.0f, 0.7f, 0.5f, 0.0f));
+ A.push_back(glm::vec4(0.5f, 0.1f, 0.0f, 1.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint16 C = glm::packUnorm4x4(B);
+ glm::vec4 D = glm::unpackUnorm4x4(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 15.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm3x5_1x1()
+{
+ int Error = 0;
+
+ std::vector<glm::vec4> A;
+ A.push_back(glm::vec4(1.0f, 0.7f, 0.5f, 0.0f));
+ A.push_back(glm::vec4(0.5f, 0.1f, 0.0f, 1.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec4 B(A[i]);
+ glm::uint16 C = glm::packUnorm3x5_1x1(B);
+ glm::vec4 D = glm::unpackUnorm3x5_1x1(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 15.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm1x5_1x6_1x5()
+{
+ int Error = 0;
+
+ std::vector<glm::vec3> A;
+ A.push_back(glm::vec3(1.0f, 0.7f, 0.5f));
+ A.push_back(glm::vec3(0.5f, 0.1f, 0.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec3 B(A[i]);
+ glm::uint16 C = glm::packUnorm1x5_1x6_1x5(B);
+ glm::vec3 D = glm::unpackUnorm1x5_1x6_1x5(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 15.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUnorm2x3_1x2()
+{
+ int Error = 0;
+
+ std::vector<glm::vec3> A;
+ A.push_back(glm::vec3(1.0f, 0.7f, 0.5f));
+ A.push_back(glm::vec3(0.5f, 0.1f, 0.0f));
+
+ for(std::size_t i = 0; i < A.size(); ++i)
+ {
+ glm::vec3 B(A[i]);
+ glm::uint8 C = glm::packUnorm2x3_1x2(B);
+ glm::vec3 D = glm::unpackUnorm2x3_1x2(C);
+ Error += glm::all(glm::epsilonEqual(B, D, 1.0f / 3.f)) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_packUint2x8()
+{
+ int Error = 0;
+
+ glm::u8vec2 const Source(1, 2);
+
+ glm::uint16 const Packed = glm::packUint2x8(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::u8vec2 const Unpacked = glm::unpackUint2x8(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packUint4x8()
+{
+ int Error = 0;
+
+ glm::u8vec4 const Source(1, 2, 3, 4);
+
+ glm::uint32 const Packed = glm::packUint4x8(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::u8vec4 const Unpacked = glm::unpackUint4x8(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packUint2x16()
+{
+ int Error = 0;
+
+ glm::u16vec2 const Source(1, 2);
+
+ glm::uint32 const Packed = glm::packUint2x16(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::u16vec2 const Unpacked = glm::unpackUint2x16(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packUint4x16()
+{
+ int Error = 0;
+
+ glm::u16vec4 const Source(1, 2, 3, 4);
+
+ glm::uint64 const Packed = glm::packUint4x16(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::u16vec4 const Unpacked = glm::unpackUint4x16(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packUint2x32()
+{
+ int Error = 0;
+
+ glm::u32vec2 const Source(1, 2);
+
+ glm::uint64 const Packed = glm::packUint2x32(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::u32vec2 const Unpacked = glm::unpackUint2x32(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packInt2x8()
+{
+ int Error = 0;
+
+ glm::i8vec2 const Source(1, 2);
+
+ glm::int16 const Packed = glm::packInt2x8(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::i8vec2 const Unpacked = glm::unpackInt2x8(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packInt4x8()
+{
+ int Error = 0;
+
+ glm::i8vec4 const Source(1, 2, 3, 4);
+
+ glm::int32 const Packed = glm::packInt4x8(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::i8vec4 const Unpacked = glm::unpackInt4x8(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packInt2x16()
+{
+ int Error = 0;
+
+ glm::i16vec2 const Source(1, 2);
+
+ glm::int32 const Packed = glm::packInt2x16(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::i16vec2 const Unpacked = glm::unpackInt2x16(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packInt4x16()
+{
+ int Error = 0;
+
+ glm::i16vec4 const Source(1, 2, 3, 4);
+
+ glm::int64 const Packed = glm::packInt4x16(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::i16vec4 const Unpacked = glm::unpackInt4x16(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int test_packInt2x32()
+{
+ int Error = 0;
+
+ glm::i32vec2 const Source(1, 2);
+
+ glm::int64 const Packed = glm::packInt2x32(Source);
+ Error += Packed != 0 ? 0 : 1;
+
+ glm::i32vec2 const Unpacked = glm::unpackInt2x32(Packed);
+ Error += Source == Unpacked ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_packUnorm();
+ Error += test_packSnorm();
+
+ Error += test_packSnorm1x16();
+ Error += test_packSnorm2x16();
+ Error += test_packSnorm4x16();
+
+ Error += test_packSnorm1x8();
+ Error += test_packSnorm2x8();
+ Error += test_packSnorm4x8();
+
+ Error += test_packUnorm1x16();
+ Error += test_packUnorm2x16();
+ Error += test_packUnorm4x16();
+
+ Error += test_packUnorm1x8();
+ Error += test_packUnorm2x8();
+ Error += test_packUnorm4x8();
+
+ Error += test_packUnorm2x4();
+ Error += test_packUnorm4x4();
+ Error += test_packUnorm3x5_1x1();
+ Error += test_packUnorm1x5_1x6_1x5();
+ Error += test_packUnorm2x3_1x2();
+
+ Error += test_packUint2x8();
+ Error += test_packUint4x8();
+ Error += test_packUint2x16();
+ Error += test_packUint4x16();
+ Error += test_packUint2x32();
+
+ Error += test_packInt2x8();
+ Error += test_packInt4x8();
+ Error += test_packInt2x16();
+ Error += test_packInt4x16();
+ Error += test_packInt2x32();
+
+ Error += test_F2x11_1x10();
+ Error += test_F3x9_E1x5();
+ Error += test_RGBM();
+ Error += test_Unorm3x10_1x2();
+ Error += test_Snorm3x10_1x2();
+
+ Error += test_I3x10_1x2();
+ Error += test_U3x10_1x2();
+ Error += test_Half1x16();
+ Error += test_Half4x16();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_quaternion.cpp b/3rdparty/glm/source/test/gtc/gtc_quaternion.cpp
new file mode 100644
index 0000000..540ca42
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_quaternion.cpp
@@ -0,0 +1,345 @@
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtc/matrix_transform.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/glm.hpp>
+#include <vector>
+
+int test_quat_angle()
+{
+ int Error = 0;
+
+ {
+ glm::quat Q = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 1));
+ glm::quat N = glm::normalize(Q);
+ float L = glm::length(N);
+ Error += glm::equal(L, 1.0f, 0.01f) ? 0 : 1;
+ float A = glm::angle(N);
+ Error += glm::equal(A, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ }
+ {
+ glm::quat Q = glm::angleAxis(glm::pi<float>() * 0.25f, glm::normalize(glm::vec3(0, 1, 1)));
+ glm::quat N = glm::normalize(Q);
+ float L = glm::length(N);
+ Error += glm::equal(L, 1.0f, 0.01f) ? 0 : 1;
+ float A = glm::angle(N);
+ Error += glm::equal(A, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ }
+ {
+ glm::quat Q = glm::angleAxis(glm::pi<float>() * 0.25f, glm::normalize(glm::vec3(1, 2, 3)));
+ glm::quat N = glm::normalize(Q);
+ float L = glm::length(N);
+ Error += glm::equal(L, 1.0f, 0.01f) ? 0 : 1;
+ float A = glm::angle(N);
+ Error += glm::equal(A, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_quat_angleAxis()
+{
+ int Error = 0;
+
+ glm::quat A = glm::angleAxis(0.f, glm::vec3(0.f, 0.f, 1.f));
+ glm::quat B = glm::angleAxis(glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::quat C = glm::mix(A, B, 0.5f);
+ glm::quat D = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 1));
+
+ Error += glm::equal(C.x, D.x, 0.01f) ? 0 : 1;
+ Error += glm::equal(C.y, D.y, 0.01f) ? 0 : 1;
+ Error += glm::equal(C.z, D.z, 0.01f) ? 0 : 1;
+ Error += glm::equal(C.w, D.w, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int test_quat_mix()
+{
+ int Error = 0;
+
+ glm::quat A = glm::angleAxis(0.f, glm::vec3(0.f, 0.f, 1.f));
+ glm::quat B = glm::angleAxis(glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::quat C = glm::mix(A, B, 0.5f);
+ glm::quat D = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 1));
+
+ Error += glm::equal(C.x, D.x, 0.01f) ? 0 : 1;
+ Error += glm::equal(C.y, D.y, 0.01f) ? 0 : 1;
+ Error += glm::equal(C.z, D.z, 0.01f) ? 0 : 1;
+ Error += glm::equal(C.w, D.w, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int test_quat_normalize()
+{
+ int Error(0);
+
+ {
+ glm::quat Q = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 1));
+ glm::quat N = glm::normalize(Q);
+ float L = glm::length(N);
+ Error += glm::equal(L, 1.0f, 0.000001f) ? 0 : 1;
+ }
+ {
+ glm::quat Q = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 2));
+ glm::quat N = glm::normalize(Q);
+ float L = glm::length(N);
+ Error += glm::equal(L, 1.0f, 0.000001f) ? 0 : 1;
+ }
+ {
+ glm::quat Q = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(1, 2, 3));
+ glm::quat N = glm::normalize(Q);
+ float L = glm::length(N);
+ Error += glm::equal(L, 1.0f, 0.000001f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_quat_euler()
+{
+ int Error = 0;
+
+ {
+ glm::quat q(1.0f, 0.0f, 0.0f, 1.0f);
+ float Roll = glm::roll(q);
+ float Pitch = glm::pitch(q);
+ float Yaw = glm::yaw(q);
+ glm::vec3 Angles = glm::eulerAngles(q);
+ Error += glm::all(glm::equal(Angles, glm::vec3(Pitch, Yaw, Roll), 0.000001f)) ? 0 : 1;
+ }
+
+ {
+ glm::dquat q(1.0, 0.0, 0.0, 1.0);
+ double Roll = glm::roll(q);
+ double Pitch = glm::pitch(q);
+ double Yaw = glm::yaw(q);
+ glm::dvec3 Angles = glm::eulerAngles(q);
+ Error += glm::all(glm::equal(Angles, glm::dvec3(Pitch, Yaw, Roll), 0.000001)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_quat_slerp()
+{
+ int Error = 0;
+
+ float const Epsilon = 0.0001f;//glm::epsilon<float>();
+
+ float sqrt2 = std::sqrt(2.0f)/2.0f;
+ glm::quat id(static_cast<float>(1), static_cast<float>(0), static_cast<float>(0), static_cast<float>(0));
+ glm::quat Y90rot(sqrt2, 0.0f, sqrt2, 0.0f);
+ glm::quat Y180rot(0.0f, 0.0f, 1.0f, 0.0f);
+
+ // Testing a == 0
+ // Must be id
+ glm::quat id2 = glm::slerp(id, Y90rot, 0.0f);
+ Error += glm::all(glm::equal(id, id2, Epsilon)) ? 0 : 1;
+
+ // Testing a == 1
+ // Must be 90� rotation on Y : 0 0.7 0 0.7
+ glm::quat Y90rot2 = glm::slerp(id, Y90rot, 1.0f);
+ Error += glm::all(glm::equal(Y90rot, Y90rot2, Epsilon)) ? 0 : 1;
+
+ // Testing standard, easy case
+ // Must be 45� rotation on Y : 0 0.38 0 0.92
+ glm::quat Y45rot1 = glm::slerp(id, Y90rot, 0.5f);
+
+ // Testing reverse case
+ // Must be 45� rotation on Y : 0 0.38 0 0.92
+ glm::quat Ym45rot2 = glm::slerp(Y90rot, id, 0.5f);
+
+ // Testing against full circle around the sphere instead of shortest path
+ // Must be 45� rotation on Y
+ // certainly not a 135� rotation
+ glm::quat Y45rot3 = glm::slerp(id , -Y90rot, 0.5f);
+ float Y45angle3 = glm::angle(Y45rot3);
+ Error += glm::equal(Y45angle3, glm::pi<float>() * 0.25f, Epsilon) ? 0 : 1;
+ Error += glm::all(glm::equal(Ym45rot2, Y45rot3, Epsilon)) ? 0 : 1;
+
+ // Same, but inverted
+ // Must also be 45� rotation on Y : 0 0.38 0 0.92
+ // -0 -0.38 -0 -0.92 is ok too
+ glm::quat Y45rot4 = glm::slerp(-Y90rot, id, 0.5f);
+ Error += glm::all(glm::equal(Ym45rot2, -Y45rot4, Epsilon)) ? 0 : 1;
+
+ // Testing q1 = q2
+ // Must be 90� rotation on Y : 0 0.7 0 0.7
+ glm::quat Y90rot3 = glm::slerp(Y90rot, Y90rot, 0.5f);
+ Error += glm::all(glm::equal(Y90rot, Y90rot3, Epsilon)) ? 0 : 1;
+
+ // Testing 180� rotation
+ // Must be 90� rotation on almost any axis that is on the XZ plane
+ glm::quat XZ90rot = glm::slerp(id, -Y90rot, 0.5f);
+ float XZ90angle = glm::angle(XZ90rot); // Must be PI/4 = 0.78;
+ Error += glm::equal(XZ90angle, glm::pi<float>() * 0.25f, Epsilon) ? 0 : 1;
+
+ // Testing almost equal quaternions (this test should pass through the linear interpolation)
+ // Must be 0 0.00X 0 0.99999
+ glm::quat almostid = glm::slerp(id, glm::angleAxis(0.1f, glm::vec3(0.0f, 1.0f, 0.0f)), 0.5f);
+
+ // Testing quaternions with opposite sign
+ {
+ glm::quat a(-1, 0, 0, 0);
+
+ glm::quat result = glm::slerp(a, id, 0.5f);
+
+ Error += glm::equal(glm::pow(glm::dot(id, result), 2.f), 1.f, 0.01f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_quat_slerp_spins()
+{
+ int Error = 0;
+
+ float const Epsilon = 0.0001f;//glm::epsilon<float>();
+
+ float sqrt2 = std::sqrt(2.0f) / 2.0f;
+ glm::quat id(static_cast<float>(1), static_cast<float>(0), static_cast<float>(0), static_cast<float>(0));
+ glm::quat Y90rot(sqrt2, 0.0f, sqrt2, 0.0f);
+ glm::quat Y180rot(0.0f, 0.0f, 1.0f, 0.0f);
+
+ // Testing a == 0, k == 1
+ // Must be id
+ glm::quat id2 = glm::slerp(id, id, 1.0f, 1);
+ Error += glm::all(glm::equal(id, id2, Epsilon)) ? 0 : 1;
+
+ // Testing a == 1, k == 2
+ // Must be id
+ glm::quat id3 = glm::slerp(id, id, 1.0f, 2);
+ Error += glm::all(glm::equal(id, id3, Epsilon)) ? 0 : 1;
+
+ // Testing a == 1, k == 1
+ // Must be 90� rotation on Y : 0 0.7 0 0.7
+ // Negative quaternion is representing same orientation
+ glm::quat Y90rot2 = glm::slerp(id, Y90rot, 1.0f, 1);
+ Error += glm::all(glm::equal(Y90rot, -Y90rot2, Epsilon)) ? 0 : 1;
+
+ // Testing a == 1, k == 2
+ // Must be id
+ glm::quat Y90rot3 = glm::slerp(id, Y90rot, 8.0f / 9.0f, 2);
+ Error += glm::all(glm::equal(id, Y90rot3, Epsilon)) ? 0 : 1;
+
+ // Testing a == 1, k == 1
+ // Must be 90� rotation on Y : 0 0.7 0 0.7
+ glm::quat Y90rot4 = glm::slerp(id, Y90rot, 0.2f, 1);
+ Error += glm::all(glm::equal(Y90rot, Y90rot4, Epsilon)) ? 0 : 1;
+
+ // Testing reverse case
+ // Must be 45� rotation on Y : 0 0.38 0 0.92
+ // Negative quaternion is representing same orientation
+ glm::quat Ym45rot2 = glm::slerp(Y90rot, id, 0.9f, 1);
+ glm::quat Ym45rot3 = glm::slerp(Y90rot, id, 0.5f);
+ Error += glm::all(glm::equal(-Ym45rot2, Ym45rot3, Epsilon)) ? 0 : 1;
+
+ // Testing against full circle around the sphere instead of shortest path
+ // Must be 45� rotation on Y
+ // certainly not a 135� rotation
+ glm::quat Y45rot3 = glm::slerp(id, -Y90rot, 0.5f, 0);
+ float Y45angle3 = glm::angle(Y45rot3);
+ Error += glm::equal(Y45angle3, glm::pi<float>() * 0.25f, Epsilon) ? 0 : 1;
+ Error += glm::all(glm::equal(Ym45rot3, Y45rot3, Epsilon)) ? 0 : 1;
+
+ // Same, but inverted
+ // Must also be 45� rotation on Y : 0 0.38 0 0.92
+ // -0 -0.38 -0 -0.92 is ok too
+ glm::quat Y45rot4 = glm::slerp(-Y90rot, id, 0.5f, 0);
+ Error += glm::all(glm::equal(Ym45rot2, Y45rot4, Epsilon)) ? 0 : 1;
+
+ // Testing q1 = q2 k == 2
+ // Must be 90� rotation on Y : 0 0.7 0 0.7
+ glm::quat Y90rot5 = glm::slerp(Y90rot, Y90rot, 0.5f, 2);
+ Error += glm::all(glm::equal(Y90rot, Y90rot5, Epsilon)) ? 0 : 1;
+
+ // Testing 180� rotation
+ // Must be 90� rotation on almost any axis that is on the XZ plane
+ glm::quat XZ90rot = glm::slerp(id, -Y90rot, 0.5f, 1);
+ float XZ90angle = glm::angle(XZ90rot); // Must be PI/4 = 0.78;
+ Error += glm::equal(XZ90angle, glm::pi<float>() * 1.25f, Epsilon) ? 0 : 1;
+
+ // Testing rotation over long arc
+ // Distance from id to 90� is 270�, so 2/3 of it should be 180�
+ // Negative quaternion is representing same orientation
+ glm::quat Neg90rot = glm::slerp(id, Y90rot, 2.0f / 3.0f, -1);
+ Error += glm::all(glm::equal(Y180rot, -Neg90rot, Epsilon)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_quat_mul_vec()
+{
+ int Error(0);
+
+ glm::quat q = glm::angleAxis(glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::vec3 v(1, 0, 0);
+ glm::vec3 u(q * v);
+ glm::vec3 w(u * q);
+
+ Error += glm::all(glm::equal(v, w, 0.01f)) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_mul()
+{
+ int Error = 0;
+
+ glm::quat temp1 = glm::normalize(glm::quat(1.0f, glm::vec3(0.0, 1.0, 0.0)));
+ glm::quat temp2 = glm::normalize(glm::quat(0.5f, glm::vec3(1.0, 0.0, 0.0)));
+
+ glm::vec3 transformed0 = (temp1 * glm::vec3(0.0, 1.0, 0.0) * glm::inverse(temp1));
+ glm::vec3 temp4 = temp2 * transformed0 * glm::inverse(temp2);
+
+ glm::quat temp5 = glm::normalize(temp1 * temp2);
+ glm::vec3 temp6 = temp5 * glm::vec3(0.0, 1.0, 0.0) * glm::inverse(temp5);
+
+ glm::quat temp7(1.0f, glm::vec3(0.0, 1.0, 0.0));
+
+ temp7 *= temp5;
+ temp7 *= glm::inverse(temp5);
+
+ Error += glm::any(glm::notEqual(temp7, glm::quat(1.0f, glm::vec3(0.0, 1.0, 0.0)), glm::epsilon<float>())) ? 1 : 0;
+
+ return Error;
+}
+
+int test_identity()
+{
+ int Error = 0;
+
+ glm::quat const Q = glm::identity<glm::quat>();
+
+ Error += glm::all(glm::equal(Q, glm::quat(1, 0, 0, 0), 0.0001f)) ? 0 : 1;
+ Error += glm::any(glm::notEqual(Q, glm::quat(1, 0, 0, 0), 0.0001f)) ? 1 : 0;
+
+ glm::mat4 const M = glm::identity<glm::mat4x4>();
+ glm::mat4 const N(1.0f);
+
+ Error += glm::all(glm::equal(M, N, 0.0001f)) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_mul();
+ Error += test_quat_mul_vec();
+ Error += test_quat_angle();
+ Error += test_quat_angleAxis();
+ Error += test_quat_mix();
+ Error += test_quat_normalize();
+ Error += test_quat_euler();
+ Error += test_quat_slerp();
+ Error += test_quat_slerp_spins();
+ Error += test_identity();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_random.cpp b/3rdparty/glm/source/test/gtc/gtc_random.cpp
new file mode 100644
index 0000000..60fb60c
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_random.cpp
@@ -0,0 +1,381 @@
+#define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
+#include <glm/gtc/random.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/type_precision.hpp>
+#if GLM_LANG & GLM_LANG_CXX0X_FLAG
+# include <array>
+#endif
+
+std::size_t const TestSamples = 10000;
+
+int test_linearRand()
+{
+ int Error = 0;
+
+ glm::int32 const Min = 16;
+ glm::int32 const Max = 32;
+
+ {
+ glm::u8vec2 AMin(std::numeric_limits<glm::u8>::max());
+ glm::u8vec2 AMax(std::numeric_limits<glm::u8>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::u8vec2 A = glm::linearRand(glm::u8vec2(Min), glm::u8vec2(Max));
+ AMin = glm::min(AMin, A);
+ AMax = glm::max(AMax, A);
+
+ if(!glm::all(glm::lessThanEqual(A, glm::u8vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(A, glm::u8vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(AMin, glm::u8vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(AMax, glm::u8vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ glm::u16vec2 BMin(std::numeric_limits<glm::u16>::max());
+ glm::u16vec2 BMax(std::numeric_limits<glm::u16>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::u16vec2 B = glm::linearRand(glm::u16vec2(Min), glm::u16vec2(Max));
+ BMin = glm::min(BMin, B);
+ BMax = glm::max(BMax, B);
+
+ if(!glm::all(glm::lessThanEqual(B, glm::u16vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(B, glm::u16vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(BMin, glm::u16vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(BMax, glm::u16vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ glm::u32vec2 CMin(std::numeric_limits<glm::u32>::max());
+ glm::u32vec2 CMax(std::numeric_limits<glm::u32>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::u32vec2 C = glm::linearRand(glm::u32vec2(Min), glm::u32vec2(Max));
+ CMin = glm::min(CMin, C);
+ CMax = glm::max(CMax, C);
+
+ if(!glm::all(glm::lessThanEqual(C, glm::u32vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(C, glm::u32vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(CMin, glm::u32vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(CMax, glm::u32vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ glm::u64vec2 DMin(std::numeric_limits<glm::u64>::max());
+ glm::u64vec2 DMax(std::numeric_limits<glm::u64>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::u64vec2 D = glm::linearRand(glm::u64vec2(Min), glm::u64vec2(Max));
+ DMin = glm::min(DMin, D);
+ DMax = glm::max(DMax, D);
+
+ if(!glm::all(glm::lessThanEqual(D, glm::u64vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(D, glm::u64vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(DMin, glm::u64vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(DMax, glm::u64vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+ }
+
+ {
+ glm::i8vec2 AMin(std::numeric_limits<glm::i8>::max());
+ glm::i8vec2 AMax(std::numeric_limits<glm::i8>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::i8vec2 A = glm::linearRand(glm::i8vec2(Min), glm::i8vec2(Max));
+ AMin = glm::min(AMin, A);
+ AMax = glm::max(AMax, A);
+
+ if(!glm::all(glm::lessThanEqual(A, glm::i8vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(A, glm::i8vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(AMin, glm::i8vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(AMax, glm::i8vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ glm::i16vec2 BMin(std::numeric_limits<glm::i16>::max());
+ glm::i16vec2 BMax(std::numeric_limits<glm::i16>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::i16vec2 B = glm::linearRand(glm::i16vec2(Min), glm::i16vec2(Max));
+ BMin = glm::min(BMin, B);
+ BMax = glm::max(BMax, B);
+
+ if(!glm::all(glm::lessThanEqual(B, glm::i16vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(B, glm::i16vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(BMin, glm::i16vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(BMax, glm::i16vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ glm::i32vec2 CMin(std::numeric_limits<glm::i32>::max());
+ glm::i32vec2 CMax(std::numeric_limits<glm::i32>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::i32vec2 C = glm::linearRand(glm::i32vec2(Min), glm::i32vec2(Max));
+ CMin = glm::min(CMin, C);
+ CMax = glm::max(CMax, C);
+
+ if(!glm::all(glm::lessThanEqual(C, glm::i32vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(C, glm::i32vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(CMin, glm::i32vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(CMax, glm::i32vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ glm::i64vec2 DMin(std::numeric_limits<glm::i64>::max());
+ glm::i64vec2 DMax(std::numeric_limits<glm::i64>::min());
+ {
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::i64vec2 D = glm::linearRand(glm::i64vec2(Min), glm::i64vec2(Max));
+ DMin = glm::min(DMin, D);
+ DMax = glm::max(DMax, D);
+
+ if(!glm::all(glm::lessThanEqual(D, glm::i64vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(D, glm::i64vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ Error += glm::all(glm::equal(DMin, glm::i64vec2(Min))) ? 0 : 1;
+ Error += glm::all(glm::equal(DMax, glm::i64vec2(Max))) ? 0 : 1;
+ assert(!Error);
+ }
+ }
+
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ glm::f32vec2 const A(glm::linearRand(glm::f32vec2(static_cast<float>(Min)), glm::f32vec2(static_cast<float>(Max))));
+ if(!glm::all(glm::lessThanEqual(A, glm::f32vec2(static_cast<float>(Max)))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(A, glm::f32vec2(static_cast<float>(Min)))))
+ ++Error;
+
+ glm::f64vec2 const B(glm::linearRand(glm::f64vec2(Min), glm::f64vec2(Max)));
+ if(!glm::all(glm::lessThanEqual(B, glm::f64vec2(Max))))
+ ++Error;
+ if(!glm::all(glm::greaterThanEqual(B, glm::f64vec2(Min))))
+ ++Error;
+ assert(!Error);
+ }
+
+ {
+ float ResultFloat = 0.0f;
+ double ResultDouble = 0.0;
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ ResultFloat += glm::linearRand(-1.0f, 1.0f);
+ ResultDouble += glm::linearRand(-1.0, 1.0);
+ }
+
+ Error += glm::epsilonEqual(ResultFloat, 0.0f, 0.0001f);
+ Error += glm::epsilonEqual(ResultDouble, 0.0, 0.0001);
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_circularRand()
+{
+ int Error = 0;
+
+ {
+ std::size_t Max = TestSamples;
+ float ResultFloat = 0.0f;
+ double ResultDouble = 0.0;
+ double Radius = 2.0;
+
+ for(std::size_t i = 0; i < Max; ++i)
+ {
+ ResultFloat += glm::length(glm::circularRand(1.0f));
+ ResultDouble += glm::length(glm::circularRand(Radius));
+ }
+
+ Error += glm::epsilonEqual(ResultFloat, float(Max), 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(ResultDouble, double(Max) * double(Radius), 0.01) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_sphericalRand()
+{
+ int Error = 0;
+
+ {
+ std::size_t Max = TestSamples;
+ float ResultFloatA = 0.0f;
+ float ResultFloatB = 0.0f;
+ float ResultFloatC = 0.0f;
+ double ResultDoubleA = 0.0;
+ double ResultDoubleB = 0.0;
+ double ResultDoubleC = 0.0;
+
+ for(std::size_t i = 0; i < Max; ++i)
+ {
+ ResultFloatA += glm::length(glm::sphericalRand(1.0f));
+ ResultDoubleA += glm::length(glm::sphericalRand(1.0));
+ ResultFloatB += glm::length(glm::sphericalRand(2.0f));
+ ResultDoubleB += glm::length(glm::sphericalRand(2.0));
+ ResultFloatC += glm::length(glm::sphericalRand(3.0f));
+ ResultDoubleC += glm::length(glm::sphericalRand(3.0));
+ }
+
+ Error += glm::epsilonEqual(ResultFloatA, float(Max), 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(ResultDoubleA, double(Max), 0.0001) ? 0 : 1;
+ Error += glm::epsilonEqual(ResultFloatB, float(Max * 2), 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(ResultDoubleB, double(Max * 2), 0.0001) ? 0 : 1;
+ Error += glm::epsilonEqual(ResultFloatC, float(Max * 3), 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(ResultDoubleC, double(Max * 3), 0.01) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_diskRand()
+{
+ int Error = 0;
+
+ {
+ float ResultFloat = 0.0f;
+ double ResultDouble = 0.0;
+
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ ResultFloat += glm::length(glm::diskRand(2.0f));
+ ResultDouble += glm::length(glm::diskRand(2.0));
+ }
+
+ Error += ResultFloat < float(TestSamples) * 2.f ? 0 : 1;
+ Error += ResultDouble < double(TestSamples) * 2.0 ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_ballRand()
+{
+ int Error = 0;
+
+ {
+ float ResultFloat = 0.0f;
+ double ResultDouble = 0.0;
+
+ for(std::size_t i = 0; i < TestSamples; ++i)
+ {
+ ResultFloat += glm::length(glm::ballRand(2.0f));
+ ResultDouble += glm::length(glm::ballRand(2.0));
+ }
+
+ Error += ResultFloat < float(TestSamples) * 2.f ? 0 : 1;
+ Error += ResultDouble < double(TestSamples) * 2.0 ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+/*
+#if(GLM_LANG & GLM_LANG_CXX0X_FLAG)
+int test_grid()
+{
+ int Error = 0;
+
+ typedef std::array<int, 8> colors;
+ typedef std::array<int, 8 * 8> grid;
+
+ grid Grid;
+ colors Colors;
+
+ grid GridBest;
+ colors ColorsBest;
+
+ while(true)
+ {
+ for(std::size_t i = 0; i < Grid.size(); ++i)
+ Grid[i] = int(glm::linearRand(0.0, 8.0 * 8.0 * 8.0 - 1.0) / 64.0);
+
+ for(std::size_t i = 0; i < Grid.size(); ++i)
+ ++Colors[Grid[i]];
+
+ bool Exit = true;
+ for(std::size_t i = 0; i < Colors.size(); ++i)
+ {
+ if(Colors[i] == 8)
+ continue;
+
+ Exit = false;
+ break;
+ }
+
+ if(Exit == true)
+ break;
+ }
+
+ return Error;
+}
+#endif
+*/
+int main()
+{
+ int Error = 0;
+
+ Error += test_linearRand();
+ Error += test_circularRand();
+ Error += test_sphericalRand();
+ Error += test_diskRand();
+ Error += test_ballRand();
+/*
+#if(GLM_LANG & GLM_LANG_CXX0X_FLAG)
+ Error += test_grid();
+#endif
+*/
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_reciprocal.cpp b/3rdparty/glm/source/test/gtc/gtc_reciprocal.cpp
new file mode 100644
index 0000000..5158413
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_reciprocal.cpp
@@ -0,0 +1,8 @@
+#include <glm/gtc/reciprocal.hpp>
+#include <ctime>
+
+int main()
+{
+ return 0;
+}
+
diff --git a/3rdparty/glm/source/test/gtc/gtc_round.cpp b/3rdparty/glm/source/test/gtc/gtc_round.cpp
new file mode 100644
index 0000000..60d9a85
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_round.cpp
@@ -0,0 +1,458 @@
+#include <glm/gtc/round.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <vector>
+#include <ctime>
+#include <cstdio>
+
+namespace isPowerOfTwo
+{
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ bool Return;
+ };
+
+ int test_int16()
+ {
+ type<glm::int16> const Data[] =
+ {
+ {0x0001, true},
+ {0x0002, true},
+ {0x0004, true},
+ {0x0080, true},
+ {0x0000, true},
+ {0x0003, false}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::int16>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uint16()
+ {
+ type<glm::uint16> const Data[] =
+ {
+ {0x0001, true},
+ {0x0002, true},
+ {0x0004, true},
+ {0x0000, true},
+ {0x0000, true},
+ {0x0003, false}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint16>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_int32()
+ {
+ type<int> const Data[] =
+ {
+ {0x00000001, true},
+ {0x00000002, true},
+ {0x00000004, true},
+ {0x0000000f, false},
+ {0x00000000, true},
+ {0x00000003, false}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ glm::bvec1 Result = glm::isPowerOfTwo(glm::ivec1(Data[i].Value));
+ Error += glm::all(glm::equal(glm::bvec1(Data[i].Return), Result)) ? 0 : 1;
+ }
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ glm::bvec2 Result = glm::isPowerOfTwo(glm::ivec2(Data[i].Value));
+ Error += glm::all(glm::equal(glm::bvec2(Data[i].Return), Result)) ? 0 : 1;
+ }
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ glm::bvec3 Result = glm::isPowerOfTwo(glm::ivec3(Data[i].Value));
+ Error += glm::all(glm::equal(glm::bvec3(Data[i].Return), Result)) ? 0 : 1;
+ }
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ glm::bvec4 Result = glm::isPowerOfTwo(glm::ivec4(Data[i].Value));
+ Error += glm::all(glm::equal(glm::bvec4(Data[i].Return), Result)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uint32()
+ {
+ type<glm::uint> const Data[] =
+ {
+ {0x00000001, true},
+ {0x00000002, true},
+ {0x00000004, true},
+ {0x80000000, true},
+ {0x00000000, true},
+ {0x00000003, false}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint>); i < n; ++i)
+ {
+ bool Result = glm::isPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ Error += test_int16();
+ Error += test_uint16();
+ Error += test_int32();
+ Error += test_uint32();
+
+ return Error;
+ }
+}//isPowerOfTwo
+
+namespace ceilPowerOfTwo_advanced
+{
+ template<typename genIUType>
+ GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value)
+ {
+ genIUType tmp = Value;
+ genIUType result = genIUType(0);
+ while(tmp)
+ {
+ result = (tmp & (~tmp + 1)); // grab lowest bit
+ tmp &= ~result; // clear lowest bit
+ }
+ return result;
+ }
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType ceilPowerOfTwo_loop(genType value)
+ {
+ return glm::isPowerOfTwo(value) ? value : highestBitValue(value) << 1;
+ }
+
+ template<typename genType>
+ struct type
+ {
+ genType Value;
+ genType Return;
+ };
+
+ int test_int32()
+ {
+ type<glm::int32> const Data[] =
+ {
+ {0x0000ffff, 0x00010000},
+ {-3, -4},
+ {-8, -8},
+ {0x00000001, 0x00000001},
+ {0x00000002, 0x00000002},
+ {0x00000004, 0x00000004},
+ {0x00000007, 0x00000008},
+ {0x0000fff0, 0x00010000},
+ {0x0000f000, 0x00010000},
+ {0x08000000, 0x08000000},
+ {0x00000000, 0x00000000},
+ {0x00000003, 0x00000004}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::int32>); i < n; ++i)
+ {
+ glm::int32 Result = glm::ceilPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_uint32()
+ {
+ type<glm::uint32> const Data[] =
+ {
+ {0x00000001, 0x00000001},
+ {0x00000002, 0x00000002},
+ {0x00000004, 0x00000004},
+ {0x00000007, 0x00000008},
+ {0x0000ffff, 0x00010000},
+ {0x0000fff0, 0x00010000},
+ {0x0000f000, 0x00010000},
+ {0x80000000, 0x80000000},
+ {0x00000000, 0x00000000},
+ {0x00000003, 0x00000004}
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::uint32>); i < n; ++i)
+ {
+ glm::uint32 Result = glm::ceilPowerOfTwo(Data[i].Value);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int perf()
+ {
+ int Error(0);
+
+ std::vector<glm::uint> v;
+ v.resize(100000000);
+
+ std::clock_t Timestramp0 = std::clock();
+
+ for(glm::uint32 i = 0, n = static_cast<glm::uint>(v.size()); i < n; ++i)
+ v[i] = ceilPowerOfTwo_loop(i);
+
+ std::clock_t Timestramp1 = std::clock();
+
+ for(glm::uint32 i = 0, n = static_cast<glm::uint>(v.size()); i < n; ++i)
+ v[i] = glm::ceilPowerOfTwo(i);
+
+ std::clock_t Timestramp2 = std::clock();
+
+ std::printf("ceilPowerOfTwo_loop: %d clocks\n", static_cast<int>(Timestramp1 - Timestramp0));
+ std::printf("glm::ceilPowerOfTwo: %d clocks\n", static_cast<int>(Timestramp2 - Timestramp1));
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ Error += test_int32();
+ Error += test_uint32();
+
+ return Error;
+ }
+}//namespace ceilPowerOfTwo_advanced
+
+namespace roundPowerOfTwo
+{
+ int test()
+ {
+ int Error = 0;
+
+ glm::uint32 const A = glm::roundPowerOfTwo(7u);
+ Error += A == 8u ? 0 : 1;
+
+ glm::uint32 const B = glm::roundPowerOfTwo(15u);
+ Error += B == 16u ? 0 : 1;
+
+ glm::uint32 const C = glm::roundPowerOfTwo(31u);
+ Error += C == 32u ? 0 : 1;
+
+ glm::uint32 const D = glm::roundPowerOfTwo(9u);
+ Error += D == 8u ? 0 : 1;
+
+ glm::uint32 const E = glm::roundPowerOfTwo(17u);
+ Error += E == 16u ? 0 : 1;
+
+ glm::uint32 const F = glm::roundPowerOfTwo(33u);
+ Error += F == 32u ? 0 : 1;
+
+ return Error;
+ }
+}//namespace roundPowerOfTwo
+
+namespace floorPowerOfTwo
+{
+ int test()
+ {
+ int Error = 0;
+
+ glm::uint32 const A = glm::floorPowerOfTwo(7u);
+ Error += A == 4u ? 0 : 1;
+
+ glm::uint32 const B = glm::floorPowerOfTwo(15u);
+ Error += B == 8u ? 0 : 1;
+
+ glm::uint32 const C = glm::floorPowerOfTwo(31u);
+ Error += C == 16u ? 0 : 1;
+
+ return Error;
+ }
+}//namespace floorPowerOfTwo
+
+namespace ceilPowerOfTwo
+{
+ int test()
+ {
+ int Error = 0;
+
+ glm::uint32 const A = glm::ceilPowerOfTwo(7u);
+ Error += A == 8u ? 0 : 1;
+
+ glm::uint32 const B = glm::ceilPowerOfTwo(15u);
+ Error += B == 16u ? 0 : 1;
+
+ glm::uint32 const C = glm::ceilPowerOfTwo(31u);
+ Error += C == 32u ? 0 : 1;
+
+ return Error;
+ }
+}//namespace ceilPowerOfTwo
+
+namespace floorMultiple
+{
+ template<typename genType>
+ struct type
+ {
+ genType Source;
+ genType Multiple;
+ genType Return;
+ genType Epsilon;
+ };
+
+ int test_float()
+ {
+ type<glm::float64> const Data[] =
+ {
+ {3.4, 0.3, 3.3, 0.0001},
+ {-1.4, 0.3, -1.5, 0.0001},
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::float64>); i < n; ++i)
+ {
+ glm::float64 Result = glm::floorMultiple(Data[i].Source, Data[i].Multiple);
+ Error += glm::epsilonEqual(Data[i].Return, Result, Data[i].Epsilon) ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ Error += test_float();
+
+ return Error;
+ }
+}//namespace floorMultiple
+
+namespace ceilMultiple
+{
+ template<typename genType>
+ struct type
+ {
+ genType Source;
+ genType Multiple;
+ genType Return;
+ genType Epsilon;
+ };
+
+ int test_float()
+ {
+ type<glm::float64> const Data[] =
+ {
+ {3.4, 0.3, 3.6, 0.0001},
+ {-1.4, 0.3, -1.2, 0.0001},
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::float64>); i < n; ++i)
+ {
+ glm::float64 Result = glm::ceilMultiple(Data[i].Source, Data[i].Multiple);
+ Error += glm::epsilonEqual(Data[i].Return, Result, Data[i].Epsilon) ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test_int()
+ {
+ type<int> const Data[] =
+ {
+ {3, 4, 4, 0},
+ {7, 4, 8, 0},
+ {5, 4, 8, 0},
+ {1, 4, 4, 0},
+ {1, 3, 3, 0},
+ {4, 3, 6, 0},
+ {4, 1, 4, 0},
+ {1, 1, 1, 0},
+ {7, 1, 7, 0},
+ };
+
+ int Error(0);
+
+ for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<int>); i < n; ++i)
+ {
+ int Result = glm::ceilMultiple(Data[i].Source, Data[i].Multiple);
+ Error += Data[i].Return == Result ? 0 : 1;
+ }
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ Error += test_int();
+ Error += test_float();
+
+ return Error;
+ }
+}//namespace ceilMultiple
+
+int main()
+{
+ int Error(0);
+
+ Error += isPowerOfTwo::test();
+ Error += floorPowerOfTwo::test();
+ Error += roundPowerOfTwo::test();
+ Error += ceilPowerOfTwo::test();
+ Error += ceilPowerOfTwo_advanced::test();
+
+# ifdef NDEBUG
+ Error += ceilPowerOfTwo_advanced::perf();
+# endif//NDEBUG
+
+ Error += floorMultiple::test();
+ Error += ceilMultiple::test();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_type_aligned.cpp b/3rdparty/glm/source/test/gtc/gtc_type_aligned.cpp
new file mode 100644
index 0000000..3c071ef
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_type_aligned.cpp
@@ -0,0 +1,181 @@
+#include <glm/glm.hpp>
+
+#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+
+GLM_STATIC_ASSERT(glm::detail::is_aligned<glm::aligned_lowp>::value, "aligned_lowp is not aligned");
+GLM_STATIC_ASSERT(glm::detail::is_aligned<glm::aligned_mediump>::value, "aligned_mediump is not aligned");
+GLM_STATIC_ASSERT(glm::detail::is_aligned<glm::aligned_highp>::value, "aligned_highp is not aligned");
+GLM_STATIC_ASSERT(!glm::detail::is_aligned<glm::packed_highp>::value, "packed_highp is aligned");
+GLM_STATIC_ASSERT(!glm::detail::is_aligned<glm::packed_mediump>::value, "packed_mediump is aligned");
+GLM_STATIC_ASSERT(!glm::detail::is_aligned<glm::packed_lowp>::value, "packed_lowp is aligned");
+
+struct my_vec4_packed
+{
+ glm::uint32 a;
+ glm::vec4 b;
+};
+GLM_STATIC_ASSERT(sizeof(my_vec4_packed) == sizeof(glm::uint32) + sizeof(glm::vec4), "glm::vec4 packed is not correct");
+
+struct my_vec4_aligned
+{
+ glm::uint32 a;
+ glm::aligned_vec4 b;
+};
+GLM_STATIC_ASSERT(sizeof(my_vec4_aligned) == sizeof(glm::aligned_vec4) * 2, "glm::vec4 aligned is not correct");
+
+struct my_dvec4_packed
+{
+ glm::uint64 a;
+ glm::dvec4 b;
+};
+GLM_STATIC_ASSERT(sizeof(my_dvec4_packed) == sizeof(glm::uint64) + sizeof(glm::dvec4), "glm::dvec4 packed is not correct");
+
+struct my_dvec4_aligned
+{
+ glm::uint64 a;
+ glm::aligned_dvec4 b;
+};
+//GLM_STATIC_ASSERT(sizeof(my_dvec4_aligned) == sizeof(glm::aligned_dvec4) * 2, "glm::dvec4 aligned is not correct");
+
+struct my_ivec4_packed
+{
+ glm::uint32 a;
+ glm::ivec4 b;
+};
+GLM_STATIC_ASSERT(sizeof(my_ivec4_packed) == sizeof(glm::uint32) + sizeof(glm::ivec4), "glm::ivec4 packed is not correct");
+
+struct my_ivec4_aligned
+{
+ glm::uint32 a;
+ glm::aligned_ivec4 b;
+};
+GLM_STATIC_ASSERT(sizeof(my_ivec4_aligned) == sizeof(glm::aligned_ivec4) * 2, "glm::ivec4 aligned is not correct");
+
+struct my_u8vec4_packed
+{
+ glm::uint32 a;
+ glm::u8vec4 b;
+};
+GLM_STATIC_ASSERT(sizeof(my_u8vec4_packed) == sizeof(glm::uint32) + sizeof(glm::u8vec4), "glm::u8vec4 packed is not correct");
+
+static int test_copy()
+{
+ int Error = 0;
+
+ {
+ glm::aligned_ivec4 const a(1, 2, 3, 4);
+ glm::ivec4 const u(a);
+
+ Error += a.x == u.x ? 0 : 1;
+ Error += a.y == u.y ? 0 : 1;
+ Error += a.z == u.z ? 0 : 1;
+ Error += a.w == u.w ? 0 : 1;
+ }
+
+ {
+ my_ivec4_aligned a;
+ a.b = glm::ivec4(1, 2, 3, 4);
+
+ my_ivec4_packed u;
+ u.b = a.b;
+
+ Error += a.b.x == u.b.x ? 0 : 1;
+ Error += a.b.y == u.b.y ? 0 : 1;
+ Error += a.b.z == u.b.z ? 0 : 1;
+ Error += a.b.w == u.b.w ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_ctor()
+{
+ int Error = 0;
+
+# if GLM_HAS_CONSTEXPR
+ {
+ constexpr glm::aligned_ivec4 v(1);
+
+ Error += v.x == 1 ? 0 : 1;
+ Error += v.y == 1 ? 0 : 1;
+ Error += v.z == 1 ? 0 : 1;
+ Error += v.w == 1 ? 0 : 1;
+ }
+
+ {
+ constexpr glm::packed_ivec4 v(1);
+
+ Error += v.x == 1 ? 0 : 1;
+ Error += v.y == 1 ? 0 : 1;
+ Error += v.z == 1 ? 0 : 1;
+ Error += v.w == 1 ? 0 : 1;
+ }
+
+ {
+ constexpr glm::ivec4 v(1);
+
+ Error += v.x == 1 ? 0 : 1;
+ Error += v.y == 1 ? 0 : 1;
+ Error += v.z == 1 ? 0 : 1;
+ Error += v.w == 1 ? 0 : 1;
+ }
+# endif//GLM_HAS_CONSTEXPR
+
+ return Error;
+}
+
+static int test_aligned_ivec4()
+{
+ int Error = 0;
+
+ glm::aligned_ivec4 const v(1, 2, 3, 4);
+ Error += glm::all(glm::equal(v, glm::aligned_ivec4(1, 2, 3, 4))) ? 0 : 1;
+
+ glm::aligned_ivec4 const u = v * 2;
+ Error += glm::all(glm::equal(u, glm::aligned_ivec4(2, 4, 6, 8))) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_aligned_mat4()
+{
+ int Error = 0;
+
+ glm::aligned_vec4 const u(1.f, 2.f, 3.f, 4.f);
+ Error += glm::all(glm::equal(u, glm::aligned_vec4(1.f, 2.f, 3.f, 4.f), 0.0001f)) ? 0 : 1;
+
+ glm::aligned_vec4 const v(1, 2, 3, 4);
+ Error += glm::all(glm::equal(v, glm::aligned_vec4(1.f, 2.f, 3.f, 4.f), 0.0001f)) ? 0 : 1;
+
+ glm::aligned_mat4 const m(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ glm::aligned_mat4 const t = glm::transpose(m);
+ glm::aligned_mat4 const expected = glm::mat4(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15);
+ Error += glm::all(glm::equal(t, expected, 0.0001f)) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_ctor();
+ Error += test_copy();
+ Error += test_aligned_ivec4();
+ Error += test_aligned_mat4();
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/gtc/gtc_type_precision.cpp b/3rdparty/glm/source/test/gtc/gtc_type_precision.cpp
new file mode 100644
index 0000000..77f0686
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_type_precision.cpp
@@ -0,0 +1,1041 @@
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <vector>
+#if GLM_HAS_OPENMP
+# include <omp.h>
+#endif
+
+#if GLM_HAS_STATIC_ASSERT
+static_assert(sizeof(glm::lowp_u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec2) == 2, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec2) == 2, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec2) == 2, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec2) == 4, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec2) == 4, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec2) == 4, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec2) == 8, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec2) == 8, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec2) == 8, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec2) == 16, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec2) == 16, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec2) == 16, "uint64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec3) == 3, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec3) == 3, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec3) == 3, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec3) == 6, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec3) == 6, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec3) == 6, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec3) == 12, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec3) == 12, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec3) == 12, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec3) == 24, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec3) == 24, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec3) == 24, "uint64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec4) == 4, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec4) == 4, "int8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec4) == 4, "int8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec4) == 8, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec4) == 8, "int16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec4) == 8, "int16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec4) == 16, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec4) == 16, "int32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec4) == 16, "int32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec4) == 32, "int64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec4) == 32, "int64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec4) == 32, "int64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec1) == 1, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec1) == 2, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec1) == 4, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec1) == 8, "uint64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec2) == 2, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec2) == 2, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec2) == 2, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec2) == 4, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec2) == 4, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec2) == 4, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec2) == 8, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec2) == 8, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec2) == 8, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec2) == 16, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec2) == 16, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec2) == 16, "uint64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec3) == 3, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec3) == 3, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec3) == 3, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec3) == 6, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec3) == 6, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec3) == 6, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec3) == 12, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec3) == 12, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec3) == 12, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec3) == 24, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec3) == 24, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec3) == 24, "uint64 size isn't 8 bytes on this platform");
+
+
+static_assert(sizeof(glm::lowp_u8vec4) == 4, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::mediump_u8vec4) == 4, "uint8 size isn't 1 byte on this platform");
+static_assert(sizeof(glm::highp_u8vec4) == 4, "uint8 size isn't 1 byte on this platform");
+
+static_assert(sizeof(glm::lowp_u16vec4) == 8, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::mediump_u16vec4) == 8, "uint16 size isn't 2 bytes on this platform");
+static_assert(sizeof(glm::highp_u16vec4) == 8, "uint16 size isn't 2 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u32vec4) == 16, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::mediump_u32vec4) == 16, "uint32 size isn't 4 bytes on this platform");
+static_assert(sizeof(glm::highp_u32vec4) == 16, "uint32 size isn't 4 bytes on this platform");
+
+static_assert(sizeof(glm::lowp_u64vec4) == 32, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::mediump_u64vec4) == 32, "uint64 size isn't 8 bytes on this platform");
+static_assert(sizeof(glm::highp_u64vec4) == 32, "uint64 size isn't 8 bytes on this platform");
+
+#endif
+
+static int test_scalar_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::int8) != 1;
+ Error += sizeof(glm::int16) != 2;
+ Error += sizeof(glm::int32) != 4;
+ Error += sizeof(glm::int64) != 8;
+ Error += sizeof(glm::uint8) != 1;
+ Error += sizeof(glm::uint16) != 2;
+ Error += sizeof(glm::uint32) != 4;
+ Error += sizeof(glm::uint64) != 8;
+ Error += sizeof(glm::float32) != 4;
+ Error += sizeof(glm::float64) != 8;
+
+ Error += sizeof(glm::lowp_int8) != 1;
+ Error += sizeof(glm::lowp_int16) != 2;
+ Error += sizeof(glm::lowp_int32) != 4;
+ Error += sizeof(glm::lowp_int64) != 8;
+ Error += sizeof(glm::lowp_uint8) != 1;
+ Error += sizeof(glm::lowp_uint16) != 2;
+ Error += sizeof(glm::lowp_uint32) != 4;
+ Error += sizeof(glm::lowp_uint64) != 8;
+ Error += sizeof(glm::lowp_float32) != 4;
+ Error += sizeof(glm::lowp_float64) != 8;
+
+ Error += sizeof(glm::mediump_int8) != 1;
+ Error += sizeof(glm::mediump_int16) != 2;
+ Error += sizeof(glm::mediump_int32) != 4;
+ Error += sizeof(glm::mediump_int64) != 8;
+ Error += sizeof(glm::mediump_uint8) != 1;
+ Error += sizeof(glm::mediump_uint16) != 2;
+ Error += sizeof(glm::mediump_uint32) != 4;
+ Error += sizeof(glm::mediump_uint64) != 8;
+ Error += sizeof(glm::mediump_float32) != 4;
+ Error += sizeof(glm::mediump_float64) != 8;
+
+ Error += sizeof(glm::highp_int8) != 1;
+ Error += sizeof(glm::highp_int16) != 2;
+ Error += sizeof(glm::highp_int32) != 4;
+ Error += sizeof(glm::highp_int64) != 8;
+ Error += sizeof(glm::highp_uint8) != 1;
+ Error += sizeof(glm::highp_uint16) != 2;
+ Error += sizeof(glm::highp_uint32) != 4;
+ Error += sizeof(glm::highp_uint64) != 8;
+ Error += sizeof(glm::highp_float32) != 4;
+ Error += sizeof(glm::highp_float64) != 8;
+
+ return Error;
+}
+
+static int test_fvec_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::f32vec2) != 8;
+ Error += sizeof(glm::f32vec3) != 12;
+ Error += sizeof(glm::f32vec4) != 16;
+ Error += sizeof(glm::f64vec2) != 16;
+ Error += sizeof(glm::f64vec3) != 24;
+ Error += sizeof(glm::f64vec4) != 32;
+
+ Error += sizeof(glm::lowp_f32vec2) != 8;
+ Error += sizeof(glm::lowp_f32vec3) != 12;
+ Error += sizeof(glm::lowp_f32vec4) != 16;
+ Error += sizeof(glm::lowp_f64vec2) != 16;
+ Error += sizeof(glm::lowp_f64vec3) != 24;
+ Error += sizeof(glm::lowp_f64vec4) != 32;
+
+ Error += sizeof(glm::mediump_f32vec2) != 8;
+ Error += sizeof(glm::mediump_f32vec3) != 12;
+ Error += sizeof(glm::mediump_f32vec4) != 16;
+ Error += sizeof(glm::mediump_f64vec2) != 16;
+ Error += sizeof(glm::mediump_f64vec3) != 24;
+ Error += sizeof(glm::mediump_f64vec4) != 32;
+
+ Error += sizeof(glm::highp_f32vec2) != 8;
+ Error += sizeof(glm::highp_f32vec3) != 12;
+ Error += sizeof(glm::highp_f32vec4) != 16;
+ Error += sizeof(glm::highp_f64vec2) != 16;
+ Error += sizeof(glm::highp_f64vec3) != 24;
+ Error += sizeof(glm::highp_f64vec4) != 32;
+
+ return Error;
+}
+
+static int test_fvec_precision()
+{
+ int Error = 0;
+
+ {
+ glm::f32vec2 v1(1.f);
+ glm::lowp_f32vec2 v2(v1);
+ glm::mediump_f32vec2 v3(v1);
+ glm::highp_f32vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::f32vec2(v2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f32vec2(v3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f32vec2(v4), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::f32vec3 v1(1.f);
+ glm::lowp_f32vec3 v2(v1);
+ glm::mediump_f32vec3 v3(v1);
+ glm::highp_f32vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::f32vec3(v2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f32vec3(v3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f32vec3(v4), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ {
+ glm::f32vec4 v1(1.f);
+ glm::lowp_f32vec4 v2(v1);
+ glm::mediump_f32vec4 v3(v1);
+ glm::highp_f32vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::f32vec4(v2), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f32vec4(v3), glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f32vec4(v4), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_dvec_precision()
+{
+ int Error = 0;
+
+ {
+ glm::f64vec2 v1(1.0);
+ glm::lowp_f64vec2 v2(v1);
+ glm::mediump_f64vec2 v3(v1);
+ glm::highp_f64vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::f64vec2(v2), glm::epsilon<double>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f64vec2(v3), glm::epsilon<double>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f64vec2(v4), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::f64vec3 v1(1.0);
+ glm::lowp_f64vec3 v2(v1);
+ glm::mediump_f64vec3 v3(v1);
+ glm::highp_f64vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::f64vec3(v2), glm::epsilon<double>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f64vec3(v3), glm::epsilon<double>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f64vec3(v4), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ {
+ glm::f64vec4 v1(1.0);
+ glm::lowp_f64vec4 v2(v1);
+ glm::mediump_f64vec4 v3(v1);
+ glm::highp_f64vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::f64vec4(v2), glm::epsilon<double>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f64vec4(v3), glm::epsilon<double>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::f64vec4(v4), glm::epsilon<double>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_ivec_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::i8vec2) != 2;
+ Error += sizeof(glm::i8vec3) != 3;
+ Error += sizeof(glm::i8vec4) != 4;
+ Error += sizeof(glm::i16vec2) != 4;
+ Error += sizeof(glm::i16vec3) != 6;
+ Error += sizeof(glm::i16vec4) != 8;
+ Error += sizeof(glm::i32vec2) != 8;
+ Error += sizeof(glm::i32vec3) != 12;
+ Error += sizeof(glm::i32vec4) != 16;
+ Error += sizeof(glm::i64vec2) != 16;
+ Error += sizeof(glm::i64vec3) != 24;
+ Error += sizeof(glm::i64vec4) != 32;
+
+ Error += sizeof(glm::lowp_i8vec2) != 2;
+ Error += sizeof(glm::lowp_i8vec3) != 3;
+ Error += sizeof(glm::lowp_i8vec4) != 4;
+ Error += sizeof(glm::lowp_i16vec2) != 4;
+ Error += sizeof(glm::lowp_i16vec3) != 6;
+ Error += sizeof(glm::lowp_i16vec4) != 8;
+ Error += sizeof(glm::lowp_i32vec2) != 8;
+ Error += sizeof(glm::lowp_i32vec3) != 12;
+ Error += sizeof(glm::lowp_i32vec4) != 16;
+ Error += sizeof(glm::lowp_i64vec2) != 16;
+ Error += sizeof(glm::lowp_i64vec3) != 24;
+ Error += sizeof(glm::lowp_i64vec4) != 32;
+
+ Error += sizeof(glm::mediump_i8vec2) != 2;
+ Error += sizeof(glm::mediump_i8vec3) != 3;
+ Error += sizeof(glm::mediump_i8vec4) != 4;
+ Error += sizeof(glm::mediump_i16vec2) != 4;
+ Error += sizeof(glm::mediump_i16vec3) != 6;
+ Error += sizeof(glm::mediump_i16vec4) != 8;
+ Error += sizeof(glm::mediump_i32vec2) != 8;
+ Error += sizeof(glm::mediump_i32vec3) != 12;
+ Error += sizeof(glm::mediump_i32vec4) != 16;
+ Error += sizeof(glm::mediump_i64vec2) != 16;
+ Error += sizeof(glm::mediump_i64vec3) != 24;
+ Error += sizeof(glm::mediump_i64vec4) != 32;
+
+ Error += sizeof(glm::highp_i8vec2) != 2;
+ Error += sizeof(glm::highp_i8vec3) != 3;
+ Error += sizeof(glm::highp_i8vec4) != 4;
+ Error += sizeof(glm::highp_i16vec2) != 4;
+ Error += sizeof(glm::highp_i16vec3) != 6;
+ Error += sizeof(glm::highp_i16vec4) != 8;
+ Error += sizeof(glm::highp_i32vec2) != 8;
+ Error += sizeof(glm::highp_i32vec3) != 12;
+ Error += sizeof(glm::highp_i32vec4) != 16;
+ Error += sizeof(glm::highp_i64vec2) != 16;
+ Error += sizeof(glm::highp_i64vec3) != 24;
+ Error += sizeof(glm::highp_i64vec4) != 32;
+
+ return Error;
+}
+
+static int test_ivec_precision()
+{
+ int Error = 0;
+
+ {
+ glm::i8vec2 v1(0);
+ glm::lowp_i8vec2 v2(v1);
+ glm::mediump_i8vec2 v3(v1);
+ glm::highp_i8vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i8vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i8vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i8vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i8vec3 v1(0);
+ glm::lowp_i8vec3 v2(v1);
+ glm::mediump_i8vec3 v3(v1);
+ glm::highp_i8vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i8vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i8vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i8vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i8vec4 v1(0);
+ glm::lowp_i8vec4 v2(v1);
+ glm::mediump_i8vec4 v3(v1);
+ glm::highp_i8vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i8vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i8vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i8vec4(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i16vec2 v1(0);
+ glm::lowp_i16vec2 v2(v1);
+ glm::mediump_i16vec2 v3(v1);
+ glm::highp_i16vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i16vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i16vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i16vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i16vec3 v1(0);
+ glm::lowp_i16vec3 v2(v1);
+ glm::mediump_i16vec3 v3(v1);
+ glm::highp_i16vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i16vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i16vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i16vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i16vec4 v1(0);
+ glm::lowp_i16vec4 v2(v1);
+ glm::mediump_i16vec4 v3(v1);
+ glm::highp_i16vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i16vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i16vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i16vec4(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i32vec2 v1(0);
+ glm::lowp_i32vec2 v2(v1);
+ glm::mediump_i32vec2 v3(v1);
+ glm::highp_i32vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i32vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i32vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i32vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i32vec3 v1(0);
+ glm::lowp_i32vec3 v2(v1);
+ glm::mediump_i32vec3 v3(v1);
+ glm::highp_i32vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i32vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i32vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i32vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i32vec4 v1(0);
+ glm::lowp_i32vec4 v2(v1);
+ glm::mediump_i32vec4 v3(v1);
+ glm::highp_i32vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i32vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i32vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i32vec4(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i64vec2 v1(0);
+ glm::lowp_i64vec2 v2(v1);
+ glm::mediump_i64vec2 v3(v1);
+ glm::highp_i64vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i64vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i64vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i64vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i64vec3 v1(0);
+ glm::lowp_i64vec3 v2(v1);
+ glm::mediump_i64vec3 v3(v1);
+ glm::highp_i64vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i64vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i64vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i64vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::i64vec4 v1(0);
+ glm::lowp_i64vec4 v2(v1);
+ glm::mediump_i64vec4 v3(v1);
+ glm::highp_i64vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::i64vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i64vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::i64vec4(v4))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_uvec_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::u8vec2) != 2;
+ Error += sizeof(glm::u8vec3) != 3;
+ Error += sizeof(glm::u8vec4) != 4;
+ Error += sizeof(glm::u16vec2) != 4;
+ Error += sizeof(glm::u16vec3) != 6;
+ Error += sizeof(glm::u16vec4) != 8;
+ Error += sizeof(glm::u32vec2) != 8;
+ Error += sizeof(glm::u32vec3) != 12;
+ Error += sizeof(glm::u32vec4) != 16;
+ Error += sizeof(glm::u64vec2) != 16;
+ Error += sizeof(glm::u64vec3) != 24;
+ Error += sizeof(glm::u64vec4) != 32;
+
+ Error += sizeof(glm::lowp_u8vec2) != 2;
+ Error += sizeof(glm::lowp_u8vec3) != 3;
+ Error += sizeof(glm::lowp_u8vec4) != 4;
+ Error += sizeof(glm::lowp_u16vec2) != 4;
+ Error += sizeof(glm::lowp_u16vec3) != 6;
+ Error += sizeof(glm::lowp_u16vec4) != 8;
+ Error += sizeof(glm::lowp_u32vec2) != 8;
+ Error += sizeof(glm::lowp_u32vec3) != 12;
+ Error += sizeof(glm::lowp_u32vec4) != 16;
+ Error += sizeof(glm::lowp_u64vec2) != 16;
+ Error += sizeof(glm::lowp_u64vec3) != 24;
+ Error += sizeof(glm::lowp_u64vec4) != 32;
+
+ Error += sizeof(glm::mediump_u8vec2) != 2;
+ Error += sizeof(glm::mediump_u8vec3) != 3;
+ Error += sizeof(glm::mediump_u8vec4) != 4;
+ Error += sizeof(glm::mediump_u16vec2) != 4;
+ Error += sizeof(glm::mediump_u16vec3) != 6;
+ Error += sizeof(glm::mediump_u16vec4) != 8;
+ Error += sizeof(glm::mediump_u32vec2) != 8;
+ Error += sizeof(glm::mediump_u32vec3) != 12;
+ Error += sizeof(glm::mediump_u32vec4) != 16;
+ Error += sizeof(glm::mediump_u64vec2) != 16;
+ Error += sizeof(glm::mediump_u64vec3) != 24;
+ Error += sizeof(glm::mediump_u64vec4) != 32;
+
+ Error += sizeof(glm::highp_u8vec2) != 2;
+ Error += sizeof(glm::highp_u8vec3) != 3;
+ Error += sizeof(glm::highp_u8vec4) != 4;
+ Error += sizeof(glm::highp_u16vec2) != 4;
+ Error += sizeof(glm::highp_u16vec3) != 6;
+ Error += sizeof(glm::highp_u16vec4) != 8;
+ Error += sizeof(glm::highp_u32vec2) != 8;
+ Error += sizeof(glm::highp_u32vec3) != 12;
+ Error += sizeof(glm::highp_u32vec4) != 16;
+ Error += sizeof(glm::highp_u64vec2) != 16;
+ Error += sizeof(glm::highp_u64vec3) != 24;
+ Error += sizeof(glm::highp_u64vec4) != 32;
+
+ return Error;
+}
+
+static int test_uvec_precision()
+{
+ int Error = 0;
+
+ {
+ glm::u8vec2 v1(0);
+ glm::lowp_u8vec2 v2(v1);
+ glm::mediump_u8vec2 v3(v1);
+ glm::highp_u8vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u8vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u8vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u8vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u8vec3 v1(0);
+ glm::lowp_u8vec3 v2(v1);
+ glm::mediump_u8vec3 v3(v1);
+ glm::highp_u8vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u8vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u8vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u8vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u8vec4 v1(0);
+ glm::lowp_u8vec4 v2(v1);
+ glm::mediump_u8vec4 v3(v1);
+ glm::highp_u8vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u8vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u8vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u8vec4(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u16vec2 v1(0);
+ glm::lowp_u16vec2 v2(v1);
+ glm::mediump_u16vec2 v3(v1);
+ glm::highp_u16vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u16vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u16vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u16vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u16vec3 v1(0);
+ glm::lowp_u16vec3 v2(v1);
+ glm::mediump_u16vec3 v3(v1);
+ glm::highp_u16vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u16vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u16vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u16vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u16vec4 v1(0);
+ glm::lowp_u16vec4 v2(v1);
+ glm::mediump_u16vec4 v3(v1);
+ glm::highp_u16vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u16vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u16vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u16vec4(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u32vec2 v1(0);
+ glm::lowp_u32vec2 v2(v1);
+ glm::mediump_u32vec2 v3(v1);
+ glm::highp_u32vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u32vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u32vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u32vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u32vec3 v1(0);
+ glm::lowp_u32vec3 v2(v1);
+ glm::mediump_u32vec3 v3(v1);
+ glm::highp_u32vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u32vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u32vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u32vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u32vec4 v1(0);
+ glm::lowp_u32vec4 v2(v1);
+ glm::mediump_u32vec4 v3(v1);
+ glm::highp_u32vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u32vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u32vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u32vec4(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u64vec2 v1(0);
+ glm::lowp_u64vec2 v2(v1);
+ glm::mediump_u64vec2 v3(v1);
+ glm::highp_u64vec2 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u64vec2(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u64vec2(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u64vec2(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u64vec3 v1(0);
+ glm::lowp_u64vec3 v2(v1);
+ glm::mediump_u64vec3 v3(v1);
+ glm::highp_u64vec3 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u64vec3(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u64vec3(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u64vec3(v4))) ? 0 : 1;
+ }
+
+ {
+ glm::u64vec4 v1(0);
+ glm::lowp_u64vec4 v2(v1);
+ glm::mediump_u64vec4 v3(v1);
+ glm::highp_u64vec4 v4(v1);
+
+ Error += glm::all(glm::equal(v1, glm::u64vec4(v2))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u64vec4(v3))) ? 0 : 1;
+ Error += glm::all(glm::equal(v1, glm::u64vec4(v4))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_fmat_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::mat2) != 16;
+ Error += sizeof(glm::mat3) != 36;
+ Error += sizeof(glm::mat4) != 64;
+ Error += sizeof(glm::mat2x2) != 16;
+ Error += sizeof(glm::mat2x3) != 24;
+ Error += sizeof(glm::mat2x4) != 32;
+ Error += sizeof(glm::mat3x2) != 24;
+ Error += sizeof(glm::mat3x3) != 36;
+ Error += sizeof(glm::mat3x4) != 48;
+ Error += sizeof(glm::mat4x2) != 32;
+ Error += sizeof(glm::mat4x3) != 48;
+ Error += sizeof(glm::mat4x4) != 64;
+
+ Error += sizeof(glm::fmat2) != 16;
+ Error += sizeof(glm::fmat3) != 36;
+ Error += sizeof(glm::fmat4) != 64;
+ Error += sizeof(glm::fmat2x2) != 16;
+ Error += sizeof(glm::fmat2x3) != 24;
+ Error += sizeof(glm::fmat2x4) != 32;
+ Error += sizeof(glm::fmat3x2) != 24;
+ Error += sizeof(glm::fmat3x3) != 36;
+ Error += sizeof(glm::fmat3x4) != 48;
+ Error += sizeof(glm::fmat4x2) != 32;
+ Error += sizeof(glm::fmat4x3) != 48;
+ Error += sizeof(glm::fmat4x4) != 64;
+
+ Error += sizeof(glm::f32mat2) != 16;
+ Error += sizeof(glm::f32mat3) != 36;
+ Error += sizeof(glm::f32mat4) != 64;
+ Error += sizeof(glm::f32mat2x2) != 16;
+ Error += sizeof(glm::f32mat2x3) != 24;
+ Error += sizeof(glm::f32mat2x4) != 32;
+ Error += sizeof(glm::f32mat3x2) != 24;
+ Error += sizeof(glm::f32mat3x3) != 36;
+ Error += sizeof(glm::f32mat3x4) != 48;
+ Error += sizeof(glm::f32mat4x2) != 32;
+ Error += sizeof(glm::f32mat4x3) != 48;
+ Error += sizeof(glm::f32mat4x4) != 64;
+
+
+ Error += sizeof(glm::lowp_mat2) != 16;
+ Error += sizeof(glm::lowp_mat3) != 36;
+ Error += sizeof(glm::lowp_mat4) != 64;
+ Error += sizeof(glm::lowp_mat2x2) != 16;
+ Error += sizeof(glm::lowp_mat2x3) != 24;
+ Error += sizeof(glm::lowp_mat2x4) != 32;
+ Error += sizeof(glm::lowp_mat3x2) != 24;
+ Error += sizeof(glm::lowp_mat3x3) != 36;
+ Error += sizeof(glm::lowp_mat3x4) != 48;
+ Error += sizeof(glm::lowp_mat4x2) != 32;
+ Error += sizeof(glm::lowp_mat4x3) != 48;
+ Error += sizeof(glm::lowp_mat4x4) != 64;
+
+ Error += sizeof(glm::lowp_fmat2) != 16;
+ Error += sizeof(glm::lowp_fmat3) != 36;
+ Error += sizeof(glm::lowp_fmat4) != 64;
+ Error += sizeof(glm::lowp_fmat2x2) != 16;
+ Error += sizeof(glm::lowp_fmat2x3) != 24;
+ Error += sizeof(glm::lowp_fmat2x4) != 32;
+ Error += sizeof(glm::lowp_fmat3x2) != 24;
+ Error += sizeof(glm::lowp_fmat3x3) != 36;
+ Error += sizeof(glm::lowp_fmat3x4) != 48;
+ Error += sizeof(glm::lowp_fmat4x2) != 32;
+ Error += sizeof(glm::lowp_fmat4x3) != 48;
+ Error += sizeof(glm::lowp_fmat4x4) != 64;
+
+ Error += sizeof(glm::lowp_f32mat2) != 16;
+ Error += sizeof(glm::lowp_f32mat3) != 36;
+ Error += sizeof(glm::lowp_f32mat4) != 64;
+ Error += sizeof(glm::lowp_f32mat2x2) != 16;
+ Error += sizeof(glm::lowp_f32mat2x3) != 24;
+ Error += sizeof(glm::lowp_f32mat2x4) != 32;
+ Error += sizeof(glm::lowp_f32mat3x2) != 24;
+ Error += sizeof(glm::lowp_f32mat3x3) != 36;
+ Error += sizeof(glm::lowp_f32mat3x4) != 48;
+ Error += sizeof(glm::lowp_f32mat4x2) != 32;
+ Error += sizeof(glm::lowp_f32mat4x3) != 48;
+ Error += sizeof(glm::lowp_f32mat4x4) != 64;
+
+ Error += sizeof(glm::mediump_mat2) != 16;
+ Error += sizeof(glm::mediump_mat3) != 36;
+ Error += sizeof(glm::mediump_mat4) != 64;
+ Error += sizeof(glm::mediump_mat2x2) != 16;
+ Error += sizeof(glm::mediump_mat2x3) != 24;
+ Error += sizeof(glm::mediump_mat2x4) != 32;
+ Error += sizeof(glm::mediump_mat3x2) != 24;
+ Error += sizeof(glm::mediump_mat3x3) != 36;
+ Error += sizeof(glm::mediump_mat3x4) != 48;
+ Error += sizeof(glm::mediump_mat4x2) != 32;
+ Error += sizeof(glm::mediump_mat4x3) != 48;
+ Error += sizeof(glm::mediump_mat4x4) != 64;
+
+ Error += sizeof(glm::mediump_fmat2) != 16;
+ Error += sizeof(glm::mediump_fmat3) != 36;
+ Error += sizeof(glm::mediump_fmat4) != 64;
+ Error += sizeof(glm::mediump_fmat2x2) != 16;
+ Error += sizeof(glm::mediump_fmat2x3) != 24;
+ Error += sizeof(glm::mediump_fmat2x4) != 32;
+ Error += sizeof(glm::mediump_fmat3x2) != 24;
+ Error += sizeof(glm::mediump_fmat3x3) != 36;
+ Error += sizeof(glm::mediump_fmat3x4) != 48;
+ Error += sizeof(glm::mediump_fmat4x2) != 32;
+ Error += sizeof(glm::mediump_fmat4x3) != 48;
+ Error += sizeof(glm::mediump_fmat4x4) != 64;
+
+ Error += sizeof(glm::mediump_f32mat2) != 16;
+ Error += sizeof(glm::mediump_f32mat3) != 36;
+ Error += sizeof(glm::mediump_f32mat4) != 64;
+ Error += sizeof(glm::mediump_f32mat2x2) != 16;
+ Error += sizeof(glm::mediump_f32mat2x3) != 24;
+ Error += sizeof(glm::mediump_f32mat2x4) != 32;
+ Error += sizeof(glm::mediump_f32mat3x2) != 24;
+ Error += sizeof(glm::mediump_f32mat3x3) != 36;
+ Error += sizeof(glm::mediump_f32mat3x4) != 48;
+ Error += sizeof(glm::mediump_f32mat4x2) != 32;
+ Error += sizeof(glm::mediump_f32mat4x3) != 48;
+ Error += sizeof(glm::mediump_f32mat4x4) != 64;
+
+ Error += sizeof(glm::highp_mat2) != 16;
+ Error += sizeof(glm::highp_mat3) != 36;
+ Error += sizeof(glm::highp_mat4) != 64;
+ Error += sizeof(glm::highp_mat2x2) != 16;
+ Error += sizeof(glm::highp_mat2x3) != 24;
+ Error += sizeof(glm::highp_mat2x4) != 32;
+ Error += sizeof(glm::highp_mat3x2) != 24;
+ Error += sizeof(glm::highp_mat3x3) != 36;
+ Error += sizeof(glm::highp_mat3x4) != 48;
+ Error += sizeof(glm::highp_mat4x2) != 32;
+ Error += sizeof(glm::highp_mat4x3) != 48;
+ Error += sizeof(glm::highp_mat4x4) != 64;
+
+ Error += sizeof(glm::highp_fmat2) != 16;
+ Error += sizeof(glm::highp_fmat3) != 36;
+ Error += sizeof(glm::highp_fmat4) != 64;
+ Error += sizeof(glm::highp_fmat2x2) != 16;
+ Error += sizeof(glm::highp_fmat2x3) != 24;
+ Error += sizeof(glm::highp_fmat2x4) != 32;
+ Error += sizeof(glm::highp_fmat3x2) != 24;
+ Error += sizeof(glm::highp_fmat3x3) != 36;
+ Error += sizeof(glm::highp_fmat3x4) != 48;
+ Error += sizeof(glm::highp_fmat4x2) != 32;
+ Error += sizeof(glm::highp_fmat4x3) != 48;
+ Error += sizeof(glm::highp_fmat4x4) != 64;
+
+ Error += sizeof(glm::highp_f32mat2) != 16;
+ Error += sizeof(glm::highp_f32mat3) != 36;
+ Error += sizeof(glm::highp_f32mat4) != 64;
+ Error += sizeof(glm::highp_f32mat2x2) != 16;
+ Error += sizeof(glm::highp_f32mat2x3) != 24;
+ Error += sizeof(glm::highp_f32mat2x4) != 32;
+ Error += sizeof(glm::highp_f32mat3x2) != 24;
+ Error += sizeof(glm::highp_f32mat3x3) != 36;
+ Error += sizeof(glm::highp_f32mat3x4) != 48;
+ Error += sizeof(glm::highp_f32mat4x2) != 32;
+ Error += sizeof(glm::highp_f32mat4x3) != 48;
+ Error += sizeof(glm::highp_f32mat4x4) != 64;
+
+ return Error;
+}
+
+static int test_dmat_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::f64mat2) != 32;
+ Error += sizeof(glm::f64mat3) != 72;
+ Error += sizeof(glm::f64mat4) != 128;
+ Error += sizeof(glm::f64mat2x2) != 32;
+ Error += sizeof(glm::f64mat2x3) != 48;
+ Error += sizeof(glm::f64mat2x4) != 64;
+ Error += sizeof(glm::f64mat3x2) != 48;
+ Error += sizeof(glm::f64mat3x3) != 72;
+ Error += sizeof(glm::f64mat3x4) != 96;
+ Error += sizeof(glm::f64mat4x2) != 64;
+ Error += sizeof(glm::f64mat4x3) != 96;
+ Error += sizeof(glm::f64mat4x4) != 128;
+
+ Error += sizeof(glm::lowp_f64mat2) != 32;
+ Error += sizeof(glm::lowp_f64mat3) != 72;
+ Error += sizeof(glm::lowp_f64mat4) != 128;
+ Error += sizeof(glm::lowp_f64mat2x2) != 32;
+ Error += sizeof(glm::lowp_f64mat2x3) != 48;
+ Error += sizeof(glm::lowp_f64mat2x4) != 64;
+ Error += sizeof(glm::lowp_f64mat3x2) != 48;
+ Error += sizeof(glm::lowp_f64mat3x3) != 72;
+ Error += sizeof(glm::lowp_f64mat3x4) != 96;
+ Error += sizeof(glm::lowp_f64mat4x2) != 64;
+ Error += sizeof(glm::lowp_f64mat4x3) != 96;
+ Error += sizeof(glm::lowp_f64mat4x4) != 128;
+
+ Error += sizeof(glm::mediump_f64mat2) != 32;
+ Error += sizeof(glm::mediump_f64mat3) != 72;
+ Error += sizeof(glm::mediump_f64mat4) != 128;
+ Error += sizeof(glm::mediump_f64mat2x2) != 32;
+ Error += sizeof(glm::mediump_f64mat2x3) != 48;
+ Error += sizeof(glm::mediump_f64mat2x4) != 64;
+ Error += sizeof(glm::mediump_f64mat3x2) != 48;
+ Error += sizeof(glm::mediump_f64mat3x3) != 72;
+ Error += sizeof(glm::mediump_f64mat3x4) != 96;
+ Error += sizeof(glm::mediump_f64mat4x2) != 64;
+ Error += sizeof(glm::mediump_f64mat4x3) != 96;
+ Error += sizeof(glm::mediump_f64mat4x4) != 128;
+
+ Error += sizeof(glm::highp_f64mat2) != 32;
+ Error += sizeof(glm::highp_f64mat3) != 72;
+ Error += sizeof(glm::highp_f64mat4) != 128;
+ Error += sizeof(glm::highp_f64mat2x2) != 32;
+ Error += sizeof(glm::highp_f64mat2x3) != 48;
+ Error += sizeof(glm::highp_f64mat2x4) != 64;
+ Error += sizeof(glm::highp_f64mat3x2) != 48;
+ Error += sizeof(glm::highp_f64mat3x3) != 72;
+ Error += sizeof(glm::highp_f64mat3x4) != 96;
+ Error += sizeof(glm::highp_f64mat4x2) != 64;
+ Error += sizeof(glm::highp_f64mat4x3) != 96;
+ Error += sizeof(glm::highp_f64mat4x4) != 128;
+
+ return Error;
+}
+
+static int test_quat_size()
+{
+ int Error = 0;
+
+ Error += sizeof(glm::f32quat) != 16;
+ Error += sizeof(glm::f64quat) != 32;
+
+ Error += sizeof(glm::lowp_f32quat) != 16;
+ Error += sizeof(glm::lowp_f64quat) != 32;
+
+ Error += sizeof(glm::mediump_f32quat) != 16;
+ Error += sizeof(glm::mediump_f64quat) != 32;
+
+ Error += sizeof(glm::highp_f32quat) != 16;
+ Error += sizeof(glm::highp_f64quat) != 32;
+
+ return Error;
+}
+
+static int test_quat_precision()
+{
+ int Error = 0;
+
+ {
+ glm::f32quat q1(0.f, glm::vec3(0.f, 0.f, 1.f));
+ glm::lowp_f32quat qA(q1);
+ glm::mediump_f32quat qB(q1);
+ glm::highp_f32quat qC(q1);
+ glm::f32quat q2(qA);
+ glm::f32quat q3(qB);
+ glm::f32quat q4(qC);
+
+ Error += glm::all(glm::equal(q1, q2, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(q1, q3, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(q1, q4, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+static int test_fvec_conversion()
+{
+ int Error(0);
+
+ {
+ glm::highp_vec4 a = glm::vec4(1, 2, 3, 4);
+ glm::mediump_vec4 b = glm::vec4(1, 2, 3, 4);
+ glm::lowp_vec4 c = b;
+ glm::mediump_vec4 d = c;
+ glm::lowp_ivec4 e = glm::ivec4(d);
+ glm::lowp_ivec3 f = glm::ivec3(e);
+
+ Error += glm::all(glm::equal(b, d, glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+#if GLM_HAS_OPENMP
+static int test_openmp()
+{
+ std::vector<glm::u8vec3> VectorA(1000);
+ std::vector<glm::u8vec3> VectorB(1000);
+ std::vector<glm::u8vec3> VectorC(1000);
+
+ for (std::size_t i = 0; i < VectorA.size(); ++i)
+ {
+ VectorA[i] = glm::u8vec3(1, 1, 1);
+ VectorB[i] = glm::u8vec3(1, 1, 1);
+ }
+
+ #pragma omp parallel for default(none) shared(VectorA, VectorB, VectorC)
+ for (int i = 0; i < static_cast<int>(VectorC.size()); ++i)
+ {
+ VectorC[i] = VectorA[i] + VectorB[i];
+ }
+
+ return 0;
+}
+#endif//GLM_HAS_OPENMP
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_scalar_size();
+ Error += test_fvec_size();
+
+ Error += test_fvec_precision();
+ Error += test_fvec_conversion();
+
+ Error += test_dvec_precision();
+
+ Error += test_uvec_size();
+ Error += test_uvec_precision();
+ Error += test_ivec_size();
+ Error += test_ivec_precision();
+
+ Error += test_fmat_size();
+ Error += test_dmat_size();
+ Error += test_quat_size();
+ Error += test_quat_precision();
+
+# if GLM_HAS_OPENMP
+ Error += test_openmp();
+# endif//
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_type_ptr.cpp b/3rdparty/glm/source/test/gtc/gtc_type_ptr.cpp
new file mode 100644
index 0000000..6fcd305
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_type_ptr.cpp
@@ -0,0 +1,335 @@
+#include <glm/gtc/type_ptr.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_relational.hpp>
+
+int test_value_ptr_vec()
+{
+ int Error = 0;
+
+ {
+ glm::vec2 v(1.0);
+ float * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::vec3 v(1.0);
+ float * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::vec4 v(1.0);
+ float * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+
+ {
+ glm::dvec2 v(1.0);
+ double * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::dvec3 v(1.0);
+ double * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::dvec4 v(1.0);
+ double * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_value_ptr_vec_const()
+{
+ int Error = 0;
+
+ {
+ glm::vec2 const v(1.0);
+ float const * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::vec3 const v(1.0);
+ float const * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::vec4 const v(1.0);
+ float const * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+
+ {
+ glm::dvec2 const v(1.0);
+ double const * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::dvec3 const v(1.0);
+ double const * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+ {
+ glm::dvec4 const v(1.0);
+ double const * p = glm::value_ptr(v);
+ Error += p == &v[0] ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_value_ptr_mat()
+{
+ int Error = 0;
+
+ {
+ glm::mat2x2 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat2x3 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat2x4 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat3x2 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat3x3 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat3x4 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat4x2 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat4x3 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat4x4 m(1.0);
+ float * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_value_ptr_mat_const()
+{
+ int Error = 0;
+
+ {
+ glm::mat2x2 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat2x3 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat2x4 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat3x2 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat3x3 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat3x4 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat4x2 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat4x3 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+ {
+ glm::mat4x4 const m(1.0);
+ float const * p = glm::value_ptr(m);
+ Error += p == &m[0][0] ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_make_pointer_mat()
+{
+ int Error = 0;
+
+ float ArrayA[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ double ArrayB[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+ glm::mat2x2 Mat2x2A = glm::make_mat2x2(ArrayA);
+ glm::mat2x3 Mat2x3A = glm::make_mat2x3(ArrayA);
+ glm::mat2x4 Mat2x4A = glm::make_mat2x4(ArrayA);
+ glm::mat3x2 Mat3x2A = glm::make_mat3x2(ArrayA);
+ glm::mat3x3 Mat3x3A = glm::make_mat3x3(ArrayA);
+ glm::mat3x4 Mat3x4A = glm::make_mat3x4(ArrayA);
+ glm::mat4x2 Mat4x2A = glm::make_mat4x2(ArrayA);
+ glm::mat4x3 Mat4x3A = glm::make_mat4x3(ArrayA);
+ glm::mat4x4 Mat4x4A = glm::make_mat4x4(ArrayA);
+
+ glm::dmat2x2 Mat2x2B = glm::make_mat2x2(ArrayB);
+ glm::dmat2x3 Mat2x3B = glm::make_mat2x3(ArrayB);
+ glm::dmat2x4 Mat2x4B = glm::make_mat2x4(ArrayB);
+ glm::dmat3x2 Mat3x2B = glm::make_mat3x2(ArrayB);
+ glm::dmat3x3 Mat3x3B = glm::make_mat3x3(ArrayB);
+ glm::dmat3x4 Mat3x4B = glm::make_mat3x4(ArrayB);
+ glm::dmat4x2 Mat4x2B = glm::make_mat4x2(ArrayB);
+ glm::dmat4x3 Mat4x3B = glm::make_mat4x3(ArrayB);
+ glm::dmat4x4 Mat4x4B = glm::make_mat4x4(ArrayB);
+
+ return Error;
+}
+
+int test_make_pointer_vec()
+{
+ int Error = 0;
+
+ float ArrayA[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ int ArrayB[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ bool ArrayC[] = {true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false};
+
+ glm::vec2 Vec2A = glm::make_vec2(ArrayA);
+ glm::vec3 Vec3A = glm::make_vec3(ArrayA);
+ glm::vec4 Vec4A = glm::make_vec4(ArrayA);
+
+ glm::ivec2 Vec2B = glm::make_vec2(ArrayB);
+ glm::ivec3 Vec3B = glm::make_vec3(ArrayB);
+ glm::ivec4 Vec4B = glm::make_vec4(ArrayB);
+
+ glm::bvec2 Vec2C = glm::make_vec2(ArrayC);
+ glm::bvec3 Vec3C = glm::make_vec3(ArrayC);
+ glm::bvec4 Vec4C = glm::make_vec4(ArrayC);
+
+ return Error;
+}
+
+int test_make_vec1()
+{
+ int Error = 0;
+
+ glm::ivec1 const v1 = glm::make_vec1(glm::ivec1(2));
+ Error += v1 == glm::ivec1(2) ? 0 : 1;
+
+ glm::ivec1 const v2 = glm::make_vec1(glm::ivec2(2));
+ Error += v2 == glm::ivec1(2) ? 0 : 1;
+
+ glm::ivec1 const v3 = glm::make_vec1(glm::ivec3(2));
+ Error += v3 == glm::ivec1(2) ? 0 : 1;
+
+ glm::ivec1 const v4 = glm::make_vec1(glm::ivec4(2));
+	Error += v4 == glm::ivec1(2) ? 0 : 1;
+
+ return Error;
+}
+
+int test_make_vec2()
+{
+ int Error = 0;
+
+ glm::ivec2 const v1 = glm::make_vec2(glm::ivec1(2));
+ Error += v1 == glm::ivec2(2, 0) ? 0 : 1;
+
+ glm::ivec2 const v2 = glm::make_vec2(glm::ivec2(2));
+ Error += v2 == glm::ivec2(2, 2) ? 0 : 1;
+
+ glm::ivec2 const v3 = glm::make_vec2(glm::ivec3(2));
+ Error += v3 == glm::ivec2(2, 2) ? 0 : 1;
+
+ glm::ivec2 const v4 = glm::make_vec2(glm::ivec4(2));
+	Error += v4 == glm::ivec2(2, 2) ? 0 : 1;
+
+ return Error;
+}
+
+int test_make_vec3()
+{
+ int Error = 0;
+
+ glm::ivec3 const v1 = glm::make_vec3(glm::ivec1(2));
+ Error += v1 == glm::ivec3(2, 0, 0) ? 0 : 1;
+
+ glm::ivec3 const v2 = glm::make_vec3(glm::ivec2(2));
+ Error += v2 == glm::ivec3(2, 2, 0) ? 0 : 1;
+
+ glm::ivec3 const v3 = glm::make_vec3(glm::ivec3(2));
+ Error += v3 == glm::ivec3(2, 2, 2) ? 0 : 1;
+
+ glm::ivec3 const v4 = glm::make_vec3(glm::ivec4(2));
+	Error += v4 == glm::ivec3(2, 2, 2) ? 0 : 1;
+
+ return Error;
+}
+
+int test_make_vec4()
+{
+ int Error = 0;
+
+ glm::ivec4 const v1 = glm::make_vec4(glm::ivec1(2));
+ Error += v1 == glm::ivec4(2, 0, 0, 1) ? 0 : 1;
+
+ glm::ivec4 const v2 = glm::make_vec4(glm::ivec2(2));
+ Error += v2 == glm::ivec4(2, 2, 0, 1) ? 0 : 1;
+
+ glm::ivec4 const v3 = glm::make_vec4(glm::ivec3(2));
+ Error += v3 == glm::ivec4(2, 2, 2, 1) ? 0 : 1;
+
+ glm::ivec4 const v4 = glm::make_vec4(glm::ivec4(2));
+ Error += v4 == glm::ivec4(2, 2, 2, 2) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_make_vec1();
+ Error += test_make_vec2();
+ Error += test_make_vec3();
+ Error += test_make_vec4();
+ Error += test_make_pointer_vec();
+ Error += test_make_pointer_mat();
+ Error += test_value_ptr_vec();
+ Error += test_value_ptr_vec_const();
+ Error += test_value_ptr_mat();
+ Error += test_value_ptr_mat_const();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_ulp.cpp b/3rdparty/glm/source/test/gtc/gtc_ulp.cpp
new file mode 100644
index 0000000..d5074a3
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_ulp.cpp
@@ -0,0 +1,99 @@
+#include <glm/gtc/ulp.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <limits>
+
+int test_ulp_float_dist()
+{
+ int Error = 0;
+
+ float A = 1.0f;
+
+ float B = glm::next_float(A);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ float C = glm::prev_float(B);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ int D = glm::float_distance(A, B);
+ Error += D == 1 ? 0 : 1;
+ int E = glm::float_distance(A, C);
+ Error += E == 0 ? 0 : 1;
+
+ return Error;
+}
+
+int test_ulp_float_step()
+{
+ int Error = 0;
+
+ float A = 1.0f;
+
+ for(int i = 10; i < 1000; i *= 10)
+ {
+ float B = glm::next_float(A, i);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ float C = glm::prev_float(B, i);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ int D = glm::float_distance(A, B);
+ Error += D == i ? 0 : 1;
+ int E = glm::float_distance(A, C);
+ Error += E == 0 ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_ulp_double_dist()
+{
+ int Error = 0;
+
+ double A = 1.0;
+
+ double B = glm::next_float(A);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ double C = glm::prev_float(B);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ glm::int64 const D = glm::float_distance(A, B);
+ Error += D == 1 ? 0 : 1;
+ glm::int64 const E = glm::float_distance(A, C);
+ Error += E == 0 ? 0 : 1;
+
+ return Error;
+}
+
+int test_ulp_double_step()
+{
+ int Error = 0;
+
+ double A = 1.0;
+
+ for(int i = 10; i < 1000; i *= 10)
+ {
+ double B = glm::next_float(A, i);
+ Error += glm::notEqual(A, B, 0) ? 0 : 1;
+ double C = glm::prev_float(B, i);
+ Error += glm::equal(A, C, 0) ? 0 : 1;
+
+ glm::int64 const D = glm::float_distance(A, B);
+ Error += D == i ? 0 : 1;
+ glm::int64 const E = glm::float_distance(A, C);
+ Error += E == 0 ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_ulp_float_dist();
+ Error += test_ulp_float_step();
+ Error += test_ulp_double_dist();
+ Error += test_ulp_double_step();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtc/gtc_user_defined_types.cpp b/3rdparty/glm/source/test/gtc/gtc_user_defined_types.cpp
new file mode 100644
index 0000000..af39620
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_user_defined_types.cpp
@@ -0,0 +1,30 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// OpenGL Mathematics Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Created : 2010-09-16
+// Updated : 2011-05-27
+// Licence : This source is under MIT licence
+// File : test/gtc/type_ptr.cpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define GLM_FORCE_RADIANS
+#include <glm/gtc/user_defined_type.hpp>
+
+int test_make_pointer_vec()
+{
+ int Error = 0;
+
+ glm::func();
+ //func();
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_make_pointer_vec();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtc/gtc_vec1.cpp b/3rdparty/glm/source/test/gtc/gtc_vec1.cpp
new file mode 100644
index 0000000..268d95e
--- /dev/null
+++ b/3rdparty/glm/source/test/gtc/gtc_vec1.cpp
@@ -0,0 +1,8 @@
+#include <glm/gtc/vec1.hpp>
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/CMakeLists.txt b/3rdparty/glm/source/test/gtx/CMakeLists.txt
new file mode 100644
index 0000000..ad7bf49
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/CMakeLists.txt
@@ -0,0 +1,59 @@
+glmCreateTestGTC(gtx)
+glmCreateTestGTC(gtx_associated_min_max)
+glmCreateTestGTC(gtx_closest_point)
+glmCreateTestGTC(gtx_color_encoding)
+glmCreateTestGTC(gtx_color_space_YCoCg)
+glmCreateTestGTC(gtx_color_space)
+glmCreateTestGTC(gtx_common)
+glmCreateTestGTC(gtx_compatibility)
+glmCreateTestGTC(gtx_component_wise)
+glmCreateTestGTC(gtx_easing)
+glmCreateTestGTC(gtx_euler_angle)
+glmCreateTestGTC(gtx_extend)
+glmCreateTestGTC(gtx_extended_min_max)
+glmCreateTestGTC(gtx_exterior_product)
+glmCreateTestGTC(gtx_fast_exponential)
+glmCreateTestGTC(gtx_fast_square_root)
+glmCreateTestGTC(gtx_fast_trigonometry)
+glmCreateTestGTC(gtx_functions)
+glmCreateTestGTC(gtx_gradient_paint)
+glmCreateTestGTC(gtx_handed_coordinate_space)
+glmCreateTestGTC(gtx_integer)
+glmCreateTestGTC(gtx_intersect)
+glmCreateTestGTC(gtx_io)
+glmCreateTestGTC(gtx_load)
+glmCreateTestGTC(gtx_log_base)
+glmCreateTestGTC(gtx_matrix_cross_product)
+glmCreateTestGTC(gtx_matrix_decompose)
+glmCreateTestGTC(gtx_matrix_factorisation)
+glmCreateTestGTC(gtx_matrix_interpolation)
+glmCreateTestGTC(gtx_matrix_major_storage)
+glmCreateTestGTC(gtx_matrix_operation)
+glmCreateTestGTC(gtx_matrix_query)
+glmCreateTestGTC(gtx_matrix_transform_2d)
+glmCreateTestGTC(gtx_norm)
+glmCreateTestGTC(gtx_normal)
+glmCreateTestGTC(gtx_normalize_dot)
+glmCreateTestGTC(gtx_number_precision)
+glmCreateTestGTC(gtx_orthonormalize)
+glmCreateTestGTC(gtx_optimum_pow)
+glmCreateTestGTC(gtx_pca)
+glmCreateTestGTC(gtx_perpendicular)
+glmCreateTestGTC(gtx_polar_coordinates)
+glmCreateTestGTC(gtx_projection)
+glmCreateTestGTC(gtx_quaternion)
+glmCreateTestGTC(gtx_dual_quaternion)
+glmCreateTestGTC(gtx_range)
+glmCreateTestGTC(gtx_rotate_normalized_axis)
+glmCreateTestGTC(gtx_rotate_vector)
+glmCreateTestGTC(gtx_scalar_multiplication)
+glmCreateTestGTC(gtx_scalar_relational)
+glmCreateTestGTC(gtx_spline)
+glmCreateTestGTC(gtx_string_cast)
+glmCreateTestGTC(gtx_texture)
+glmCreateTestGTC(gtx_type_aligned)
+glmCreateTestGTC(gtx_type_trait)
+glmCreateTestGTC(gtx_vec_swizzle)
+glmCreateTestGTC(gtx_vector_angle)
+glmCreateTestGTC(gtx_vector_query)
+glmCreateTestGTC(gtx_wrap)
diff --git a/3rdparty/glm/source/test/gtx/gtx.cpp b/3rdparty/glm/source/test/gtx/gtx.cpp
new file mode 100644
index 0000000..1b143b6
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx.cpp
@@ -0,0 +1,8 @@
+#include <glm/ext.hpp>
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_associated_min_max.cpp b/3rdparty/glm/source/test/gtx/gtx_associated_min_max.cpp
new file mode 100644
index 0000000..9007f8a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_associated_min_max.cpp
@@ -0,0 +1,10 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtx/associated_min_max.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_closest_point.cpp b/3rdparty/glm/source/test/gtx/gtx_closest_point.cpp
new file mode 100644
index 0000000..0f6303a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_closest_point.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/closest_point.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_color_encoding.cpp b/3rdparty/glm/source/test/gtx/gtx_color_encoding.cpp
new file mode 100644
index 0000000..8b499be
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_color_encoding.cpp
@@ -0,0 +1,51 @@
+#include <glm/gtx/color_encoding.hpp>
+#include <glm/gtc/color_space.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/constants.hpp>
+
+namespace srgb
+{
+ int test()
+ {
+ int Error(0);
+
+ glm::vec3 const ColorSourceRGB(1.0, 0.5, 0.0);
+/*
+ {
+ glm::vec3 const ColorSRGB = glm::convertLinearSRGBToD65XYZ(ColorSourceRGB);
+ glm::vec3 const ColorRGB = glm::convertD65XYZToLinearSRGB(ColorSRGB);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGB, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+*/
+ {
+ glm::vec3 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGB, 2.8f);
+ glm::vec3 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB, 2.8f);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGB, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ glm::vec4 const ColorSourceRGBA(1.0, 0.5, 0.0, 1.0);
+
+ {
+ glm::vec4 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGBA);
+ glm::vec4 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGBA, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const ColorSRGB = glm::convertLinearToSRGB(ColorSourceRGBA, 2.8f);
+ glm::vec4 const ColorRGB = glm::convertSRGBToLinear(ColorSRGB, 2.8f);
+ Error += glm::all(glm::epsilonEqual(ColorSourceRGBA, ColorRGB, 0.00001f)) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace srgb
+
+int main()
+{
+ int Error(0);
+
+ Error += srgb::test();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_color_space.cpp b/3rdparty/glm/source/test/gtx/gtx_color_space.cpp
new file mode 100644
index 0000000..a23d2c8
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_color_space.cpp
@@ -0,0 +1,20 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/color_space.hpp>
+
+int test_saturation()
+{
+ int Error(0);
+
+ glm::vec4 Color = glm::saturation(1.0f, glm::vec4(1.0, 0.5, 0.0, 1.0));
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_saturation();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_color_space_YCoCg.cpp b/3rdparty/glm/source/test/gtx/gtx_color_space_YCoCg.cpp
new file mode 100644
index 0000000..2ca131d
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_color_space_YCoCg.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/color_space_YCoCg.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_common.cpp b/3rdparty/glm/source/test/gtx/gtx_common.cpp
new file mode 100644
index 0000000..fd4fa99
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_common.cpp
@@ -0,0 +1,161 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/common.hpp>
+#include <glm/gtc/integer.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/vector_relational.hpp>
+#include <glm/common.hpp>
+
+namespace fmod_
+{
+ template<typename genType>
+ GLM_FUNC_QUALIFIER genType modTrunc(genType a, genType b)
+ {
+ return a - b * glm::trunc(a / b);
+ }
+
+ int test()
+ {
+ int Error(0);
+
+ {
+ float A0(3.0);
+ float B0(2.0f);
+ float C0 = glm::fmod(A0, B0);
+
+ Error += glm::abs(C0 - 1.0f) < 0.00001f ? 0 : 1;
+
+ glm::vec4 A1(3.0);
+ float B1(2.0f);
+ glm::vec4 C1 = glm::fmod(A1, B1);
+
+ Error += glm::all(glm::epsilonEqual(C1, glm::vec4(1.0f), 0.00001f)) ? 0 : 1;
+
+ glm::vec4 A2(3.0);
+ glm::vec4 B2(2.0f);
+ glm::vec4 C2 = glm::fmod(A2, B2);
+
+ Error += glm::all(glm::epsilonEqual(C2, glm::vec4(1.0f), 0.00001f)) ? 0 : 1;
+
+ glm::ivec4 A3(3);
+ int B3(2);
+ glm::ivec4 C3 = glm::fmod(A3, B3);
+
+ Error += glm::all(glm::equal(C3, glm::ivec4(1))) ? 0 : 1;
+
+ glm::ivec4 A4(3);
+ glm::ivec4 B4(2);
+ glm::ivec4 C4 = glm::fmod(A4, B4);
+
+ Error += glm::all(glm::equal(C4, glm::ivec4(1))) ? 0 : 1;
+ }
+
+ {
+ float A0(22.0);
+ float B0(-10.0f);
+ float C0 = glm::fmod(A0, B0);
+
+ Error += glm::abs(C0 - 2.0f) < 0.00001f ? 0 : 1;
+
+ glm::vec4 A1(22.0);
+ float B1(-10.0f);
+ glm::vec4 C1 = glm::fmod(A1, B1);
+
+ Error += glm::all(glm::epsilonEqual(C1, glm::vec4(2.0f), 0.00001f)) ? 0 : 1;
+
+ glm::vec4 A2(22.0);
+ glm::vec4 B2(-10.0f);
+ glm::vec4 C2 = glm::fmod(A2, B2);
+
+ Error += glm::all(glm::epsilonEqual(C2, glm::vec4(2.0f), 0.00001f)) ? 0 : 1;
+
+ glm::ivec4 A3(22);
+ int B3(-10);
+ glm::ivec4 C3 = glm::fmod(A3, B3);
+
+ Error += glm::all(glm::equal(C3, glm::ivec4(2))) ? 0 : 1;
+
+ glm::ivec4 A4(22);
+ glm::ivec4 B4(-10);
+ glm::ivec4 C4 = glm::fmod(A4, B4);
+
+ Error += glm::all(glm::equal(C4, glm::ivec4(2))) ? 0 : 1;
+ }
+
+ // http://stackoverflow.com/questions/7610631/glsl-mod-vs-hlsl-fmod
+ {
+ for (float y = -10.0f; y < 10.0f; y += 0.1f)
+ for (float x = -10.0f; x < 10.0f; x += 0.1f)
+ {
+ float const A(std::fmod(x, y));
+ //float const B(std::remainder(x, y));
+ float const C(glm::fmod(x, y));
+ float const D(modTrunc(x, y));
+
+ //Error += glm::epsilonEqual(A, B, 0.0001f) ? 0 : 1;
+ //assert(!Error);
+ Error += glm::epsilonEqual(A, C, 0.0001f) ? 0 : 1;
+ assert(!Error);
+ Error += glm::epsilonEqual(A, D, 0.00001f) ? 0 : 1;
+ assert(!Error);
+ }
+ }
+
+ return Error;
+ }
+}//namespace fmod_
+
+int test_isdenormal()
+{
+ int Error = 0;
+
+ bool A = glm::isdenormal(1.0f);
+ Error += !A ? 0 : 1;
+
+ glm::bvec1 B = glm::isdenormal(glm::vec1(1.0f));
+ Error += !glm::any(B) ? 0 : 1;
+
+ glm::bvec2 C = glm::isdenormal(glm::vec2(1.0f));
+ Error += !glm::any(C) ? 0 : 1;
+
+ glm::bvec3 D = glm::isdenormal(glm::vec3(1.0f));
+ Error += !glm::any(D) ? 0 : 1;
+
+ glm::bvec4 E = glm::isdenormal(glm::vec4(1.0f));
+ Error += !glm::any(E) ? 0 : 1;
+
+ return Error;
+}
+
+int test_openBounded()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::openBounded(glm::ivec2(2), glm::ivec2(1), glm::ivec2(3))) ? 0 : 1;
+ Error += !glm::all(glm::openBounded(glm::ivec2(1), glm::ivec2(1), glm::ivec2(3))) ? 0 : 1;
+ Error += !glm::all(glm::openBounded(glm::ivec2(3), glm::ivec2(1), glm::ivec2(3))) ? 0 : 1;
+
+ return Error;
+}
+
+int test_closeBounded()
+{
+ int Error = 0;
+
+ Error += glm::all(glm::closeBounded(glm::ivec2(2), glm::ivec2(1), glm::ivec2(3))) ? 0 : 1;
+ Error += glm::all(glm::closeBounded(glm::ivec2(1), glm::ivec2(1), glm::ivec2(3))) ? 0 : 1;
+ Error += glm::all(glm::closeBounded(glm::ivec2(3), glm::ivec2(1), glm::ivec2(3))) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_isdenormal();
+ Error += ::fmod_::test();
+ Error += test_openBounded();
+ Error += test_closeBounded();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_compatibility.cpp b/3rdparty/glm/source/test/gtx/gtx_compatibility.cpp
new file mode 100644
index 0000000..e5351ce
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_compatibility.cpp
@@ -0,0 +1,19 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/compatibility.hpp>
+
+int main()
+{
+ int Error(0);
+
+ Error += glm::isfinite(1.0f) ? 0 : 1;
+ Error += glm::isfinite(1.0) ? 0 : 1;
+ Error += glm::isfinite(-1.0f) ? 0 : 1;
+ Error += glm::isfinite(-1.0) ? 0 : 1;
+
+ Error += glm::all(glm::isfinite(glm::vec4(1.0f))) ? 0 : 1;
+ Error += glm::all(glm::isfinite(glm::dvec4(1.0))) ? 0 : 1;
+ Error += glm::all(glm::isfinite(glm::vec4(-1.0f))) ? 0 : 1;
+ Error += glm::all(glm::isfinite(glm::dvec4(-1.0))) ? 0 : 1;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_component_wise.cpp b/3rdparty/glm/source/test/gtx/gtx_component_wise.cpp
new file mode 100644
index 0000000..29c81af
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_component_wise.cpp
@@ -0,0 +1,116 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/component_wise.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/constants.hpp>
+#include <limits>
+
+namespace compNormalize
+{
+ int run()
+ {
+ int Error(0);
+
+ {
+ glm::vec4 const A = glm::compNormalize<float>(glm::u8vec4(0, 127, 128, 255));
+
+ Error += glm::epsilonEqual(A.x, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += A.y < 0.5f ? 0 : 1;
+ Error += A.z > 0.5f ? 0 : 1;
+ Error += glm::epsilonEqual(A.w, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const A = glm::compNormalize<float>(glm::i8vec4(-128, -1, 0, 127));
+
+ Error += glm::epsilonEqual(A.x,-1.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += A.y < 0.0f ? 0 : 1;
+ Error += A.z > 0.0f ? 0 : 1;
+ Error += glm::epsilonEqual(A.w, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const A = glm::compNormalize<float>(glm::u16vec4(
+ std::numeric_limits<glm::u16>::min(),
+ (std::numeric_limits<glm::u16>::max() >> 1) + 0,
+ (std::numeric_limits<glm::u16>::max() >> 1) + 1,
+ std::numeric_limits<glm::u16>::max()));
+
+ Error += glm::epsilonEqual(A.x, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += A.y < 0.5f ? 0 : 1;
+ Error += A.z > 0.5f ? 0 : 1;
+ Error += glm::epsilonEqual(A.w, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 const A = glm::compNormalize<float>(glm::i16vec4(
+ std::numeric_limits<glm::i16>::min(),
+ static_cast<glm::i16>(-1),
+ static_cast<glm::i16>(0),
+ std::numeric_limits<glm::i16>::max()));
+
+ Error += glm::epsilonEqual(A.x,-1.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += A.y < 0.0f ? 0 : 1;
+ Error += A.z > 0.0f ? 0 : 1;
+ Error += glm::epsilonEqual(A.w, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace compNormalize
+
+namespace compScale
+{
+ int run()
+ {
+ int Error(0);
+
+ {
+ glm::u8vec4 const A = glm::compScale<glm::u8>(glm::vec4(0.0f, 0.2f, 0.5f, 1.0f));
+
+ Error += A.x == std::numeric_limits<glm::u8>::min() ? 0 : 1;
+ Error += A.y < (std::numeric_limits<glm::u8>::max() >> 2) ? 0 : 1;
+ Error += A.z == 127 ? 0 : 1;
+ Error += A.w == 255 ? 0 : 1;
+ }
+
+ {
+ glm::i8vec4 const A = glm::compScale<glm::i8>(glm::vec4(0.0f,-1.0f, 0.5f, 1.0f));
+
+ Error += A.x == 0 ? 0 : 1;
+ Error += A.y == -128 ? 0 : 1;
+ Error += A.z == 63 ? 0 : 1;
+ Error += A.w == 127 ? 0 : 1;
+ }
+
+ {
+ glm::u16vec4 const A = glm::compScale<glm::u16>(glm::vec4(0.0f, 0.2f, 0.5f, 1.0f));
+
+ Error += A.x == std::numeric_limits<glm::u16>::min() ? 0 : 1;
+ Error += A.y < (std::numeric_limits<glm::u16>::max() >> 2) ? 0 : 1;
+ Error += A.z == 32767 ? 0 : 1;
+ Error += A.w == 65535 ? 0 : 1;
+ }
+
+ {
+ glm::i16vec4 const A = glm::compScale<glm::i16>(glm::vec4(0.0f,-1.0f, 0.5f, 1.0f));
+
+ Error += A.x == 0 ? 0 : 1;
+ Error += A.y == -32768 ? 0 : 1;
+ Error += A.z == 16383 ? 0 : 1;
+ Error += A.w == 32767 ? 0 : 1;
+ }
+
+ return Error;
+ }
+}// compScale
+
+int main()
+{
+ int Error(0);
+
+ Error += compNormalize::run();
+ Error += compScale::run();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_dual_quaternion.cpp b/3rdparty/glm/source/test/gtx/gtx_dual_quaternion.cpp
new file mode 100644
index 0000000..ceedc2c
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_dual_quaternion.cpp
@@ -0,0 +1,205 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#define GLM_FORCE_CTOR_INIT
+#include <glm/gtx/dual_quaternion.hpp>
+#include <glm/gtc/matrix_transform.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtx/euler_angles.hpp>
+#include <glm/vector_relational.hpp>
+#if GLM_HAS_TRIVIAL_QUERIES
+# include <type_traits>
+#endif
+
+int myrand()
+{
+ static int holdrand = 1;
+ return (((holdrand = holdrand * 214013L + 2531011L) >> 16) & 0x7fff);
+}
+
+float myfrand() // returns values from -1 to 1 inclusive
+{
+ return float(double(myrand()) / double( 0x7ffff )) * 2.0f - 1.0f;
+}
+
+int test_dquat_type()
+{
+ glm::dvec3 vA;
+ glm::dquat dqA, dqB;
+ glm::ddualquat C(dqA, dqB);
+ glm::ddualquat B(dqA);
+ glm::ddualquat D(dqA, vA);
+ return 0;
+}
+
+int test_scalars()
+{
+ float const Epsilon = 0.0001f;
+
+ int Error(0);
+
+ glm::quat src_q1 = glm::quat(1.0f,2.0f,3.0f,4.0f);
+ glm::quat src_q2 = glm::quat(5.0f,6.0f,7.0f,8.0f);
+ glm::dualquat src1(src_q1,src_q2);
+
+ {
+ glm::dualquat dst1 = src1 * 2.0f;
+ glm::dualquat dst2 = 2.0f * src1;
+ glm::dualquat dst3 = src1;
+ dst3 *= 2.0f;
+ glm::dualquat dstCmp(src_q1 * 2.0f,src_q2 * 2.0f);
+ Error += glm::all(glm::epsilonEqual(dst1.real,dstCmp.real, Epsilon)) && glm::all(glm::epsilonEqual(dst1.dual,dstCmp.dual, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(dst2.real,dstCmp.real, Epsilon)) && glm::all(glm::epsilonEqual(dst2.dual,dstCmp.dual, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(dst3.real,dstCmp.real, Epsilon)) && glm::all(glm::epsilonEqual(dst3.dual,dstCmp.dual, Epsilon)) ? 0 : 1;
+ }
+
+ {
+ glm::dualquat dst1 = src1 / 2.0f;
+ glm::dualquat dst2 = src1;
+ dst2 /= 2.0f;
+ glm::dualquat dstCmp(src_q1 / 2.0f,src_q2 / 2.0f);
+ Error += glm::all(glm::epsilonEqual(dst1.real,dstCmp.real, Epsilon)) && glm::all(glm::epsilonEqual(dst1.dual,dstCmp.dual, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(dst2.real,dstCmp.real, Epsilon)) && glm::all(glm::epsilonEqual(dst2.dual,dstCmp.dual, Epsilon)) ? 0 : 1;
+ }
+ return Error;
+}
+
+int test_inverse()
+{
+ int Error(0);
+
+ float const Epsilon = 0.0001f;
+
+ glm::dualquat dqid = glm::dual_quat_identity<float, glm::defaultp>();
+ glm::mat4x4 mid(1.0f);
+
+ for (int j = 0; j < 100; ++j)
+ {
+ glm::mat4x4 rot = glm::yawPitchRoll(myfrand() * 360.0f, myfrand() * 360.0f, myfrand() * 360.0f);
+ glm::vec3 vt = glm::vec3(myfrand() * 10.0f, myfrand() * 10.0f, myfrand() * 10.0f);
+
+ glm::mat4x4 m = glm::translate(mid, vt) * rot;
+
+ glm::quat qr = glm::quat_cast(m);
+
+ glm::dualquat dq(qr);
+
+ glm::dualquat invdq = glm::inverse(dq);
+
+ glm::dualquat r1 = invdq * dq;
+ glm::dualquat r2 = dq * invdq;
+
+ Error += glm::all(glm::epsilonEqual(r1.real, dqid.real, Epsilon)) && glm::all(glm::epsilonEqual(r1.dual, dqid.dual, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(r2.real, dqid.real, Epsilon)) && glm::all(glm::epsilonEqual(r2.dual, dqid.dual, Epsilon)) ? 0 : 1;
+
+ // testing commutative property
+ glm::dualquat r ( glm::quat( myfrand() * glm::pi<float>() * 2.0f, myfrand(), myfrand(), myfrand() ),
+ glm::vec3(myfrand() * 10.0f, myfrand() * 10.0f, myfrand() * 10.0f) );
+ glm::dualquat riq = (r * invdq) * dq;
+ glm::dualquat rqi = (r * dq) * invdq;
+
+ Error += glm::all(glm::epsilonEqual(riq.real, rqi.real, Epsilon)) && glm::all(glm::epsilonEqual(riq.dual, rqi.dual, Epsilon)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_mul()
+{
+ int Error(0);
+
+ float const Epsilon = 0.0001f;
+
+ glm::mat4x4 mid(1.0f);
+
+ for (int j = 0; j < 100; ++j)
+ {
+ // generate random rotations and translations and compare transformed by matrix and dualquats random points
+ glm::vec3 vt1 = glm::vec3(myfrand() * 10.0f, myfrand() * 10.0f, myfrand() * 10.0f);
+ glm::vec3 vt2 = glm::vec3(myfrand() * 10.0f, myfrand() * 10.0f, myfrand() * 10.0f);
+
+ glm::mat4x4 rot1 = glm::yawPitchRoll(myfrand() * 360.0f, myfrand() * 360.0f, myfrand() * 360.0f);
+ glm::mat4x4 rot2 = glm::yawPitchRoll(myfrand() * 360.0f, myfrand() * 360.0f, myfrand() * 360.0f);
+ glm::mat4x4 m1 = glm::translate(mid, vt1) * rot1;
+ glm::mat4x4 m2 = glm::translate(mid, vt2) * rot2;
+ glm::mat4x4 m3 = m2 * m1;
+ glm::mat4x4 m4 = m1 * m2;
+
+ glm::quat qrot1 = glm::quat_cast(rot1);
+ glm::quat qrot2 = glm::quat_cast(rot2);
+
+ glm::dualquat dq1 = glm::dualquat(qrot1,vt1);
+ glm::dualquat dq2 = glm::dualquat(qrot2,vt2);
+ glm::dualquat dq3 = dq2 * dq1;
+ glm::dualquat dq4 = dq1 * dq2;
+
+ for (int i = 0; i < 100; ++i)
+ {
+ glm::vec4 src_pt = glm::vec4(myfrand() * 4.0f, myfrand() * 5.0f, myfrand() * 3.0f,1.0f);
+ // test both multiplication orders
+ glm::vec4 dst_pt_m3 = m3 * src_pt;
+ glm::vec4 dst_pt_dq3 = dq3 * src_pt;
+
+ glm::vec4 dst_pt_m3_i = glm::inverse(m3) * src_pt;
+ glm::vec4 dst_pt_dq3_i = src_pt * dq3;
+
+ glm::vec4 dst_pt_m4 = m4 * src_pt;
+ glm::vec4 dst_pt_dq4 = dq4 * src_pt;
+
+ glm::vec4 dst_pt_m4_i = glm::inverse(m4) * src_pt;
+ glm::vec4 dst_pt_dq4_i = src_pt * dq4;
+
+ Error += glm::all(glm::epsilonEqual(dst_pt_m3, dst_pt_dq3, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(dst_pt_m4, dst_pt_dq4, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(dst_pt_m3_i, dst_pt_dq3_i, Epsilon)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(dst_pt_m4_i, dst_pt_dq4_i, Epsilon)) ? 0 : 1;
+ }
+ }
+
+ return Error;
+}
+
+int test_dual_quat_ctr()
+{
+ int Error(0);
+
+# if GLM_HAS_TRIVIAL_QUERIES
+ // Error += std::is_trivially_default_constructible<glm::dualquat>::value ? 0 : 1;
+ // Error += std::is_trivially_default_constructible<glm::ddualquat>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::dualquat>::value ? 0 : 1;
+ // Error += std::is_trivially_copy_assignable<glm::ddualquat>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::dualquat>::value ? 0 : 1;
+ Error += std::is_trivially_copyable<glm::ddualquat>::value ? 0 : 1;
+
+ Error += std::is_copy_constructible<glm::dualquat>::value ? 0 : 1;
+ Error += std::is_copy_constructible<glm::ddualquat>::value ? 0 : 1;
+# endif
+
+ return Error;
+}
+
+int test_size()
+{
+ int Error = 0;
+
+ Error += 32 == sizeof(glm::dualquat) ? 0 : 1;
+ Error += 64 == sizeof(glm::ddualquat) ? 0 : 1;
+ Error += glm::dualquat().length() == 2 ? 0 : 1;
+ Error += glm::ddualquat().length() == 2 ? 0 : 1;
+ Error += glm::dualquat::length() == 2 ? 0 : 1;
+ Error += glm::ddualquat::length() == 2 ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_dual_quat_ctr();
+ Error += test_dquat_type();
+ Error += test_scalars();
+ Error += test_inverse();
+ Error += test_mul();
+ Error += test_size();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_easing.cpp b/3rdparty/glm/source/test/gtx/gtx_easing.cpp
new file mode 100644
index 0000000..0e98cd5
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_easing.cpp
@@ -0,0 +1,65 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+#include <glm/gtx/quaternion.hpp>
+#include <glm/gtx/easing.hpp>
+
+namespace
+{
+
+ template<typename T>
+ void _test_easing()
+ {
+ T a = static_cast<T>(0.5);
+ T r;
+
+ r = glm::linearInterpolation(a);
+
+ r = glm::quadraticEaseIn(a);
+ r = glm::quadraticEaseOut(a);
+ r = glm::quadraticEaseInOut(a);
+
+ r = glm::cubicEaseIn(a);
+ r = glm::cubicEaseOut(a);
+ r = glm::cubicEaseInOut(a);
+
+ r = glm::quarticEaseIn(a);
+ r = glm::quarticEaseOut(a);
+ r = glm::quinticEaseInOut(a);
+
+ r = glm::sineEaseIn(a);
+ r = glm::sineEaseOut(a);
+ r = glm::sineEaseInOut(a);
+
+ r = glm::circularEaseIn(a);
+ r = glm::circularEaseOut(a);
+ r = glm::circularEaseInOut(a);
+
+ r = glm::exponentialEaseIn(a);
+ r = glm::exponentialEaseOut(a);
+ r = glm::exponentialEaseInOut(a);
+
+ r = glm::elasticEaseIn(a);
+ r = glm::elasticEaseOut(a);
+ r = glm::elasticEaseInOut(a);
+
+ r = glm::backEaseIn(a);
+ r = glm::backEaseOut(a);
+ r = glm::backEaseInOut(a);
+
+ r = glm::bounceEaseIn(a);
+ r = glm::bounceEaseOut(a);
+ r = glm::bounceEaseInOut(a);
+ }
+
+}
+
+int main()
+{
+ int Error = 0;
+
+ _test_easing<float>();
+ _test_easing<double>();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_euler_angle.cpp b/3rdparty/glm/source/test/gtx/gtx_euler_angle.cpp
new file mode 100644
index 0000000..348f581
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_euler_angle.cpp
@@ -0,0 +1,539 @@
+// Code sample from Filippo Ramaciotti
+
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/matrix_transform.hpp>
+#include <glm/gtx/matrix_cross_product.hpp>
+#include <glm/gtx/matrix_operation.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtx/string_cast.hpp>
+#include <glm/gtx/euler_angles.hpp>
+#include <cstdio>
+#include <vector>
+#include <utility>
+
+namespace test_eulerAngleX
+{
+ int test()
+ {
+ int Error = 0;
+
+ float const Angle(glm::pi<float>() * 0.5f);
+ glm::vec3 const X(1.0f, 0.0f, 0.0f);
+
+ glm::vec4 const Y(0.0f, 1.0f, 0.0f, 1.0f);
+ glm::vec4 const Y1 = glm::rotate(glm::mat4(1.0f), Angle, X) * Y;
+ glm::vec4 const Y2 = glm::eulerAngleX(Angle) * Y;
+ glm::vec4 const Y3 = glm::eulerAngleXY(Angle, 0.0f) * Y;
+ glm::vec4 const Y4 = glm::eulerAngleYX(0.0f, Angle) * Y;
+ glm::vec4 const Y5 = glm::eulerAngleXZ(Angle, 0.0f) * Y;
+ glm::vec4 const Y6 = glm::eulerAngleZX(0.0f, Angle) * Y;
+ glm::vec4 const Y7 = glm::eulerAngleYXZ(0.0f, Angle, 0.0f) * Y;
+ Error += glm::all(glm::epsilonEqual(Y1, Y2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Y1, Y3, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Y1, Y4, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Y1, Y5, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Y1, Y6, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Y1, Y7, 0.00001f)) ? 0 : 1;
+
+ glm::vec4 const Z(0.0f, 0.0f, 1.0f, 1.0f);
+ glm::vec4 const Z1 = glm::rotate(glm::mat4(1.0f), Angle, X) * Z;
+ glm::vec4 const Z2 = glm::eulerAngleX(Angle) * Z;
+ glm::vec4 const Z3 = glm::eulerAngleXY(Angle, 0.0f) * Z;
+ glm::vec4 const Z4 = glm::eulerAngleYX(0.0f, Angle) * Z;
+ glm::vec4 const Z5 = glm::eulerAngleXZ(Angle, 0.0f) * Z;
+ glm::vec4 const Z6 = glm::eulerAngleZX(0.0f, Angle) * Z;
+ glm::vec4 const Z7 = glm::eulerAngleYXZ(0.0f, Angle, 0.0f) * Z;
+ Error += glm::all(glm::epsilonEqual(Z1, Z2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z3, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z4, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z5, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z6, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z7, 0.00001f)) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace test_eulerAngleX
+
+namespace test_eulerAngleY
+{
+ int test()
+ {
+ int Error = 0;
+
+ float const Angle(glm::pi<float>() * 0.5f);
+ glm::vec3 const Y(0.0f, 1.0f, 0.0f);
+
+ glm::vec4 const X(1.0f, 0.0f, 0.0f, 1.0f);
+ glm::vec4 const X1 = glm::rotate(glm::mat4(1.0f), Angle, Y) * X;
+ glm::vec4 const X2 = glm::eulerAngleY(Angle) * X;
+ glm::vec4 const X3 = glm::eulerAngleYX(Angle, 0.0f) * X;
+ glm::vec4 const X4 = glm::eulerAngleXY(0.0f, Angle) * X;
+ glm::vec4 const X5 = glm::eulerAngleYZ(Angle, 0.0f) * X;
+ glm::vec4 const X6 = glm::eulerAngleZY(0.0f, Angle) * X;
+ glm::vec4 const X7 = glm::eulerAngleYXZ(Angle, 0.0f, 0.0f) * X;
+ Error += glm::all(glm::epsilonEqual(X1, X2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X3, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X4, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X5, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X6, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X7, 0.00001f)) ? 0 : 1;
+
+ glm::vec4 const Z(0.0f, 0.0f, 1.0f, 1.0f);
+ glm::vec4 const Z1 = glm::eulerAngleY(Angle) * Z;
+ glm::vec4 const Z2 = glm::rotate(glm::mat4(1.0f), Angle, Y) * Z;
+ glm::vec4 const Z3 = glm::eulerAngleYX(Angle, 0.0f) * Z;
+ glm::vec4 const Z4 = glm::eulerAngleXY(0.0f, Angle) * Z;
+ glm::vec4 const Z5 = glm::eulerAngleYZ(Angle, 0.0f) * Z;
+ glm::vec4 const Z6 = glm::eulerAngleZY(0.0f, Angle) * Z;
+ glm::vec4 const Z7 = glm::eulerAngleYXZ(Angle, 0.0f, 0.0f) * Z;
+ Error += glm::all(glm::epsilonEqual(Z1, Z2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z3, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z4, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z5, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z6, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z7, 0.00001f)) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace test_eulerAngleY
+
+namespace test_eulerAngleZ
+{
+ int test()
+ {
+ int Error = 0;
+
+ float const Angle(glm::pi<float>() * 0.5f);
+ glm::vec3 const Z(0.0f, 0.0f, 1.0f);
+
+ glm::vec4 const X(1.0f, 0.0f, 0.0f, 1.0f);
+ glm::vec4 const X1 = glm::rotate(glm::mat4(1.0f), Angle, Z) * X;
+ glm::vec4 const X2 = glm::eulerAngleZ(Angle) * X;
+ glm::vec4 const X3 = glm::eulerAngleZX(Angle, 0.0f) * X;
+ glm::vec4 const X4 = glm::eulerAngleXZ(0.0f, Angle) * X;
+ glm::vec4 const X5 = glm::eulerAngleZY(Angle, 0.0f) * X;
+ glm::vec4 const X6 = glm::eulerAngleYZ(0.0f, Angle) * X;
+ glm::vec4 const X7 = glm::eulerAngleYXZ(0.0f, 0.0f, Angle) * X;
+ Error += glm::all(glm::epsilonEqual(X1, X2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X3, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X4, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X5, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X6, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(X1, X7, 0.00001f)) ? 0 : 1;
+
+ glm::vec4 const Y(1.0f, 0.0f, 0.0f, 1.0f);
+ glm::vec4 const Z1 = glm::rotate(glm::mat4(1.0f), Angle, Z) * Y;
+ glm::vec4 const Z2 = glm::eulerAngleZ(Angle) * Y;
+ glm::vec4 const Z3 = glm::eulerAngleZX(Angle, 0.0f) * Y;
+ glm::vec4 const Z4 = glm::eulerAngleXZ(0.0f, Angle) * Y;
+ glm::vec4 const Z5 = glm::eulerAngleZY(Angle, 0.0f) * Y;
+ glm::vec4 const Z6 = glm::eulerAngleYZ(0.0f, Angle) * Y;
+ glm::vec4 const Z7 = glm::eulerAngleYXZ(0.0f, 0.0f, Angle) * Y;
+ Error += glm::all(glm::epsilonEqual(Z1, Z2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z3, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z4, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z5, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z6, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(Z1, Z7, 0.00001f)) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace test_eulerAngleZ
+
+namespace test_derivedEulerAngles
+{
+ bool epsilonEqual(glm::mat4 const& mat1, glm::mat4 const& mat2, glm::mat4::value_type const& epsilon)
+ {
+ return glm::all(glm::epsilonEqual(mat1[0], mat2[0], epsilon)) ?
+ (
+ glm::all(glm::epsilonEqual(mat1[1], mat2[1], epsilon)) ?
+ (
+ glm::all(glm::epsilonEqual(mat1[2], mat2[2], epsilon)) ?
+ (
+ glm::all(glm::epsilonEqual(mat1[3], mat2[3], epsilon)) ? true : false
+ ) : false
+ ) : false
+ ) : false;
+ }
+
+ template<typename RotationFunc, typename TestDerivedFunc>
+ int test(RotationFunc rotationFunc, TestDerivedFunc testDerivedFunc, const glm::vec3& basis)
+ {
+ int Error = 0;
+
+ typedef glm::vec3::value_type value;
+ value const zeroAngle(0.0f);
+ value const Angle(glm::pi<float>() * 0.75f);
+ value const negativeAngle(-Angle);
+ value const zeroAngleVelocity(0.0f);
+ value const AngleVelocity(glm::pi<float>() * 0.27f);
+ value const negativeAngleVelocity(-AngleVelocity);
+
+ typedef std::pair<value,value> AngleAndAngleVelocity;
+ std::vector<AngleAndAngleVelocity> testPairs;
+ testPairs.push_back(AngleAndAngleVelocity(zeroAngle, zeroAngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(zeroAngle, AngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(zeroAngle, negativeAngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(Angle, zeroAngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(Angle, AngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(Angle, negativeAngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(negativeAngle, zeroAngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(negativeAngle, AngleVelocity));
+ testPairs.push_back(AngleAndAngleVelocity(negativeAngle, negativeAngleVelocity));
+
+ for (size_t i = 0, size = testPairs.size(); i < size; ++i)
+ {
+ AngleAndAngleVelocity const& pair = testPairs.at(i);
+
+ glm::mat4 const W = glm::matrixCross4(basis * pair.second);
+ glm::mat4 const rotMt = glm::transpose(rotationFunc(pair.first));
+ glm::mat4 const derivedRotM = testDerivedFunc(pair.first, pair.second);
+
+ Error += epsilonEqual(W, derivedRotM * rotMt, 0.00001f) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace test_derivedEulerAngles
+
+namespace test_eulerAngleXY
+{
+ int test()
+ {
+ int Error = 0;
+
+ glm::vec4 const V(1.0f);
+
+ float const AngleX(glm::pi<float>() * 0.5f);
+ float const AngleY(glm::pi<float>() * 0.25f);
+
+ glm::vec3 const axisX(1.0f, 0.0f, 0.0f);
+ glm::vec3 const axisY(0.0f, 1.0f, 0.0f);
+
+ glm::vec4 const V1 = (glm::rotate(glm::mat4(1.0f), AngleX, axisX) * glm::rotate(glm::mat4(1.0f), AngleY, axisY)) * V;
+ glm::vec4 const V2 = glm::eulerAngleXY(AngleX, AngleY) * V;
+ glm::vec4 const V3 = glm::eulerAngleX(AngleX) * glm::eulerAngleY(AngleY) * V;
+ Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+ Error += glm::all(glm::epsilonEqual(V1, V3, 0.00001f)) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace test_eulerAngleXY
+
+namespace test_eulerAngleYX
+{
+	// Verifies glm::eulerAngleYX agrees with both an explicit rotate() composition
+	// and the product of the single-axis eulerAngleY/eulerAngleX matrices.
+	int test()
+	{
+		int Error = 0;
+
+		glm::vec4 const V(1.0f);
+
+		float const AngleX(glm::pi<float>() * 0.5f);
+		float const AngleY(glm::pi<float>() * 0.25f);
+
+		glm::vec3 const axisX(1.0f, 0.0f, 0.0f);
+		glm::vec3 const axisY(0.0f, 1.0f, 0.0f);
+
+		glm::vec4 const V1 = (glm::rotate(glm::mat4(1.0f), AngleY, axisY) * glm::rotate(glm::mat4(1.0f), AngleX, axisX)) * V;
+		glm::vec4 const V2 = glm::eulerAngleYX(AngleY, AngleX) * V;
+		glm::vec4 const V3 = glm::eulerAngleY(AngleY) * glm::eulerAngleX(AngleX) * V;
+		Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		Error += glm::all(glm::epsilonEqual(V1, V3, 0.00001f)) ? 0 : 1;
+
+		return Error;
+	}
+}//namespace test_eulerAngleYX
+
+namespace test_eulerAngleXZ
+{
+	// Verifies glm::eulerAngleXZ agrees with both an explicit rotate() composition
+	// and the product of the single-axis eulerAngleX/eulerAngleZ matrices.
+	int test()
+	{
+		int Error = 0;
+
+		glm::vec4 const V(1.0f);
+
+		float const AngleX(glm::pi<float>() * 0.5f);
+		float const AngleZ(glm::pi<float>() * 0.25f);
+
+		glm::vec3 const axisX(1.0f, 0.0f, 0.0f);
+		glm::vec3 const axisZ(0.0f, 0.0f, 1.0f);
+
+		glm::vec4 const V1 = (glm::rotate(glm::mat4(1.0f), AngleX, axisX) * glm::rotate(glm::mat4(1.0f), AngleZ, axisZ)) * V;
+		glm::vec4 const V2 = glm::eulerAngleXZ(AngleX, AngleZ) * V;
+		glm::vec4 const V3 = glm::eulerAngleX(AngleX) * glm::eulerAngleZ(AngleZ) * V;
+		Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		Error += glm::all(glm::epsilonEqual(V1, V3, 0.00001f)) ? 0 : 1;
+
+		return Error;
+	}
+}//namespace test_eulerAngleXZ
+
+namespace test_eulerAngleZX
+{
+	// Verifies glm::eulerAngleZX agrees with both an explicit rotate() composition
+	// and the product of the single-axis eulerAngleZ/eulerAngleX matrices.
+	int test()
+	{
+		int Error = 0;
+
+		glm::vec4 const V(1.0f);
+
+		float const AngleX(glm::pi<float>() * 0.5f);
+		float const AngleZ(glm::pi<float>() * 0.25f);
+
+		glm::vec3 const axisX(1.0f, 0.0f, 0.0f);
+		glm::vec3 const axisZ(0.0f, 0.0f, 1.0f);
+
+		glm::vec4 const V1 = (glm::rotate(glm::mat4(1.0f), AngleZ, axisZ) * glm::rotate(glm::mat4(1.0f), AngleX, axisX)) * V;
+		glm::vec4 const V2 = glm::eulerAngleZX(AngleZ, AngleX) * V;
+		glm::vec4 const V3 = glm::eulerAngleZ(AngleZ) * glm::eulerAngleX(AngleX) * V;
+		Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		Error += glm::all(glm::epsilonEqual(V1, V3, 0.00001f)) ? 0 : 1;
+
+		return Error;
+	}
+}//namespace test_eulerAngleZX
+
+namespace test_eulerAngleYZ
+{
+	// Verifies glm::eulerAngleYZ agrees with both an explicit rotate() composition
+	// and the product of the single-axis eulerAngleY/eulerAngleZ matrices.
+	int test()
+	{
+		int Error = 0;
+
+		glm::vec4 const V(1.0f);
+
+		float const AngleY(glm::pi<float>() * 0.5f);
+		float const AngleZ(glm::pi<float>() * 0.25f);
+
+		// axisX removed: it was declared but never used by this test.
+		glm::vec3 const axisY(0.0f, 1.0f, 0.0f);
+		glm::vec3 const axisZ(0.0f, 0.0f, 1.0f);
+
+		glm::vec4 const V1 = (glm::rotate(glm::mat4(1.0f), AngleY, axisY) * glm::rotate(glm::mat4(1.0f), AngleZ, axisZ)) * V;
+		glm::vec4 const V2 = glm::eulerAngleYZ(AngleY, AngleZ) * V;
+		glm::vec4 const V3 = glm::eulerAngleY(AngleY) * glm::eulerAngleZ(AngleZ) * V;
+		Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		Error += glm::all(glm::epsilonEqual(V1, V3, 0.00001f)) ? 0 : 1;
+
+		return Error;
+	}
+}//namespace test_eulerAngleYZ
+
+namespace test_eulerAngleZY
+{
+	// Verifies glm::eulerAngleZY agrees with both an explicit rotate() composition
+	// and the product of the single-axis eulerAngleZ/eulerAngleY matrices.
+	int test()
+	{
+		int Error = 0;
+
+		glm::vec4 const V(1.0f);
+
+		float const AngleY(glm::pi<float>() * 0.5f);
+		float const AngleZ(glm::pi<float>() * 0.25f);
+
+		// axisX removed: it was declared but never used by this test.
+		glm::vec3 const axisY(0.0f, 1.0f, 0.0f);
+		glm::vec3 const axisZ(0.0f, 0.0f, 1.0f);
+
+		glm::vec4 const V1 = (glm::rotate(glm::mat4(1.0f), AngleZ, axisZ) * glm::rotate(glm::mat4(1.0f), AngleY, axisY)) * V;
+		glm::vec4 const V2 = glm::eulerAngleZY(AngleZ, AngleY) * V;
+		glm::vec4 const V3 = glm::eulerAngleZ(AngleZ) * glm::eulerAngleY(AngleY) * V;
+		Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		Error += glm::all(glm::epsilonEqual(V1, V3, 0.00001f)) ? 0 : 1;
+
+		return Error;
+	}
+}//namespace test_eulerAngleZY
+
+namespace test_eulerAngleYXZ
+{
+	// Prints eulerAngleYXZ against a rotate()-built matrix and a Y-inverted variant
+	// for visual comparison. NOTE(review): this test only prints residuals and
+	// always returns 0 — it never contributes to the error count.
+	int test()
+	{
+		glm::f32 first = 1.046f;
+		glm::f32 second = 0.52f;
+		glm::f32 third = -0.785f;
+
+		glm::fmat4 rotationEuler = glm::eulerAngleYXZ(first, second, third);
+
+		// NOTE(review): Y angle deliberately negated here, presumably to expose a
+		// sign convention difference — confirm intent against eulerAngleYXZ docs.
+		glm::fmat4 rotationInvertedY = glm::eulerAngleY(-1.f*first) * glm::eulerAngleX(second) * glm::eulerAngleZ(third);
+		glm::fmat4 rotationDumb = glm::fmat4();
+		rotationDumb = glm::rotate(rotationDumb, first, glm::fvec3(0,1,0));
+		rotationDumb = glm::rotate(rotationDumb, second, glm::fvec3(1,0,0));
+		rotationDumb = glm::rotate(rotationDumb, third, glm::fvec3(0,0,1));
+
+		std::printf("%s\n", glm::to_string(glm::fmat3(rotationEuler)).c_str());
+		std::printf("%s\n", glm::to_string(glm::fmat3(rotationDumb)).c_str());
+		std::printf("%s\n", glm::to_string(glm::fmat3(rotationInvertedY)).c_str());
+
+		std::printf("\nRESIDUAL\n");
+		std::printf("%s\n", glm::to_string(glm::fmat3(rotationEuler-(rotationDumb))).c_str());
+		std::printf("%s\n", glm::to_string(glm::fmat3(rotationEuler-(rotationInvertedY))).c_str());
+
+		return 0;
+	}
+}//namespace test_eulerAngleYXZ
+
+namespace test_eulerAngles
+{
+	// Generic three-angle check: testRotationFunc(x, y, z) must match the matrix
+	// built by applying rotate() about the axes I, J, K in sequence, over a sweep
+	// of boundary and near-boundary angle triplets.
+	template<typename TestRotationFunc>
+	int test(TestRotationFunc testRotationFunc, glm::vec3 const& I, glm::vec3 const& J, glm::vec3 const& K)
+	{
+		int Error = 0;
+
+		typedef glm::mat4::value_type value;
+		value const minAngle(-glm::pi<value>());
+		value const maxAngle(glm::pi<value>());
+		value const maxAngleWithDelta(maxAngle - 0.0000001f);
+		value const minMidAngle(-glm::pi<value>() * 0.5f);
+		value const maxMidAngle(glm::pi<value>() * 0.5f);
+
+		// Edge cases around +/-pi and the +/-pi/2 gimbal-sensitive middle angle.
+		std::vector<glm::vec3> testEulerAngles;
+		testEulerAngles.push_back(glm::vec3(1.046f, 0.52f, -0.785f));
+		testEulerAngles.push_back(glm::vec3(minAngle, minMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, minMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, minMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(minAngle, maxMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, maxMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, maxMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngle, minMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, minMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, minMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, minMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, minMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, maxMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, maxMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, maxMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(minAngle, 0.0f, minAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, 0.0f, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxAngle, maxAngle));
+
+		for (size_t i = 0, size = testEulerAngles.size(); i < size; ++i)
+		{
+			glm::vec3 const& angles = testEulerAngles.at(i);
+			glm::mat4 const rotationEuler = testRotationFunc(angles.x, angles.y, angles.z);
+
+			glm::mat4 rotationDumb = glm::diagonal4x4(glm::mat4::col_type(1.0f));
+			rotationDumb = glm::rotate(rotationDumb, angles.x, I);
+			rotationDumb = glm::rotate(rotationDumb, angles.y, J);
+			rotationDumb = glm::rotate(rotationDumb, angles.z, K);
+
+			// Compare the two rotations by their action on a fixed vector.
+			glm::vec4 const V(1.0f,1.0f,1.0f,1.0f);
+			glm::vec4 const V1 = rotationEuler * V;
+			glm::vec4 const V2 = rotationDumb * V;
+
+			Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		}
+
+		return Error;
+	}
+}//namespace test_eulerAngles
+
+namespace test_extractsEulerAngles
+{
+	// Round-trip check: build a rotation from a triplet, extract angles back with
+	// testExtractionFunc, rebuild, and verify both matrices act identically on a
+	// fixed vector (extracted angles may differ, but the rotation must match).
+	template<typename RotationFunc, typename TestExtractionFunc>
+	int test(RotationFunc rotationFunc, TestExtractionFunc testExtractionFunc)
+	{
+		int Error = 0;
+
+		typedef glm::mat4::value_type value;
+		value const minAngle(-glm::pi<value>());
+		value const maxAngle(glm::pi<value>());
+		value const maxAngleWithDelta(maxAngle - 0.0000001f);
+		value const minMidAngle(-glm::pi<value>() * 0.5f);
+		value const maxMidAngle(glm::pi<value>() * 0.5f);
+
+		// Edge cases around +/-pi and the +/-pi/2 gimbal-sensitive middle angle.
+		std::vector<glm::vec3> testEulerAngles;
+		testEulerAngles.push_back(glm::vec3(1.046f, 0.52f, -0.785f));
+		testEulerAngles.push_back(glm::vec3(minAngle, minMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, minMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, minMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(minAngle, maxMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, maxMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, maxMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngle, minMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, minMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, minMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, minMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, minMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, maxMidAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, maxMidAngle, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngleWithDelta, maxMidAngle, maxAngleWithDelta));
+		testEulerAngles.push_back(glm::vec3(minAngle, 0.0f, minAngle));
+		testEulerAngles.push_back(glm::vec3(minAngle, 0.0f, maxAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxAngle, minAngle));
+		testEulerAngles.push_back(glm::vec3(maxAngle, maxAngle, maxAngle));
+
+		for (size_t i = 0, size = testEulerAngles.size(); i < size; ++i)
+		{
+			glm::vec3 const& angles = testEulerAngles.at(i);
+			glm::mat4 const rotation = rotationFunc(angles.x, angles.y, angles.z);
+
+			glm::vec3 extractedEulerAngles(0.0f);
+			testExtractionFunc(rotation, extractedEulerAngles.x, extractedEulerAngles.y, extractedEulerAngles.z);
+			glm::mat4 const extractedRotation = rotationFunc(extractedEulerAngles.x, extractedEulerAngles.y, extractedEulerAngles.z);
+
+			glm::vec4 const V(1.0f,1.0f,1.0f,1.0f);
+			glm::vec4 const V1 = rotation * V;
+			glm::vec4 const V2 = extractedRotation * V;
+
+			Error += glm::all(glm::epsilonEqual(V1, V2, 0.00001f)) ? 0 : 1;
+		}
+
+		return Error;
+	}
+}//namespace test_extractsEulerAngles
+
+// Entry point: runs every euler-angle construction, derivative, composition and
+// extraction test; returns the accumulated error count (0 on success).
+int main()
+{
+	int Error = 0;
+
+	typedef glm::mat4::value_type value;
+	glm::vec3 const X(1.0f, 0.0f, 0.0f);
+	glm::vec3 const Y(0.0f, 1.0f, 0.0f);
+	glm::vec3 const Z(0.0f, 0.0f, 1.0f);
+
+	Error += test_eulerAngleX::test();
+	Error += test_eulerAngleY::test();
+	Error += test_eulerAngleZ::test();
+
+	Error += test_derivedEulerAngles::test(glm::eulerAngleX<value>, glm::derivedEulerAngleX<value>, X);
+	Error += test_derivedEulerAngles::test(glm::eulerAngleY<value>, glm::derivedEulerAngleY<value>, Y);
+	Error += test_derivedEulerAngles::test(glm::eulerAngleZ<value>, glm::derivedEulerAngleZ<value>, Z);
+
+	Error += test_eulerAngleXY::test();
+	Error += test_eulerAngleYX::test();
+	Error += test_eulerAngleXZ::test();
+	Error += test_eulerAngleZX::test();
+	Error += test_eulerAngleYZ::test();
+	Error += test_eulerAngleZY::test();
+	Error += test_eulerAngleYXZ::test();
+
+	// Three-angle rotation builders, proper (XZX-style) and Tait-Bryan (XZY-style) orders.
+	Error += test_eulerAngles::test(glm::eulerAngleXZX<value>, X, Z, X);
+	Error += test_eulerAngles::test(glm::eulerAngleXYX<value>, X, Y, X);
+	Error += test_eulerAngles::test(glm::eulerAngleYXY<value>, Y, X, Y);
+	Error += test_eulerAngles::test(glm::eulerAngleYZY<value>, Y, Z, Y);
+	Error += test_eulerAngles::test(glm::eulerAngleZYZ<value>, Z, Y, Z);
+	Error += test_eulerAngles::test(glm::eulerAngleZXZ<value>, Z, X, Z);
+	Error += test_eulerAngles::test(glm::eulerAngleXZY<value>, X, Z, Y);
+	Error += test_eulerAngles::test(glm::eulerAngleYZX<value>, Y, Z, X);
+	Error += test_eulerAngles::test(glm::eulerAngleZYX<value>, Z, Y, X);
+	Error += test_eulerAngles::test(glm::eulerAngleZXY<value>, Z, X, Y);
+
+	// Extraction round-trips for every supported angle order.
+	Error += test_extractsEulerAngles::test(glm::eulerAngleYXZ<value>, glm::extractEulerAngleYXZ<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleXZX<value>, glm::extractEulerAngleXZX<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleXYX<value>, glm::extractEulerAngleXYX<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleYXY<value>, glm::extractEulerAngleYXY<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleYZY<value>, glm::extractEulerAngleYZY<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleZYZ<value>, glm::extractEulerAngleZYZ<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleZXZ<value>, glm::extractEulerAngleZXZ<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleXZY<value>, glm::extractEulerAngleXZY<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleYZX<value>, glm::extractEulerAngleYZX<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleZYX<value>, glm::extractEulerAngleZYX<value>);
+	Error += test_extractsEulerAngles::test(glm::eulerAngleZXY<value>, glm::extractEulerAngleZXY<value>);
+
+	return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_extend.cpp b/3rdparty/glm/source/test/gtx/gtx_extend.cpp
new file mode 100644
index 0000000..0c37df5
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_extend.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/extend.hpp>
+
+// Placeholder test: only verifies that <glm/gtx/extend.hpp> compiles.
+int main()
+{
+	int Error(0);
+
+	return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_extended_min_max.cpp b/3rdparty/glm/source/test/gtx/gtx_extended_min_max.cpp
new file mode 100644
index 0000000..3d20f10
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_extended_min_max.cpp
@@ -0,0 +1,101 @@
+#define GLM_ENABLE_EXPERIMENTAL
+
+#include <glm/gtx/extended_min_max.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+
+// This file has divisions by zero to test isnan
+#if GLM_COMPILER & GLM_COMPILER_VC
+# pragma warning(disable : 4723)
+#endif
+
+namespace fmin_
+{
+	// glm::fmin must ignore NaN operands (returning the non-NaN value) and accept
+	// both vector and scalar second arguments for vec1..vec4.
+	static int test()
+	{
+		int Error = 0;
+
+		// Zero_f / 0.0f produces NaN; fmin(x, NaN) must return x.
+		float Zero_f = 0.0f;
+		glm::vec1 A0 = glm::fmin(glm::vec1(1), glm::vec1(Zero_f / 0.0f));
+		Error += glm::equal(A0.x, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+
+		glm::vec1 A1 = glm::fmin(glm::vec1(Zero_f / 0.0f), glm::vec1(1));
+		Error += glm::equal(A1.x, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+
+		// Vector/vector and vector/scalar overloads must agree.
+		glm::vec2 B0 = glm::fmin(glm::vec2(1), glm::vec2(1));
+		glm::vec2 B1 = glm::fmin(glm::vec2(1), 1.0f);
+		bool B2 = glm::all(glm::equal(B0, B1, glm::epsilon<float>()));
+		Error += B2 ? 0 : 1;
+
+		glm::vec3 C0 = glm::fmin(glm::vec3(1), glm::vec3(1));
+		glm::vec3 C1 = glm::fmin(glm::vec3(1), 1.0f);
+		bool C2 = glm::all(glm::equal(C0, C1, glm::epsilon<float>()));
+		Error += C2 ? 0 : 1;
+
+		glm::vec4 D0 = glm::fmin(glm::vec4(1), glm::vec4(1));
+		glm::vec4 D1 = glm::fmin(glm::vec4(1), 1.0f);
+		bool D2 = glm::all(glm::equal(D0, D1, glm::epsilon<float>()));
+		Error += D2 ? 0 : 1;
+
+		return Error;
+	}
+}//namespace fmin_
+
+namespace fmax_
+{
+	// glm::fmax must ignore NaN operands (returning the non-NaN value) and accept
+	// both vector and scalar second arguments for vec1..vec4.
+	static int test()
+	{
+		int Error = 0;
+
+		// Zero_f / 0.0f produces NaN; fmax(x, NaN) must return x.
+		float Zero_f = 0.0f;
+		glm::vec1 A0 = glm::fmax(glm::vec1(1), glm::vec1(Zero_f / 0.0f));
+		Error += glm::equal(A0.x, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+
+		glm::vec1 A1 = glm::fmax(glm::vec1(Zero_f / 0.0f), glm::vec1(1));
+		// Fixed copy-paste bug: this check previously re-tested A0.x, leaving A1 unverified.
+		Error += glm::equal(A1.x, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+
+		// Vector/vector and vector/scalar overloads must agree.
+		glm::vec2 B0 = glm::fmax(glm::vec2(1), glm::vec2(1));
+		glm::vec2 B1 = glm::fmax(glm::vec2(1), 1.0f);
+		bool B2 = glm::all(glm::equal(B0, B1, glm::epsilon<float>()));
+		Error += B2 ? 0 : 1;
+
+		glm::vec3 C0 = glm::fmax(glm::vec3(1), glm::vec3(1));
+		glm::vec3 C1 = glm::fmax(glm::vec3(1), 1.0f);
+		bool C2 = glm::all(glm::equal(C0, C1, glm::epsilon<float>()));
+		Error += C2 ? 0 : 1;
+
+		glm::vec4 D0 = glm::fmax(glm::vec4(1), glm::vec4(1));
+		glm::vec4 D1 = glm::fmax(glm::vec4(1), 1.0f);
+		bool D2 = glm::all(glm::equal(D0, D1, glm::epsilon<float>()));
+		Error += D2 ? 0 : 1;
+
+		return Error;
+	}
+}//namespace fmax_
+
+namespace fclamp_
+{
+	// glm::fclamp must tolerate a NaN bound: clamping 1 into [NaN, 2] yields 1.
+	static int test()
+	{
+		int Error = 0;
+
+		// Zero_f / 0.0f produces NaN (see the isnan note at the top of this file).
+		float Zero_f = 0.0f;
+		glm::vec1 A0 = glm::fclamp(glm::vec1(1), glm::vec1(Zero_f / 0.0f), glm::vec1(2.0f));
+		Error += glm::equal(A0.x, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+
+		return Error;
+	}
+}//namespace fclamp_
+
+// Entry point: accumulates failures from the fmin/fmax/fclamp NaN-handling tests.
+int main()
+{
+	int Error = 0;
+
+	Error += fmin_::test();
+	Error += fmax_::test();
+	Error += fclamp_::test();
+
+	return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_extented_min_max.cpp b/3rdparty/glm/source/test/gtx/gtx_extented_min_max.cpp
new file mode 100644
index 0000000..c8c7847
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_extented_min_max.cpp
@@ -0,0 +1,39 @@
+///////////////////////////////////////////////////////////////////////////////////
+/// OpenGL Mathematics (glm.g-truc.net)
+///
+/// Copyright (c) 2005 - 2015 G-Truc Creation (www.g-truc.net)
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to deal
+/// in the Software without restriction, including without limitation the rights
+/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+/// copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in
+/// all copies or substantial portions of the Software.
+///
+/// Restrictions:
+/// By making use of the Software for military purposes, you choose to make
+/// a Bunny unhappy.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+/// THE SOFTWARE.
+///
+/// @file test/gtx/gtx_extented_min_max.cpp
+/// @date 2013-10-25 / 2014-11-25
+/// @author Christophe Riccio
+///////////////////////////////////////////////////////////////////////////////////
+
+#include <glm/gtx/extended_min_max.hpp>
+
+// Placeholder test: only verifies that <glm/gtx/extended_min_max.hpp> compiles.
+int main()
+{
+	int Error(0);
+
+	return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_exterior_product.cpp b/3rdparty/glm/source/test/gtx/gtx_exterior_product.cpp
new file mode 100644
index 0000000..a02c983
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_exterior_product.cpp
@@ -0,0 +1,14 @@
+#include <glm/gtx/exterior_product.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/vec2.hpp>
+
+// The 2D exterior (cross) product of two parallel vectors must be zero.
+int main()
+{
+	int Error = 0;
+
+	float const f = glm::cross(glm::vec2(1.0f, 1.0f), glm::vec2(1.0f, 1.0f));
+	Error += glm::epsilonEqual(f, 0.0f, 0.001f) ? 0 : 1;
+
+	return Error;
+}
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_fast_exponential.cpp b/3rdparty/glm/source/test/gtx/gtx_fast_exponential.cpp
new file mode 100644
index 0000000..341e26e
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_fast_exponential.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/fast_exponential.hpp>
+
+// Placeholder test: only verifies that <glm/gtx/fast_exponential.hpp> compiles.
+int main()
+{
+	int Error(0);
+
+	return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_fast_square_root.cpp b/3rdparty/glm/source/test/gtx/gtx_fast_square_root.cpp
new file mode 100644
index 0000000..80d7fe4
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_fast_square_root.cpp
@@ -0,0 +1,45 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/fast_square_root.hpp>
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/vector_relational.hpp>
+
+// fastInverseSqrt(1) must be ~1 within 1% for scalar float/double and for
+// float/double vector types.
+int test_fastInverseSqrt()
+{
+	int Error = 0;
+
+	Error += glm::epsilonEqual(glm::fastInverseSqrt(1.0f), 1.0f, 0.01f) ? 0 : 1;
+	Error += glm::epsilonEqual(glm::fastInverseSqrt(1.0), 1.0, 0.01) ? 0 : 1;
+	Error += glm::all(glm::epsilonEqual(glm::fastInverseSqrt(glm::vec2(1.0f)), glm::vec2(1.0f), 0.01f)) ? 0 : 1;
+	Error += glm::all(glm::epsilonEqual(glm::fastInverseSqrt(glm::dvec3(1.0)), glm::dvec3(1.0), 0.01)) ? 0 : 1;
+	Error += glm::all(glm::epsilonEqual(glm::fastInverseSqrt(glm::dvec4(1.0)), glm::dvec4(1.0), 0.01)) ? 0 : 1;
+
+	return Error;
+}
+
+// fastDistance between the origin and a unit offset must be ~1 within 1% for
+// scalar and vec2..vec4 overloads.
+int test_fastDistance()
+{
+	int Error = 0;
+
+	float const A = glm::fastDistance(0.0f, 1.0f);
+	float const B = glm::fastDistance(glm::vec2(0.0f), glm::vec2(1.0f, 0.0f));
+	float const C = glm::fastDistance(glm::vec3(0.0f), glm::vec3(1.0f, 0.0f, 0.0f));
+	float const D = glm::fastDistance(glm::vec4(0.0f), glm::vec4(1.0f, 0.0f, 0.0f, 0.0f));
+
+	Error += glm::epsilonEqual(A, 1.0f, 0.01f) ? 0 : 1;
+	Error += glm::epsilonEqual(B, 1.0f, 0.01f) ? 0 : 1;
+	Error += glm::epsilonEqual(C, 1.0f, 0.01f) ? 0 : 1;
+	Error += glm::epsilonEqual(D, 1.0f, 0.01f) ? 0 : 1;
+
+	return Error;
+}
+
+// Entry point: accumulates failures from the fast square-root accuracy tests.
+int main()
+{
+	int Error = 0;
+
+	Error += test_fastInverseSqrt();
+	Error += test_fastDistance();
+
+	return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_fast_trigonometry.cpp b/3rdparty/glm/source/test/gtx/gtx_fast_trigonometry.cpp
new file mode 100644
index 0000000..8bf86ba
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_fast_trigonometry.cpp
@@ -0,0 +1,564 @@
+#include <glm/ext/scalar_ulp.hpp>
+
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtx/fast_trigonometry.hpp>
+#include <glm/gtx/integer.hpp>
+#include <glm/gtx/common.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/trigonometric.hpp>
+#include <cmath>
+#include <ctime>
+#include <cstdio>
+#include <vector>
+
+namespace fastCos
+{
+	// Benchmarks glm::fastCos against glm::cos over [-pi, pi); returns 0 if the
+	// fast variant is at least as fast. NOTE(review): wall-clock based, so the
+	// result can vary between runs. NextFloat=true steps one ULP at a time.
+	int perf(bool NextFloat)
+	{
+		const float begin = -glm::pi<float>();
+		const float end = glm::pi<float>();
+		float result = 0.f;
+
+		const std::clock_t timestamp1 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::fastCos(i);
+
+		const std::clock_t timestamp2 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::cos(i);
+
+		const std::clock_t timestamp3 = std::clock();
+		const std::clock_t time_fast = timestamp2 - timestamp1;
+		const std::clock_t time_default = timestamp3 - timestamp2;
+		std::printf("fastCos Time %d clocks\n", static_cast<int>(time_fast));
+		std::printf("cos Time %d clocks\n", static_cast<int>(time_default));
+
+		return time_fast <= time_default ? 0 : 1;
+	}
+}//namespace fastCos
+
+namespace fastSin
+{
+	/*
+	// Reference sketch of a range-reduced sin (kept for documentation purposes):
+	float sin(float x) {
+		float temp;
+		temp = (x + M_PI) / ((2 * M_PI) - M_PI);
+		return limited_sin((x + M_PI) - ((2 * M_PI) - M_PI) * temp));
+	}
+	*/
+
+	// Benchmarks glm::fastSin against glm::sin over [-pi, pi); returns 0 if the
+	// fast variant is at least as fast. NOTE(review): wall-clock based, so the
+	// result can vary between runs.
+	int perf(bool NextFloat)
+	{
+		const float begin = -glm::pi<float>();
+		const float end = glm::pi<float>();
+		float result = 0.f;
+
+		const std::clock_t timestamp1 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::fastSin(i);
+
+		const std::clock_t timestamp2 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::sin(i);
+
+		const std::clock_t timestamp3 = std::clock();
+		const std::clock_t time_fast = timestamp2 - timestamp1;
+		const std::clock_t time_default = timestamp3 - timestamp2;
+		std::printf("fastSin Time %d clocks\n", static_cast<int>(time_fast));
+		std::printf("sin Time %d clocks\n", static_cast<int>(time_default));
+
+		return time_fast <= time_default ? 0 : 1;
+	}
+}//namespace fastSin
+
+namespace fastTan
+{
+	// Benchmarks glm::fastTan against glm::tan over [-pi, pi); returns 0 if the
+	// fast variant is at least as fast. NOTE(review): wall-clock based, so the
+	// result can vary between runs.
+	int perf(bool NextFloat)
+	{
+		const float begin = -glm::pi<float>();
+		const float end = glm::pi<float>();
+		float result = 0.f;
+
+		const std::clock_t timestamp1 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::fastTan(i);
+
+		const std::clock_t timestamp2 = std::clock();
+		for (float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::tan(i);
+
+		const std::clock_t timestamp3 = std::clock();
+		const std::clock_t time_fast = timestamp2 - timestamp1;
+		const std::clock_t time_default = timestamp3 - timestamp2;
+		std::printf("fastTan Time %d clocks\n", static_cast<int>(time_fast));
+		std::printf("tan Time %d clocks\n", static_cast<int>(time_default));
+
+		return time_fast <= time_default ? 0 : 1;
+	}
+}//namespace fastTan
+
+namespace fastAcos
+{
+	// Benchmarks glm::fastAcos against glm::acos; returns 0 if the fast variant
+	// is at least as fast. NOTE(review): the sweep covers [-pi, pi) although
+	// acos is only defined on [-1, 1] — out-of-domain inputs are timed anyway.
+	int perf(bool NextFloat)
+	{
+		const float begin = -glm::pi<float>();
+		const float end = glm::pi<float>();
+		float result = 0.f;
+
+		const std::clock_t timestamp1 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::fastAcos(i);
+
+		const std::clock_t timestamp2 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::acos(i);
+
+		const std::clock_t timestamp3 = std::clock();
+		const std::clock_t time_fast = timestamp2 - timestamp1;
+		const std::clock_t time_default = timestamp3 - timestamp2;
+
+		std::printf("fastAcos Time %d clocks\n", static_cast<int>(time_fast));
+		std::printf("acos Time %d clocks\n", static_cast<int>(time_default));
+
+		return time_fast <= time_default ? 0 : 1;
+	}
+}//namespace fastAcos
+
+namespace fastAsin
+{
+	// Benchmarks glm::fastAsin against glm::asin; returns 0 if the fast variant
+	// is at least as fast. NOTE(review): the sweep covers [-pi, pi) although
+	// asin is only defined on [-1, 1] — out-of-domain inputs are timed anyway.
+	int perf(bool NextFloat)
+	{
+		const float begin = -glm::pi<float>();
+		const float end = glm::pi<float>();
+		float result = 0.f;
+		const std::clock_t timestamp1 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::fastAsin(i);
+		const std::clock_t timestamp2 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::asin(i);
+		const std::clock_t timestamp3 = std::clock();
+		const std::clock_t time_fast = timestamp2 - timestamp1;
+		const std::clock_t time_default = timestamp3 - timestamp2;
+		std::printf("fastAsin Time %d clocks\n", static_cast<int>(time_fast));
+		std::printf("asin Time %d clocks\n", static_cast<int>(time_default));
+
+		return time_fast <= time_default ? 0 : 1;
+	}
+}//namespace fastAsin
+
+namespace fastAtan
+{
+	// Benchmarks glm::fastAtan against glm::atan over [-pi, pi); returns 0 if the
+	// fast variant is at least as fast. NOTE(review): wall-clock based, so the
+	// result can vary between runs.
+	int perf(bool NextFloat)
+	{
+		const float begin = -glm::pi<float>();
+		const float end = glm::pi<float>();
+		float result = 0.f;
+		const std::clock_t timestamp1 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::fastAtan(i);
+		const std::clock_t timestamp2 = std::clock();
+		for(float i = begin; i < end; i = NextFloat ? glm::nextFloat(i) : i += 0.1f)
+			result = glm::atan(i);
+		const std::clock_t timestamp3 = std::clock();
+		const std::clock_t time_fast = timestamp2 - timestamp1;
+		const std::clock_t time_default = timestamp3 - timestamp2;
+		std::printf("fastAtan Time %d clocks\n", static_cast<int>(time_fast));
+		std::printf("atan Time %d clocks\n", static_cast<int>(time_default));
+
+		return time_fast <= time_default ? 0 : 1;
+	}
+}//namespace fastAtan
+
+namespace taylorCos
+{
+ using glm::qualifier;
+ using glm::length_t;
+
+ glm::vec4 const AngleShift(0.0f, glm::half_pi<float>(), glm::pi<float>(), glm::three_over_two_pi<float>());
+
+	// 8th-order Taylor polynomial of cos(x) about 0 (terms through x^8/8!),
+	// with precomputed reciprocal-factorial coefficients.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> taylorSeriesNewCos(glm::vec<L, T, Q> const& x)
+	{
+		glm::vec<L, T, Q> const Powed2(x * x);
+		glm::vec<L, T, Q> const Powed4(Powed2 * Powed2);
+		glm::vec<L, T, Q> const Powed6(Powed4 * Powed2);
+		glm::vec<L, T, Q> const Powed8(Powed4 * Powed4);
+
+		return static_cast<T>(1)
+			- Powed2 * static_cast<T>(0.5)
+			+ Powed4 * static_cast<T>(0.04166666666666666666666666666667)
+			- Powed6 * static_cast<T>(0.00138888888888888888888888888889)
+			+ Powed8 * static_cast<T>(2.4801587301587301587301587301587e-5);
+	}
+
+	// 6th-order Taylor polynomial of cos(x) about 0 (terms through x^6/6!);
+	// cheaper but less accurate than taylorSeriesNewCos.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> taylorSeriesNewCos6(glm::vec<L, T, Q> const& x)
+	{
+		glm::vec<L, T, Q> const Powed2(x * x);
+		glm::vec<L, T, Q> const Powed4(Powed2 * Powed2);
+		glm::vec<L, T, Q> const Powed6(Powed4 * Powed2);
+
+		return static_cast<T>(1)
+			- Powed2 * static_cast<T>(0.5)
+			+ Powed4 * static_cast<T>(0.04166666666666666666666666666667)
+			- Powed6 * static_cast<T>(0.00138888888888888888888888888889);
+	}
+
+	// Clears the sign bit of each component via bit manipulation.
+	// NOTE(review): reinterpret_cast through int* violates strict aliasing, and
+	// exactly 4 components are masked regardless of L — only safe for vec4 with
+	// contiguous float storage. Benchmark-only code; do not reuse elsewhere.
+	template<glm::length_t L, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, float, Q> fastAbs(glm::vec<L, float, Q> x)
+	{
+		int* Pointer = reinterpret_cast<int*>(&x[0]);
+		Pointer[0] &= 0x7fffffff;
+		Pointer[1] &= 0x7fffffff;
+		Pointer[2] &= 0x7fffffff;
+		Pointer[3] &= 0x7fffffff;
+		return x;
+	}
+
+	// Experimental cos approximation (benchmark candidate).
+	// NOTE(review): Angle0_PI is computed but unused — the function currently
+	// returns the raw 6th-order Taylor of x without range reduction; the reduced
+	// quadrant logic below is commented out. Looks like work in progress.
+	template<glm::length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> fastCosNew(glm::vec<L, T, Q> const& x)
+	{
+		glm::vec<L, T, Q> const Angle0_PI(fastAbs(fmod(x + glm::pi<T>(), glm::two_pi<T>()) - glm::pi<T>()));
+		return taylorSeriesNewCos6(x);
+/*
+		vec<L, bool, Q> const FirstQuarterPi(lessThanEqual(Angle0_PI, vec<L, T, Q>(glm::half_pi<T>())));
+
+		vec<L, T, Q> const RevertAngle(mix(vec<L, T, Q>(glm::pi<T>()), vec<L, T, Q>(0), FirstQuarterPi));
+		vec<L, T, Q> const ReturnSign(mix(vec<L, T, Q>(-1), vec<L, T, Q>(1), FirstQuarterPi));
+		vec<L, T, Q> const SectionAngle(RevertAngle - Angle0_PI);
+
+		return ReturnSign * taylorSeriesNewCos(SectionAngle);
+*/
+	}
+
+	// Times fastCosNew over Samples evenly-spaced angles and sanity-checks the
+	// results stay in [-1, 1]. Returns the number of out-of-range samples.
+	int perf_fastCosNew(float Begin, float End, std::size_t Samples)
+	{
+		std::vector<glm::vec4> Results;
+		Results.resize(Samples);
+
+		float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+		std::clock_t const TimeStampBegin = std::clock();
+
+		for(std::size_t i = 0; i < Samples; ++i)
+			Results[i] = fastCosNew(AngleShift + glm::vec4(Begin + Steps * static_cast<float>(i)));
+
+		std::clock_t const TimeStampEnd = std::clock();
+
+		std::printf("fastCosNew %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+		int Error = 0;
+		for(std::size_t i = 0; i < Samples; ++i)
+			Error += Results[i].x >= -1.0f && Results[i].x <= 1.0f ? 0 : 1;
+		return Error;
+	}
+
+	// fmod built from trunc(): x - y * trunc(x / y). Same sign behavior as the
+	// C fmod for these inputs, but composed of fully-specified GLM operations.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> deterministic_fmod(glm::vec<L, T, Q> const& x, T y)
+	{
+		return x - y * trunc(x / y);
+	}
+
+	// cos approximation with full range reduction: folds x into [0, pi], mirrors
+	// angles past pi/2 and flips the sign, then evaluates the 8th-order Taylor
+	// polynomial on the reduced angle. Uses deterministic_fmod for the folding.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> fastCosDeterminisctic(glm::vec<L, T, Q> const& x)
+	{
+		glm::vec<L, T, Q> const Angle0_PI(abs(deterministic_fmod(x + glm::pi<T>(), glm::two_pi<T>()) - glm::pi<T>()));
+		glm::vec<L, bool, Q> const FirstQuarterPi(lessThanEqual(Angle0_PI, glm::vec<L, T, Q>(glm::half_pi<T>())));
+
+		glm::vec<L, T, Q> const RevertAngle(mix(glm::vec<L, T, Q>(glm::pi<T>()), glm::vec<L, T, Q>(0), FirstQuarterPi));
+		glm::vec<L, T, Q> const ReturnSign(mix(glm::vec<L, T, Q>(-1), glm::vec<L, T, Q>(1), FirstQuarterPi));
+		glm::vec<L, T, Q> const SectionAngle(RevertAngle - Angle0_PI);
+
+		return ReturnSign * taylorSeriesNewCos(SectionAngle);
+	}
+
+	// Times fastCosDeterminisctic over Samples evenly-spaced angles and
+	// sanity-checks the results stay in [-1, 1].
+	int perf_fastCosDeterminisctic(float Begin, float End, std::size_t Samples)
+	{
+		std::vector<glm::vec4> Results;
+		Results.resize(Samples);
+
+		float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+		std::clock_t const TimeStampBegin = std::clock();
+
+		for(std::size_t i = 0; i < Samples; ++i)
+			Results[i] = taylorCos::fastCosDeterminisctic(AngleShift + glm::vec4(Begin + Steps * static_cast<float>(i)));
+
+		std::clock_t const TimeStampEnd = std::clock();
+
+		std::printf("fastCosDeterminisctic %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+		int Error = 0;
+		for(std::size_t i = 0; i < Samples; ++i)
+			Error += Results[i].x >= -1.0f && Results[i].x <= 1.0f ? 0 : 1;
+		return Error;
+	}
+
+	// Reference 8th-order Taylor cos using explicit powers and glm::factorial;
+	// slower but serves as ground truth for the coefficient-folded versions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> taylorSeriesRefCos(glm::vec<L, T, Q> const& x)
+	{
+		return static_cast<T>(1)
+			- (x * x) / glm::factorial(static_cast<T>(2))
+			+ (x * x * x * x) / glm::factorial(static_cast<T>(4))
+			- (x * x * x * x * x * x) / glm::factorial(static_cast<T>(6))
+			+ (x * x * x * x * x * x * x * x) / glm::factorial(static_cast<T>(8));
+	}
+
+	// Reference range-reduced cos: folds x into [0, pi], mirrors past pi/2 with a
+	// sign flip, then evaluates the reference Taylor polynomial.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER glm::vec<L, T, Q> fastRefCos(glm::vec<L, T, Q> const& x)
+	{
+		glm::vec<L, T, Q> const Angle0_PI(glm::abs(fmod(x + glm::pi<T>(), glm::two_pi<T>()) - glm::pi<T>()));
+//		return taylorSeriesRefCos(Angle0_PI);
+
+		glm::vec<L, bool, Q> const FirstQuarterPi(lessThanEqual(Angle0_PI, glm::vec<L, T, Q>(glm::half_pi<T>())));
+
+		glm::vec<L, T, Q> const RevertAngle(mix(glm::vec<L, T, Q>(glm::pi<T>()), glm::vec<L, T, Q>(0), FirstQuarterPi));
+		glm::vec<L, T, Q> const ReturnSign(mix(glm::vec<L, T, Q>(-1), glm::vec<L, T, Q>(1), FirstQuarterPi));
+		glm::vec<L, T, Q> const SectionAngle(RevertAngle - Angle0_PI);
+
+		return ReturnSign * taylorSeriesRefCos(SectionAngle);
+	}
+
+ int perf_fastCosRef(float Begin, float End, std::size_t Samples)
+ {
+ std::vector<glm::vec4> Results;
+ Results.resize(Samples);
+
+ float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+ std::clock_t const TimeStampBegin = std::clock();
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ Results[i] = taylorCos::fastRefCos(AngleShift + glm::vec4(Begin + Steps * static_cast<float>(i)));
+
+ std::clock_t const TimeStampEnd = std::clock();
+
+ std::printf("fastCosRef %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+ int Error = 0;
+ for(std::size_t i = 0; i < Samples; ++i)
+ Error += Results[i].x >= -1.0f && Results[i].x <= 1.0f ? 0 : 1;
+ return Error;
+ }
+
+ int perf_fastCosOld(float Begin, float End, std::size_t Samples)
+ {
+ std::vector<glm::vec4> Results;
+ Results.resize(Samples);
+
+ float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+ std::clock_t const TimeStampBegin = std::clock();
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ Results[i] = glm::fastCos(AngleShift + glm::vec4(Begin + Steps * static_cast<float>(i)));
+
+ std::clock_t const TimeStampEnd = std::clock();
+
+ std::printf("fastCosOld %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+ int Error = 0;
+ for(std::size_t i = 0; i < Samples; ++i)
+ Error += Results[i].x >= -1.0f && Results[i].x <= 1.0f ? 0 : 1;
+ return Error;
+ }
+
+ int perf_cos(float Begin, float End, std::size_t Samples)
+ {
+ std::vector<glm::vec4> Results;
+ Results.resize(Samples);
+
+ float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+ std::clock_t const TimeStampBegin = std::clock();
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ Results[i] = glm::cos(AngleShift + glm::vec4(Begin + Steps * static_cast<float>(i)));
+
+ std::clock_t const TimeStampEnd = std::clock();
+
+ std::printf("cos %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+ int Error = 0;
+ for(std::size_t i = 0; i < Samples; ++i)
+ Error += Results[i].x >= -1.0f && Results[i].x <= 1.0f ? 0 : 1;
+ return Error;
+ }
+
+ int perf(std::size_t const Samples)
+ {
+ int Error = 0;
+
+ float const Begin = -glm::pi<float>();
+ float const End = glm::pi<float>();
+
+ Error += perf_cos(Begin, End, Samples);
+ Error += perf_fastCosOld(Begin, End, Samples);
+ Error += perf_fastCosRef(Begin, End, Samples);
+ //Error += perf_fastCosNew(Begin, End, Samples);
+ Error += perf_fastCosDeterminisctic(Begin, End, Samples);
+
+ return Error;
+ }
+
+ int test()
+ {
+ int Error = 0;
+
+ //for(float Angle = -4.0f * glm::pi<float>(); Angle < 4.0f * glm::pi<float>(); Angle += 0.1f)
+ //for(float Angle = -720.0f; Angle < 720.0f; Angle += 0.1f)
+ for(float Angle = 0.0f; Angle < 180.0f; Angle += 0.1f)
+ {
+ float const modAngle = std::fmod(glm::abs(Angle), 360.f);
+ assert(modAngle >= 0.0f && modAngle <= 360.f);
+ float const radAngle = glm::radians(modAngle);
+ float const Cos0 = std::cos(radAngle);
+
+ float const Cos1 = taylorCos::fastRefCos(glm::fvec1(radAngle)).x;
+ Error += glm::abs(Cos1 - Cos0) < 0.1f ? 0 : 1;
+
+ //float const Cos2 = taylorCos::fastCosNew(glm::fvec1(radAngle)).x;
+ //Error += glm::abs(Cos2 - Cos0) < 0.1f ? 0 : 1;
+
+ assert(!Error);
+ }
+
+ return Error;
+ }
+}//namespace taylorCos
+
+namespace taylor2
+{
+ glm::vec4 const AngleShift(0.0f, glm::pi<float>() * 0.5f, glm::pi<float>() * 1.0f, glm::pi<float>() * 1.5f);
+
+ float taylorCosA(float x)
+ {
+ return 1.f
+ - (x * x) * (1.f / 2.f)
+ + (x * x * x * x) * (1.f / 24.f)
+ - (x * x * x * x * x * x) * (1.f / 720.f)
+ + (x * x * x * x * x * x * x * x) * (1.f / 40320.f);
+ }
+
+ float taylorCosB(float x)
+ {
+ return 1.f
+ - (x * x) * (1.f / 2.f)
+ + (x * x * x * x) * (1.f / 24.f)
+ - (x * x * x * x * x * x) * (1.f / 720.f)
+ + (x * x * x * x * x * x * x * x) * (1.f / 40320.f);
+ }
+
+ float taylorCosC(float x)
+ {
+ return 1.f
+ - (x * x) * (1.f / 2.f)
+ + ((x * x) * (x * x)) * (1.f / 24.f)
+ - (((x * x) * (x * x)) * (x * x)) * (1.f / 720.f)
+ + (((x * x) * (x * x)) * ((x * x) * (x * x))) * (1.f / 40320.f);
+ }
+
+ int perf_taylorCosA(float Begin, float End, std::size_t Samples)
+ {
+ std::vector<float> Results;
+ Results.resize(Samples);
+
+ float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+ std::clock_t const TimeStampBegin = std::clock();
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ Results[i] = taylorCosA(AngleShift.x + Begin + Steps * static_cast<float>(i));
+
+ std::clock_t const TimeStampEnd = std::clock();
+
+ std::printf("taylorCosA %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+ int Error = 0;
+ for(std::size_t i = 0; i < Samples; ++i)
+ Error += Results[i] >= -1.0f && Results[i] <= 1.0f ? 0 : 1;
+ return Error;
+ }
+
+ int perf_taylorCosB(float Begin, float End, std::size_t Samples)
+ {
+ std::vector<float> Results;
+ Results.resize(Samples);
+
+ float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+ std::clock_t const TimeStampBegin = std::clock();
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ Results[i] = taylorCosB(AngleShift.x + Begin + Steps * static_cast<float>(i));
+
+ std::clock_t const TimeStampEnd = std::clock();
+
+ std::printf("taylorCosB %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+ int Error = 0;
+ for(std::size_t i = 0; i < Samples; ++i)
+ Error += Results[i] >= -1.0f && Results[i] <= 1.0f ? 0 : 1;
+ return Error;
+ }
+
+ int perf_taylorCosC(float Begin, float End, std::size_t Samples)
+ {
+ std::vector<float> Results;
+ Results.resize(Samples);
+
+ float const Steps = (End - Begin) / static_cast<float>(Samples);
+
+ std::clock_t const TimeStampBegin = std::clock();
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ Results[i] = taylorCosC(AngleShift.x + Begin + Steps * static_cast<float>(i));
+
+ std::clock_t const TimeStampEnd = std::clock();
+
+ std::printf("taylorCosC %d clocks\n", static_cast<int>(TimeStampEnd - TimeStampBegin));
+
+ int Error = 0;
+ for(std::size_t i = 0; i < Samples; ++i)
+ Error += Results[i] >= -1.0f && Results[i] <= 1.0f ? 0 : 1;
+ return Error;
+ }
+
+ int perf(std::size_t Samples)
+ {
+ int Error = 0;
+
+ float const Begin = -glm::pi<float>();
+ float const End = glm::pi<float>();
+
+ Error += perf_taylorCosA(Begin, End, Samples);
+ Error += perf_taylorCosB(Begin, End, Samples);
+ Error += perf_taylorCosC(Begin, End, Samples);
+
+ return Error;
+ }
+
+}//namespace taylor2
+
+int main()
+{
+ int Error(0);
+
+ Error += ::taylor2::perf(1000);
+ Error += ::taylorCos::test();
+ Error += ::taylorCos::perf(1000);
+
+# ifdef NDEBUG
+ ::fastCos::perf(false);
+ ::fastSin::perf(false);
+ ::fastTan::perf(false);
+ ::fastAcos::perf(false);
+ ::fastAsin::perf(false);
+ ::fastAtan::perf(false);
+# endif//NDEBUG
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_functions.cpp b/3rdparty/glm/source/test/gtx/gtx_functions.cpp
new file mode 100644
index 0000000..48a6af0
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_functions.cpp
@@ -0,0 +1,36 @@
+#include <glm/gtx/functions.hpp>
+#include <glm/ext/vector_float2.hpp>
+#include <vector>
+
+int test_gauss_1d()
+{
+ int Error = 0;
+
+ std::vector<float> Result(20);
+ for(std::size_t i = 0, n = Result.size(); i < n; ++i)
+ Result[i] = glm::gauss(static_cast<float>(i) * 0.1f, 0.0f, 1.0f);
+
+ return Error;
+}
+
+int test_gauss_2d()
+{
+ int Error = 0;
+
+ std::vector<float> Result(20);
+ for(std::size_t i = 0, n = Result.size(); i < n; ++i)
+ Result[i] = glm::gauss(glm::vec2(static_cast<float>(i)) * 0.1f, glm::vec2(0.0f), glm::vec2(1.0f));
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_gauss_1d();
+ Error += test_gauss_2d();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_gradient_paint.cpp b/3rdparty/glm/source/test/gtx/gtx_gradient_paint.cpp
new file mode 100644
index 0000000..01f521b
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_gradient_paint.cpp
@@ -0,0 +1,34 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/gradient_paint.hpp>
+
+int test_radialGradient()
+{
+ int Error = 0;
+
+ float Gradient = glm::radialGradient(glm::vec2(0), 1.0f, glm::vec2(1), glm::vec2(0.5));
+ Error += Gradient != 0.0f ? 0 : 1;
+
+ return Error;
+}
+
+int test_linearGradient()
+{
+ int Error = 0;
+
+ float Gradient = glm::linearGradient(glm::vec2(0), glm::vec2(1), glm::vec2(0.5));
+ Error += Gradient != 0.0f ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_radialGradient();
+ Error += test_linearGradient();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_handed_coordinate_space.cpp b/3rdparty/glm/source/test/gtx/gtx_handed_coordinate_space.cpp
new file mode 100644
index 0000000..e417688
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_handed_coordinate_space.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/handed_coordinate_space.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_int_10_10_10_2.cpp b/3rdparty/glm/source/test/gtx/gtx_int_10_10_10_2.cpp
new file mode 100644
index 0000000..ab59bb2
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_int_10_10_10_2.cpp
@@ -0,0 +1,18 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// OpenGL Mathematics Copyright (c) 2005 - 2013 G-Truc Creation (www.g-truc.net)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Created : 2013-10-25
+// Updated : 2013-10-25
+// Licence : This source is under MIT licence
+// File : test/gtx/associated_min_max.cpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtx/associated_min_max.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_integer.cpp b/3rdparty/glm/source/test/gtx/gtx_integer.cpp
new file mode 100644
index 0000000..4c4225d
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_integer.cpp
@@ -0,0 +1,108 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/exponential.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtx/integer.hpp>
+#include <cstdio>
+/*
+int test_floor_log2()
+{
+ int Error = 0;
+
+ for(std::size_t i = 1; i < 1000000; ++i)
+ {
+ glm::uint A = glm::floor_log2(glm::uint(i));
+ glm::uint B = glm::uint(glm::floor(glm::log2(double(i)))); // Will fail with float, lack of accuracy
+
+ Error += A == B ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+*/
+int test_log2()
+{
+ int Error = 0;
+
+ for(std::size_t i = 1; i < 24; ++i)
+ {
+ glm::uint A = glm::log2(glm::uint(1 << i));
+ glm::uint B = glm::uint(glm::log2(double(1 << i)));
+
+ //Error += glm::equalEpsilon(double(A), B, 1.0) ? 0 : 1;
+ Error += glm::abs(double(A) - B) <= 24 ? 0 : 1;
+ assert(!Error);
+
+ std::printf("Log2(%d) error A=%d, B=%d\n", 1 << i, A, B);
+ }
+
+ std::printf("log2 error=%d\n", Error);
+
+ return Error;
+}
+
+int test_nlz()
+{
+ int Error = 0;
+
+ for(glm::uint i = 1; i < glm::uint(33); ++i)
+ Error += glm::nlz(i) == glm::uint(31u) - glm::findMSB(i) ? 0 : 1;
+ //printf("%d, %d\n", glm::nlz(i), 31u - glm::findMSB(i));
+
+ return Error;
+}
+
+int test_pow_uint()
+{
+ int Error = 0;
+
+ glm::uint const p0 = glm::pow(2u, 0u);
+ Error += p0 == 1u ? 0 : 1;
+
+ glm::uint const p1 = glm::pow(2u, 1u);
+ Error += p1 == 2u ? 0 : 1;
+
+ glm::uint const p2 = glm::pow(2u, 2u);
+ Error += p2 == 4u ? 0 : 1;
+
+ return Error;
+}
+
+int test_pow_int()
+{
+ int Error = 0;
+
+ int const p0 = glm::pow(2, 0u);
+ Error += p0 == 1 ? 0 : 1;
+
+ int const p1 = glm::pow(2, 1u);
+ Error += p1 == 2 ? 0 : 1;
+
+ int const p2 = glm::pow(2, 2u);
+ Error += p2 == 4 ? 0 : 1;
+
+ int const p0n = glm::pow(-2, 0u);
+ Error += p0n == -1 ? 0 : 1;
+
+ int const p1n = glm::pow(-2, 1u);
+ Error += p1n == -2 ? 0 : 1;
+
+ int const p2n = glm::pow(-2, 2u);
+ Error += p2n == 4 ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_nlz();
+// Error += test_floor_log2();
+ Error += test_log2();
+ Error += test_pow_uint();
+ Error += test_pow_int();
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_intersect.cpp b/3rdparty/glm/source/test/gtx/gtx_intersect.cpp
new file mode 100644
index 0000000..c4a1b2a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_intersect.cpp
@@ -0,0 +1,88 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtx/intersect.hpp>
+
+int test_intersectRayPlane()
+{
+ int Error = 0;
+ glm::vec3 const PlaneOrigin(0, 0, 1);
+ glm::vec3 const PlaneNormal(0, 0, -1);
+ glm::vec3 const RayOrigin(0, 0, 0);
+ glm::vec3 const RayDir(0, 0, 1);
+
+ // check that inversion of the plane normal has no effect
+ {
+ float Distance = 0;
+ bool const Result = glm::intersectRayPlane(RayOrigin, RayDir, PlaneOrigin, PlaneNormal, Distance);
+ Error += glm::abs(Distance - 1.f) <= std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += Result ? 0 : 1;
+ }
+ {
+ float Distance = 0;
+ bool const Result = glm::intersectRayPlane(RayOrigin, RayDir, PlaneOrigin, -1.f * PlaneNormal, Distance);
+ Error += glm::abs(Distance - 1.f) <= std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += Result ? 0 : 1;
+ }
+
+ // check if plane is before of behind the ray origin
+ {
+ float Distance = 9.9999f; // value should not be changed
+ bool const Result = glm::intersectRayPlane(RayOrigin, RayDir, -1.f * PlaneOrigin, PlaneNormal, Distance);
+ Error += glm::abs(Distance - 9.9999f) <= std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += Result ? 1 : 0; // there is no intersection in front of the ray origin, only behind
+ }
+
+ return Error;
+}
+
+int test_intersectRayTriangle()
+{
+ int Error = 0;
+
+ glm::vec3 const Orig(0, 0, 2);
+ glm::vec3 const Dir(0, 0, -1);
+ glm::vec3 const Vert0(0, 0, 0);
+ glm::vec3 const Vert1(-1, -1, 0);
+ glm::vec3 const Vert2(1, -1, 0);
+ glm::vec2 BaryPosition(0);
+ float Distance = 0;
+
+ bool const Result = glm::intersectRayTriangle(Orig, Dir, Vert0, Vert1, Vert2, BaryPosition, Distance);
+
+ Error += glm::all(glm::epsilonEqual(BaryPosition, glm::vec2(0), std::numeric_limits<float>::epsilon())) ? 0 : 1;
+ Error += glm::abs(Distance - 2.f) <= std::numeric_limits<float>::epsilon() ? 0 : 1;
+ Error += Result ? 0 : 1;
+
+ return Error;
+}
+
+int test_intersectLineTriangle()
+{
+ int Error = 0;
+
+ glm::vec3 const Orig(0, 0, 2);
+ glm::vec3 const Dir(0, 0, -1);
+ glm::vec3 const Vert0(0, 0, 0);
+ glm::vec3 const Vert1(-1, -1, 0);
+ glm::vec3 const Vert2(1, -1, 0);
+ glm::vec3 Position(2.0f, 0.0f, 0.0f);
+
+ bool const Result = glm::intersectLineTriangle(Orig, Dir, Vert0, Vert1, Vert2, Position);
+
+ Error += glm::all(glm::epsilonEqual(Position, glm::vec3(2.0f, 0.0f, 0.0f), std::numeric_limits<float>::epsilon())) ? 0 : 1;
+ Error += Result ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_intersectRayPlane();
+ Error += test_intersectRayTriangle();
+ Error += test_intersectLineTriangle();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_io.cpp b/3rdparty/glm/source/test/gtx/gtx_io.cpp
new file mode 100644
index 0000000..7d90fd7
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_io.cpp
@@ -0,0 +1,186 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+#if GLM_LANG & GLM_LANG_CXXMS_FLAG
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtx/io.hpp>
+#include <iostream>
+#include <sstream>
+#include <typeinfo>
+
+namespace
+{
+ template<typename CTy, typename CTr>
+ std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>& os, glm::qualifier const& a)
+ {
+ typename std::basic_ostream<CTy,CTr>::sentry const cerberus(os);
+
+ if (cerberus)
+ {
+ switch (a) {
+ case glm::highp: os << "uhi"; break;
+ case glm::mediump: os << "umd"; break;
+ case glm::lowp: os << "ulo"; break;
+# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+ case glm::aligned_highp: os << "ahi"; break;
+ case glm::aligned_mediump: os << "amd"; break;
+ case glm::aligned_lowp: os << "alo"; break;
+# endif
+ }
+ }
+
+ return os;
+ }
+
+ template<typename U, glm::qualifier P, typename T, typename CTy, typename CTr>
+ std::basic_string<CTy> type_name(std::basic_ostream<CTy,CTr>&, T const&)
+ {
+ std::basic_ostringstream<CTy,CTr> ostr;
+
+ if (typeid(T) == typeid(glm::qua<U,P>)) { ostr << "quat"; }
+ else if (typeid(T) == typeid(glm::vec<2, U,P>)) { ostr << "vec2"; }
+ else if (typeid(T) == typeid(glm::vec<3, U,P>)) { ostr << "vec3"; }
+ else if (typeid(T) == typeid(glm::vec<4, U,P>)) { ostr << "vec4"; }
+ else if (typeid(T) == typeid(glm::mat<2, 2, U,P>)) { ostr << "mat2x2"; }
+ else if (typeid(T) == typeid(glm::mat<2, 3, U,P>)) { ostr << "mat2x3"; }
+ else if (typeid(T) == typeid(glm::mat<2, 4, U,P>)) { ostr << "mat2x4"; }
+ else if (typeid(T) == typeid(glm::mat<3, 2, U,P>)) { ostr << "mat3x2"; }
+ else if (typeid(T) == typeid(glm::mat<3, 3, U,P>)) { ostr << "mat3x3"; }
+ else if (typeid(T) == typeid(glm::mat<3, 4, U,P>)) { ostr << "mat3x4"; }
+ else if (typeid(T) == typeid(glm::mat<4, 2, U,P>)) { ostr << "mat4x2"; }
+ else if (typeid(T) == typeid(glm::mat<4, 3, U,P>)) { ostr << "mat4x3"; }
+ else if (typeid(T) == typeid(glm::mat<4, 4, U,P>)) { ostr << "mat4x4"; }
+ else { ostr << "unknown"; }
+
+ ostr << '<' << typeid(U).name() << ',' << P << '>';
+
+ return ostr.str();
+ }
+} // namespace {
+
+template<typename T, glm::qualifier P, typename OS>
+int test_io_quat(OS& os)
+{
+ os << '\n' << typeid(OS).name() << '\n';
+
+ glm::qua<T, P> const q(1, 0, 0, 0);
+
+ {
+ glm::io::basic_format_saver<typename OS::char_type> const iofs(os);
+
+ os << glm::io::precision(2) << glm::io::width(1 + 2 + 1 + 2)
+ << type_name<T, P>(os, q) << ": " << q << '\n';
+ }
+
+ {
+ glm::io::basic_format_saver<typename OS::char_type> const iofs(os);
+
+ os << glm::io::unformatted
+ << type_name<T, P>(os, q) << ": " << q << '\n';
+ }
+
+ return 0;
+}
+
+template<typename T, glm::qualifier P, typename OS>
+int test_io_vec(OS& os)
+{
+ os << '\n' << typeid(OS).name() << '\n';
+
+ glm::vec<2, T,P> const v2(0, 1);
+ glm::vec<3, T,P> const v3(2, 3, 4);
+ glm::vec<4, T,P> const v4(5, 6, 7, 8);
+
+ os << type_name<T,P>(os, v2) << ": " << v2 << '\n'
+ << type_name<T,P>(os, v3) << ": " << v3 << '\n'
+ << type_name<T,P>(os, v4) << ": " << v4 << '\n';
+
+ glm::io::basic_format_saver<typename OS::char_type> const iofs(os);
+
+ os << glm::io::precision(2) << glm::io::width(1 + 2 + 1 + 2)
+ << type_name<T,P>(os, v2) << ": " << v2 << '\n'
+ << type_name<T,P>(os, v3) << ": " << v3 << '\n'
+ << type_name<T,P>(os, v4) << ": " << v4 << '\n';
+
+ return 0;
+}
+
+template<typename T, glm::qualifier P, typename OS>
+int test_io_mat(OS& os, glm::io::order_type otype)
+{
+ os << '\n' << typeid(OS).name() << '\n';
+
+ glm::vec<2, T,P> const v2_1( 0, 1);
+ glm::vec<2, T,P> const v2_2( 2, 3);
+ glm::vec<2, T,P> const v2_3( 4, 5);
+ glm::vec<2, T,P> const v2_4( 6, 7);
+ glm::vec<3, T,P> const v3_1( 8, 9, 10);
+ glm::vec<3, T,P> const v3_2(11, 12, 13);
+ glm::vec<3, T,P> const v3_3(14, 15, 16);
+ glm::vec<3, T,P> const v3_4(17, 18, 19);
+ glm::vec<4, T,P> const v4_1(20, 21, 22, 23);
+ glm::vec<4, T,P> const v4_2(24, 25, 26, 27);
+ glm::vec<4, T,P> const v4_3(28, 29, 30, 31);
+ glm::vec<4, T,P> const v4_4(32, 33, 34, 35);
+
+ glm::io::basic_format_saver<typename OS::char_type> const iofs(os);
+
+ os << glm::io::precision(2) << glm::io::width(1 + 2 + 1 + 2)
+ << glm::io::order(otype)
+ << "mat2x2<" << typeid(T).name() << ',' << P << ">: " << glm::mat<2, 2, T,P>(v2_1, v2_2) << '\n'
+ << "mat2x3<" << typeid(T).name() << ',' << P << ">: " << glm::mat<2, 3, T,P>(v3_1, v3_2) << '\n'
+ << "mat2x4<" << typeid(T).name() << ',' << P << ">: " << glm::mat<2, 4, T,P>(v4_1, v4_2) << '\n'
+ << "mat3x2<" << typeid(T).name() << ',' << P << ">: " << glm::mat<3, 2, T,P>(v2_1, v2_2, v2_3) << '\n'
+ << "mat3x3<" << typeid(T).name() << ',' << P << ">: " << glm::mat<3, 3, T,P>(v3_1, v3_2, v3_3) << '\n'
+ << "mat3x4<" << typeid(T).name() << ',' << P << ">: " << glm::mat<3, 4, T,P>(v4_1, v4_2, v4_3) << '\n'
+ << "mat4x2<" << typeid(T).name() << ',' << P << ">: " << glm::mat<4, 2, T,P>(v2_1, v2_2, v2_3, v2_4) << '\n'
+ << "mat4x3<" << typeid(T).name() << ',' << P << ">: " << glm::mat<4, 3, T,P>(v3_1, v3_2, v3_3, v3_4) << '\n'
+ << "mat4x4<" << typeid(T).name() << ',' << P << ">: " << glm::mat<4, 4, T,P>(v4_1, v4_2, v4_3, v4_4) << '\n';
+
+ os << glm::io::unformatted
+ << glm::io::order(otype)
+ << "mat2x2<" << typeid(T).name() << ',' << P << ">: " << glm::mat<2, 2, T,P>(v2_1, v2_2) << '\n'
+ << "mat2x3<" << typeid(T).name() << ',' << P << ">: " << glm::mat<2, 3, T,P>(v3_1, v3_2) << '\n'
+ << "mat2x4<" << typeid(T).name() << ',' << P << ">: " << glm::mat<2, 4, T,P>(v4_1, v4_2) << '\n'
+ << "mat3x2<" << typeid(T).name() << ',' << P << ">: " << glm::mat<3, 2, T,P>(v2_1, v2_2, v2_3) << '\n'
+ << "mat3x3<" << typeid(T).name() << ',' << P << ">: " << glm::mat<3, 3, T,P>(v3_1, v3_2, v3_3) << '\n'
+ << "mat3x4<" << typeid(T).name() << ',' << P << ">: " << glm::mat<3, 4, T,P>(v4_1, v4_2, v4_3) << '\n'
+ << "mat4x2<" << typeid(T).name() << ',' << P << ">: " << glm::mat<4, 2, T,P>(v2_1, v2_2, v2_3, v2_4) << '\n'
+ << "mat4x3<" << typeid(T).name() << ',' << P << ">: " << glm::mat<4, 3, T,P>(v3_1, v3_2, v3_3, v3_4) << '\n'
+ << "mat4x4<" << typeid(T).name() << ',' << P << ">: " << glm::mat<4, 4, T,P>(v4_1, v4_2, v4_3, v4_4) << '\n';
+
+ return 0;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_io_quat<float, glm::highp>(std::cout);
+ Error += test_io_quat<float, glm::highp>(std::wcout);
+ Error += test_io_quat<int, glm::mediump>(std::cout);
+ Error += test_io_quat<int, glm::mediump>(std::wcout);
+ Error += test_io_quat<glm::uint, glm::lowp>(std::cout);
+ Error += test_io_quat<glm::uint, glm::lowp>(std::wcout);
+
+ Error += test_io_vec<float, glm::highp>(std::cout);
+ Error += test_io_vec<float, glm::highp>(std::wcout);
+ Error += test_io_vec<int, glm::mediump>(std::cout);
+ Error += test_io_vec<int, glm::mediump>(std::wcout);
+ Error += test_io_vec<glm::uint, glm::lowp>(std::cout);
+ Error += test_io_vec<glm::uint, glm::lowp>(std::wcout);
+
+ Error += test_io_mat<float, glm::highp>(std::cout, glm::io::column_major);
+ Error += test_io_mat<float, glm::lowp>(std::wcout, glm::io::column_major);
+ Error += test_io_mat<float, glm::highp>(std::cout, glm::io::row_major);
+ Error += test_io_mat<float, glm::lowp>(std::wcout, glm::io::row_major);
+
+ return Error;
+}
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif// GLM_LANG & GLM_LANG_CXXMS_FLAG
diff --git a/3rdparty/glm/source/test/gtx/gtx_load.cpp b/3rdparty/glm/source/test/gtx/gtx_load.cpp
new file mode 100644
index 0000000..1467b9b
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_load.cpp
@@ -0,0 +1,124 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+/*
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+
+#include <glm/gtx/common.hpp>
+#include <glm/gtc/integer.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/type_aligned.hpp>
+#include <glm/ext/vector_relational.hpp>
+
+namespace glm
+{
+ enum genTypeEnum
+ {
+ QUALIFIER_HIGHP,
+ QUALIFIER_MEDIUMP,
+ QUALIFIER_LOWP
+ };
+
+ template <typename genType>
+ struct genTypeTrait
+ {};
+
+ template <length_t L, typename T>
+ struct genTypeTrait<vec<L, T, aligned_highp> >
+ {
+ static const genTypeEnum GENTYPE = QUALIFIER_HIGHP;
+ };
+
+ template <length_t L, typename T>
+ struct genTypeTrait<vec<L, T, aligned_mediump> >
+ {
+ static const genTypeEnum GENTYPE = QUALIFIER_MEDIUMP;
+ };
+
+ template <length_t L, typename T>
+ struct genTypeTrait<vec<L, T, aligned_lowp> >
+ {
+ static const genTypeEnum GENTYPE = QUALIFIER_LOWP;
+ };
+
+ template<length_t L, typename T, qualifier Q, bool isAligned>
+ struct load_gentype
+ {
+
+ };
+
+# if GLM_ARCH & GLM_ARCH_SSE_BIT
+ template<qualifier Q>
+ struct load_gentype<4, float, Q, true>
+ {
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, float, Q> load(float const* Mem)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_loadu_ps(Mem);
+ return Result;
+ }
+ };
+# endif//GLM_ARCH & GLM_ARCH_SSE_BIT
+
+ template<typename genType>
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType example_identity()
+ {
+ return detail::init_gentype<genType, detail::genTypeTrait<genType>::GENTYPE>::identity();
+ }
+
+ template <typename genType, typename valType>
+ genType load(valType const* Mem)
+ {
+
+ }
+
+ aligned_vec4 loadu(float const* Mem)
+ {
+ aligned_vec4 Result;
+# if GLM_ARCH & GLM_ARCH_SSE_BIT
+ Result.data = _mm_loadu_ps(Mem);
+# else
+ Result[0] = *(Mem + 0);
+ Result[1] = *(Mem + 1);
+ Result[2] = *(Mem + 2);
+ Result[3] = *(Mem + 3);
+# endif//GLM_ARCH & GLM_ARCH_SSE_BIT
+ return Result;
+ }
+
+ aligned_vec4 loada(float const* Mem)
+ {
+ aligned_vec4 Result;
+# if GLM_ARCH & GLM_ARCH_SSE_BIT
+ Result.data = _mm_load_ps(Mem);
+# else
+ Result[0] = *(Mem + 0);
+ Result[1] = *(Mem + 1);
+ Result[2] = *(Mem + 2);
+ Result[3] = *(Mem + 3);
+# endif//GLM_ARCH & GLM_ARCH_SSE_BIT
+ return Result;
+ }
+}//namespace glm
+
+int test_vec4_load()
+{
+ int Error = 0;
+
+ float Data[] = {1.f, 2.f, 3.f, 4.f};
+ glm::aligned_vec4 const V = glm::loadu(Data);
+ Error += glm::all(glm::equal(V, glm::aligned_vec4(1.f, 2.f, 3.f, 4.f), glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+#endif
+*/
+int main()
+{
+ int Error = 0;
+/*
+# if GLM_CONFIG_SIMD == GLM_ENABLE
+ Error += test_vec4_load();
+# endif
+*/
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_log_base.cpp b/3rdparty/glm/source/test/gtx/gtx_log_base.cpp
new file mode 100644
index 0000000..37c7464
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_log_base.cpp
@@ -0,0 +1,54 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/log_base.hpp>
+#include <glm/gtc/vec1.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/exponential.hpp>
+
+namespace test_log
+{
+ int run()
+ {
+ int Error = 0;
+
+ {
+ float A = glm::log(10.f, 2.0f);
+ float B = glm::log2(10.f);
+ Error += glm::epsilonEqual(A, B, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ glm::vec1 A = glm::log(glm::vec1(10.f), glm::vec1(2.0f));
+ glm::vec1 B = glm::log2(glm::vec1(10.f));
+ Error += glm::all(glm::epsilonEqual(A, B, glm::vec1(0.00001f))) ? 0 : 1;
+ }
+
+ {
+ glm::vec2 A = glm::log(glm::vec2(10.f), glm::vec2(2.0f));
+ glm::vec2 B = glm::log2(glm::vec2(10.f));
+ Error += glm::all(glm::epsilonEqual(A, B, glm::vec2(0.00001f))) ? 0 : 1;
+ }
+
+ {
+ glm::vec3 A = glm::log(glm::vec3(10.f), glm::vec3(2.0f));
+ glm::vec3 B = glm::log2(glm::vec3(10.f));
+ Error += glm::all(glm::epsilonEqual(A, B, glm::vec3(0.00001f))) ? 0 : 1;
+ }
+
+ {
+ glm::vec4 A = glm::log(glm::vec4(10.f), glm::vec4(2.0f));
+ glm::vec4 B = glm::log2(glm::vec4(10.f));
+ Error += glm::all(glm::epsilonEqual(A, B, glm::vec4(0.00001f))) ? 0 : 1;
+ }
+
+ return Error;
+ }
+}//namespace test_log
+
+int main()
+{
+ int Error(0);
+
+ Error += test_log::run();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_cross_product.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_cross_product.cpp
new file mode 100644
index 0000000..c1d0fa9
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_cross_product.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/matrix_cross_product.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_decompose.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_decompose.cpp
new file mode 100644
index 0000000..5a1884e
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_decompose.cpp
@@ -0,0 +1,19 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/matrix_decompose.hpp>
+
+int main()
+{
+ int Error(0);
+
+ glm::mat4 Matrix(1);
+
+ glm::vec3 Scale;
+ glm::quat Orientation;
+ glm::vec3 Translation;
+ glm::vec3 Skew(1);
+ glm::vec4 Perspective(1);
+
+ glm::decompose(Matrix, Scale, Orientation, Translation, Skew, Perspective);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_factorisation.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_factorisation.cpp
new file mode 100644
index 0000000..6771dba
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_factorisation.cpp
@@ -0,0 +1,105 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/matrix_factorisation.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/gtc/epsilon.hpp>
+
+template <glm::length_t C, glm::length_t R, typename T, glm::qualifier Q>
+int test_qr(glm::mat<C, R, T, Q> m)
+{
+ int Error = 0;
+
+ T const epsilon = static_cast<T>(1e-10);
+
+ glm::mat<(C < R ? C : R), R, T, Q> q(-999);
+ glm::mat<C, (C < R ? C : R), T, Q> r(-999);
+
+ glm::qr_decompose(m, q, r);
+
+ //Test if q*r really equals the input matrix
+ glm::mat<C, R, T, Q> tm = q*r;
+ glm::mat<C, R, T, Q> err = tm - m;
+
+ for (glm::length_t i = 0; i < C; i++)
+ for (glm::length_t j = 0; j < R; j++)
+ Error += glm::abs(err[i][j]) > epsilon ? 1 : 0;
+
+ //Test if the columns of q are orthonormal
+ for (glm::length_t i = 0; i < (C < R ? C : R); i++)
+ {
+ Error += (length(q[i]) - 1) > epsilon ? 1 : 0;
+
+ for (glm::length_t j = 0; j<i; j++)
+ Error += glm::abs(dot(q[i], q[j])) > epsilon ? 1 : 0;
+ }
+
+ //Test if the matrix r is upper triangular
+ for (glm::length_t i = 0; i < C; i++)
+ for (glm::length_t j = i + 1; j < (C < R ? C : R); j++)
+ Error += glm::epsilonEqual(r[i][j], static_cast<T>(0), glm::epsilon<T>()) ? 0 : 1;
+
+ return Error;
+}
+
+template <glm::length_t C, glm::length_t R, typename T, glm::qualifier Q>
+int test_rq(glm::mat<C, R, T, Q> m)
+{
+ int Error = 0;
+
+ T const epsilon = static_cast<T>(1e-10);
+
+ glm::mat<C, (C < R ? C : R), T, Q> q(-999);
+ glm::mat<(C < R ? C : R), R, T, Q> r(-999);
+
+ glm::rq_decompose(m, r, q);
+
+ //Test if q*r really equals the input matrix
+ glm::mat<C, R, T, Q> tm = r*q;
+ glm::mat<C, R, T, Q> err = tm - m;
+
+ for (glm::length_t i = 0; i < C; i++)
+ for (glm::length_t j = 0; j < R; j++)
+ Error += glm::abs(err[i][j]) > epsilon ? 1 : 0;
+
+ //Test if the rows of q are orthonormal
+ glm::mat<(C < R ? C : R), C, T, Q> tq = transpose(q);
+
+ for (glm::length_t i = 0; i < (C < R ? C : R); i++)
+ {
+ Error += (length(tq[i]) - 1) > epsilon ? 1 : 0;
+
+ for (glm::length_t j = 0; j<i; j++)
+ Error += glm::abs(dot(tq[i], tq[j])) > epsilon ? 1 : 0;
+ }
+
+ //Test if the matrix r is upper triangular
+ for (glm::length_t i = 0; i < (C < R ? C : R); i++)
+ for (glm::length_t j = R - (C < R ? C : R) + i + 1; j < R; j++)
+ Error += glm::epsilonEqual(r[i][j], static_cast<T>(0), glm::epsilon<T>()) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ //Test QR square
+ Error += test_qr(glm::dmat3(12.0, 6.0, -4.0, -51.0, 167.0, 24.0, 4.0, -68.0, -41.0)) ? 1 : 0;
+
+ //Test RQ square
+ Error += test_rq(glm::dmat3(12.0, 6.0, -4.0, -51.0, 167.0, 24.0, 4.0, -68.0, -41.0)) ? 1 : 0;
+
+ //Test QR triangular 1
+ Error += test_qr(glm::dmat3x4(12.0, 6.0, -4.0, -51.0, 167.0, 24.0, 4.0, -68.0, -41.0, 7.0, 2.0, 15.0)) ? 1 : 0;
+
+ //Test QR triangular 2
+ Error += test_qr(glm::dmat4x3(12.0, 6.0, -4.0, -51.0, 167.0, 24.0, 4.0, -68.0, -41.0, 7.0, 2.0, 15.0)) ? 1 : 0;
+
+ //Test RQ triangular 1 : Fails at the triangular test
+ Error += test_rq(glm::dmat3x4(12.0, 6.0, -4.0, -51.0, 167.0, 24.0, 4.0, -68.0, -41.0, 7.0, 2.0, 15.0)) ? 1 : 0;
+
+ //Test QR triangular 2
+ Error += test_rq(glm::dmat4x3(12.0, 6.0, -4.0, -51.0, 167.0, 24.0, 4.0, -68.0, -41.0, 7.0, 2.0, 15.0)) ? 1 : 0;
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_interpolation.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_interpolation.cpp
new file mode 100644
index 0000000..108f02e
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_interpolation.cpp
@@ -0,0 +1,122 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtx/component_wise.hpp>
+#include <glm/gtx/matrix_interpolation.hpp>
+
+#include <iostream>
+#include <limits>
+#include <math.h>
+
+
+static int test_axisAngle()
+{
+ int Error = 0;
+
+ glm::mat4 m1(-0.9946f, 0.0f, -0.104531f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.104531f, 0.0f, -0.9946f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f);
+ glm::mat4 m2(-0.992624f, 0.0f, -0.121874f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.121874f, 0.0f, -0.992624f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f);
+
+ glm::mat4 const m1rot = glm::extractMatrixRotation(m1);
+ glm::mat4 const dltRotation = m2 * glm::transpose(m1rot);
+
+ glm::vec3 dltAxis(0.0f);
+ float dltAngle = 0.0f;
+ glm::axisAngle(dltRotation, dltAxis, dltAngle);
+
+ std::cout << "dltAxis: (" << dltAxis.x << ", " << dltAxis.y << ", " << dltAxis.z << "), dltAngle: " << dltAngle << std::endl;
+
+ glm::quat q = glm::quat_cast(dltRotation);
+ std::cout << "q: (" << q.x << ", " << q.y << ", " << q.z << ", " << q.w << ")" << std::endl;
+ float yaw = glm::yaw(q);
+ std::cout << "Yaw: " << yaw << std::endl;
+
+ return Error;
+}
+
+template <class T>
+int testForAxisAngle(glm::vec<3, T, glm::defaultp> const axisTrue, T const angleTrue)
+{
+ T const eps = std::sqrt(std::numeric_limits<T>::epsilon());
+
+ glm::mat<4, 4, T, glm::defaultp> const matTrue = glm::axisAngleMatrix(axisTrue, angleTrue);
+
+ glm::vec<3, T, glm::defaultp> axis;
+ T angle;
+ glm::axisAngle(matTrue, axis, angle);
+ glm::mat<4, 4, T, glm::defaultp> const matRebuilt = glm::axisAngleMatrix(axis, angle);
+
+ glm::mat<4, 4, T, glm::defaultp> const errMat = matTrue - matRebuilt;
+ T const maxErr = glm::compMax(glm::vec<4, T, glm::defaultp>(
+ glm::compMax(glm::abs(errMat[0])),
+ glm::compMax(glm::abs(errMat[1])),
+ glm::compMax(glm::abs(errMat[2])),
+ glm::compMax(glm::abs(errMat[3]))
+ ));
+
+ return maxErr < eps ? 0 : 1;
+}
+
+static int test_axisAngle2()
+{
+ int Error = 0;
+
+ Error += testForAxisAngle(glm::vec3(0.0f, 1.0f, 0.0f), 0.0f);
+ Error += testForAxisAngle(glm::vec3(0.358f, 0.0716f, 0.9309f), 0.00001f);
+ Error += testForAxisAngle(glm::vec3(1.0f, 0.0f, 0.0f), 0.0001f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 0.0f, 1.0f), 0.001f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 0.0f, 1.0f), 0.001f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 1.0f, 0.0f), 0.005f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 0.0f, 1.0f), 0.005f);
+ Error += testForAxisAngle(glm::vec3(0.358f, 0.0716f, 0.9309f), 0.03f);
+ Error += testForAxisAngle(glm::vec3(0.358f, 0.0716f, 0.9309f), 0.0003f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 0.0f, 1.0f), 0.01f);
+ Error += testForAxisAngle(glm::dvec3(0.0f, 1.0f, 0.0f), 0.00005);
+ Error += testForAxisAngle(glm::dvec3(-1.0f, 0.0f, 0.0f), 0.000001);
+ Error += testForAxisAngle(glm::dvec3(0.7071f, 0.7071f, 0.0f), 0.5);
+ Error += testForAxisAngle(glm::dvec3(0.7071f, 0.0f, 0.7071f), 0.0002);
+ Error += testForAxisAngle(glm::dvec3(0.7071f, 0.0f, 0.7071f), 0.00002);
+ Error += testForAxisAngle(glm::dvec3(0.7071f, 0.0f, 0.7071f), 0.000002);
+ Error += testForAxisAngle(glm::dvec3(0.7071f, 0.0f, 0.7071f), 0.0000002);
+ Error += testForAxisAngle(glm::vec3(0.0f, 0.7071f, 0.7071f), 1.3f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 0.7071f, 0.7071f), 6.3f);
+ Error += testForAxisAngle(glm::vec3(1.0f, 0.0f, 0.0f), -0.23456f);
+ Error += testForAxisAngle(glm::vec3(1.0f, 0.0f, 0.0f), glm::pi<float>());
+ Error += testForAxisAngle(glm::vec3(0.0f, 1.0f, 0.0f), -glm::pi<float>());
+ Error += testForAxisAngle(glm::vec3(0.358f, 0.0716f, 0.9309f), -glm::pi<float>());
+ Error += testForAxisAngle(glm::vec3(1.0f, 0.0f, 0.0f), glm::pi<float>() + 2e-6f);
+ Error += testForAxisAngle(glm::vec3(1.0f, 0.0f, 0.0f), glm::pi<float>() + 1e-4f);
+ Error += testForAxisAngle(glm::vec3(0.0f, 1.0f, 0.0f), -glm::pi<float>() + 1e-3f);
+ Error += testForAxisAngle(glm::vec3(0.358f, 0.0716f, 0.9309f), -glm::pi<float>() + 5e-3f);
+
+ return Error;
+}
+
+static int test_rotate()
+{
+ glm::mat4 m2(1.0);
+ float myAngle = 1.0f;
+ m2 = glm::rotate(m2, myAngle, glm::vec3(1.0f, 0.0f, 0.0f));
+ glm::vec3 m2Axis;
+ float m2Angle;
+ glm::axisAngle(m2, m2Axis, m2Angle);
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_axisAngle();
+ Error += test_axisAngle2();
+ Error += test_rotate();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_major_storage.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_major_storage.cpp
new file mode 100644
index 0000000..21de7f7
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_major_storage.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/matrix_major_storage.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_operation.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_operation.cpp
new file mode 100644
index 0000000..79c95c5
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_operation.cpp
@@ -0,0 +1,86 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtx/matrix_operation.hpp>
+#include <limits>
+
+int test_adjugate()
+{
+ int Error = 0;
+
+ const float epsilon = std::numeric_limits<float>::epsilon();
+
+ // mat2
+ const glm::mat2 m2(
+ 2, 3,
+ 1, 5
+ );
+
+ const glm::mat2 eam2(
+ 5, -3,
+ -1, 2
+ );
+
+ const glm::mat2 am2 = glm::adjugate(m2);
+
+ Error += glm::all(glm::bvec2(
+ glm::all(glm::epsilonEqual(am2[0], eam2[0], epsilon)),
+ glm::all(glm::epsilonEqual(am2[1], eam2[1], epsilon))
+ )) ? 0 : 1;
+
+ // mat3
+ const glm::mat3 m3(
+ 2, 3, 3,
+ 1, 5, 4,
+ 4, 6, 8
+ );
+
+ const glm::mat3 eam3(
+ 16, -6, -3,
+ 8, 4, -5,
+ -14, 0, 7
+ );
+
+ const glm::mat3 am3 = glm::adjugate(m3);
+
+ Error += glm::all(glm::bvec3(
+ glm::all(glm::epsilonEqual(am3[0], eam3[0], epsilon)),
+ glm::all(glm::epsilonEqual(am3[1], eam3[1], epsilon)),
+ glm::all(glm::epsilonEqual(am3[2], eam3[2], epsilon))
+ )) ? 0 : 1;
+
+ // mat4
+ const glm::mat4 m4(
+ 2, 3, 3, 1,
+ 1, 5, 4, 3,
+ 4, 6, 8, 5,
+ -2, -3, -3, 4
+ );
+
+ const glm::mat4 eam4(
+ 97, -30, -15, 17,
+ 45, 20, -25, 5,
+ -91, 0, 35, -21,
+ 14, 0, 0, 14
+ );
+
+ const glm::mat4 am4 = glm::adjugate(m4);
+
+ Error += glm::all(glm::bvec4(
+ glm::all(glm::epsilonEqual(am4[0], eam4[0], epsilon)),
+ glm::all(glm::epsilonEqual(am4[1], eam4[1], epsilon)),
+ glm::all(glm::epsilonEqual(am4[2], eam4[2], epsilon)),
+ glm::all(glm::epsilonEqual(am4[3], eam4[3], epsilon))
+ )) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_adjugate();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_query.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_query.cpp
new file mode 100644
index 0000000..0dda1f0
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_query.cpp
@@ -0,0 +1,66 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/matrix_query.hpp>
+
+int test_isNull()
+{
+ int Error(0);
+
+ bool TestA = glm::isNull(glm::mat4(0), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int test_isIdentity()
+{
+ int Error(0);
+
+ {
+ bool TestA = glm::isIdentity(glm::mat2(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+ }
+ {
+ bool TestA = glm::isIdentity(glm::mat3(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+ }
+ {
+ bool TestA = glm::isIdentity(glm::mat4(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_isNormalized()
+{
+ int Error(0);
+
+ bool TestA = glm::isNormalized(glm::mat4(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int test_isOrthogonal()
+{
+ int Error(0);
+
+ bool TestA = glm::isOrthogonal(glm::mat4(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_isNull();
+ Error += test_isIdentity();
+ Error += test_isNormalized();
+ Error += test_isOrthogonal();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_matrix_transform_2d.cpp b/3rdparty/glm/source/test/gtx/gtx_matrix_transform_2d.cpp
new file mode 100644
index 0000000..f80d263
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_matrix_transform_2d.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/matrix_transform_2d.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_mixed_product.cpp b/3rdparty/glm/source/test/gtx/gtx_mixed_product.cpp
new file mode 100644
index 0000000..ab59bb2
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_mixed_product.cpp
@@ -0,0 +1,18 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// OpenGL Mathematics Copyright (c) 2005 - 2013 G-Truc Creation (www.g-truc.net)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Created : 2013-10-25
+// Updated : 2013-10-25
+// Licence : This source is under MIT licence
+// File : test/gtx/associated_min_max.cpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include <glm/gtc/type_precision.hpp>
+#include <glm/gtx/associated_min_max.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_norm.cpp b/3rdparty/glm/source/test/gtx/gtx_norm.cpp
new file mode 100644
index 0000000..e82102a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_norm.cpp
@@ -0,0 +1,81 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/norm.hpp>
+
+
+int test_lMaxNorm()
+{
+ int Error(0);
+
+ {
+ float norm = glm::lMaxNorm(glm::vec3(-1, -2, -3));
+ Error += glm::epsilonEqual(norm, 3.f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ float norm = glm::lMaxNorm(glm::vec3(2, 3, 1));
+ Error += glm::epsilonEqual(norm, 3.f, 0.00001f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_lxNorm()
+{
+ int Error(0);
+
+ {
+ unsigned int depth_1 = 1;
+ float normA = glm::lxNorm(glm::vec3(2, 3, 1), depth_1);
+ float normB = glm::l1Norm(glm::vec3(2, 3, 1));
+ Error += glm::epsilonEqual(normA, normB, 0.00001f) ? 0 : 1;
+ Error += glm::epsilonEqual(normA, 6.f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ unsigned int depth_1 = 1;
+ float normA = glm::lxNorm(glm::vec3(-1, -2, -3), depth_1);
+ float normB = glm::l1Norm(glm::vec3(-1, -2, -3));
+ Error += glm::epsilonEqual(normA, normB, 0.00001f) ? 0 : 1;
+ Error += glm::epsilonEqual(normA, 6.f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ unsigned int depth_2 = 2;
+ float normA = glm::lxNorm(glm::vec3(2, 3, 1), depth_2);
+ float normB = glm::l2Norm(glm::vec3(2, 3, 1));
+ Error += glm::epsilonEqual(normA, normB, 0.00001f) ? 0 : 1;
+ Error += glm::epsilonEqual(normA, 3.741657387f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ unsigned int depth_2 = 2;
+ float normA = glm::lxNorm(glm::vec3(-1, -2, -3), depth_2);
+ float normB = glm::l2Norm(glm::vec3(-1, -2, -3));
+ Error += glm::epsilonEqual(normA, normB, 0.00001f) ? 0 : 1;
+ Error += glm::epsilonEqual(normA, 3.741657387f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ unsigned int oddDepth = 3;
+ float norm = glm::lxNorm(glm::vec3(2, 3, 1), oddDepth);
+ Error += glm::epsilonEqual(norm, 3.301927249f, 0.00001f) ? 0 : 1;
+ }
+
+ {
+ unsigned int oddDepth = 3;
+ float norm = glm::lxNorm(glm::vec3(-1, -2, -3), oddDepth);
+ Error += glm::epsilonEqual(norm, 3.301927249f, 0.00001f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_lMaxNorm();
+ Error += test_lxNorm();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_normal.cpp b/3rdparty/glm/source/test/gtx/gtx_normal.cpp
new file mode 100644
index 0000000..7a01ec0
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_normal.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/normal.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_normalize_dot.cpp b/3rdparty/glm/source/test/gtx/gtx_normalize_dot.cpp
new file mode 100644
index 0000000..9605863
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_normalize_dot.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/normalize_dot.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_number_precision.cpp b/3rdparty/glm/source/test/gtx/gtx_number_precision.cpp
new file mode 100644
index 0000000..a5a3ef2
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_number_precision.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/number_precision.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_optimum_pow.cpp b/3rdparty/glm/source/test/gtx/gtx_optimum_pow.cpp
new file mode 100644
index 0000000..c0a3fd4
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_optimum_pow.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/optimum_pow.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_orthonormalize.cpp b/3rdparty/glm/source/test/gtx/gtx_orthonormalize.cpp
new file mode 100644
index 0000000..0e7a8c8
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_orthonormalize.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/orthonormalize.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_pca.cpp b/3rdparty/glm/source/test/gtx/gtx_pca.cpp
new file mode 100644
index 0000000..120e277
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_pca.cpp
@@ -0,0 +1,724 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+#include <glm/gtx/pca.hpp>
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtx/string_cast.hpp>
+
+#include <cstdio>
+#include <vector>
+#if GLM_HAS_CXX11_STL == 1
+#include <random>
+#endif
+
+template<typename T>
+T myEpsilon();
+template<>
+GLM_INLINE GLM_CONSTEXPR float myEpsilon<float>() { return 0.00001f; }
+template<>
+GLM_INLINE GLM_CONSTEXPR double myEpsilon<double>() { return 0.000001; }
+
+template<glm::length_t D, typename T, glm::qualifier Q>
+bool vectorEpsilonEqual(glm::vec<D, T, Q> const& a, glm::vec<D, T, Q> const& b, T epsilon)
+{
+ for (int c = 0; c < D; ++c)
+ if (!glm::epsilonEqual(a[c], b[c], epsilon))
+ {
+ fprintf(stderr, "failing vectorEpsilonEqual: [%d] %lf != %lf (~%lf)\n",
+ c,
+ static_cast<double>(a[c]),
+ static_cast<double>(b[c]),
+ static_cast<double>(epsilon)
+ );
+ return false;
+ }
+ return true;
+}
+
+template<glm::length_t D, typename T, glm::qualifier Q>
+bool matrixEpsilonEqual(glm::mat<D, D, T, Q> const& a, glm::mat<D, D, T, Q> const& b, T epsilon)
+{
+ for (int c = 0; c < D; ++c)
+ for (int r = 0; r < D; ++r)
+ if (!glm::epsilonEqual(a[c][r], b[c][r], epsilon))
+ {
+ fprintf(stderr, "failing vectorEpsilonEqual: [%d][%d] %lf != %lf (~%lf)\n",
+ c, r,
+ static_cast<double>(a[c][r]),
+ static_cast<double>(b[c][r]),
+ static_cast<double>(epsilon)
+ );
+ return false;
+ }
+ return true;
+}
+
+template<typename T>
+GLM_INLINE bool sameSign(T const& a, T const& b)
+{
+ return ((a >= 0) && (b >= 0)) || ((a < 0) && (b < 0));
+}
+
+template<typename T>
+T failReport(T line)
+{
+ fprintf(stderr, "Failed in line %d\n", static_cast<int>(line));
+ return line;
+}
+
+// Test data: 1AGA 'agarose double helix'
+// https://www.rcsb.org/structure/1aga
+// The fourth coordinate is randomized
+namespace _1aga
+{
+
+ // Fills `outTestData` with hard-coded atom positions from 1AGA
+ // The fourth coordinate is randomized
+ template<typename vec>
+ void fillTestData(std::vector<vec>& outTestData)
+ {
+ // x,y,z coordinates copied from RCSB PDB file of 1AGA
+ // w coordinate randomized with standard normal distribution
+ static const double _1aga[] = {
+ 3.219, -0.637, 19.462, 2.286,
+ 4.519, 0.024, 18.980, -0.828,
+ 4.163, 1.425, 18.481, -0.810,
+ 3.190, 1.341, 17.330, -0.170,
+ 1.962, 0.991, 18.165, 0.816,
+ 2.093, 1.952, 19.331, 0.276,
+ 5.119, -0.701, 17.908, -0.490,
+ 3.517, 2.147, 19.514, -0.207,
+ 2.970, 2.609, 16.719, 0.552,
+ 2.107, -0.398, 18.564, 0.403,
+ 2.847, 2.618, 15.335, 0.315,
+ 1.457, 3.124, 14.979, 0.683,
+ 1.316, 3.291, 13.473, 0.446,
+ 2.447, 4.155, 12.931, 1.324,
+ 3.795, 3.614, 13.394, 0.112,
+ 4.956, 4.494, 12.982, 0.253,
+ 0.483, 2.217, 15.479, 1.316,
+ 0.021, 3.962, 13.166, 1.522,
+ 2.311, 5.497, 13.395, 0.248,
+ 3.830, 3.522, 14.827, 0.591,
+ 5.150, 4.461, 11.576, 0.635,
+ -1.057, 3.106, 13.132, 0.191,
+ -2.280, 3.902, 12.650, 1.135,
+ -3.316, 2.893, 12.151, 0.794,
+ -2.756, 2.092, 11.000, 0.720,
+ -1.839, 1.204, 11.835, -1.172,
+ -2.737, 0.837, 13.001, -0.313,
+ -1.952, 4.784, 11.578, 2.082,
+ -3.617, 1.972, 13.184, 0.653,
+ -3.744, 1.267, 10.389, -0.413,
+ -0.709, 2.024, 12.234, -1.747,
+ -3.690, 1.156, 9.005, -1.275,
+ -3.434, -0.300, 8.649, 0.441,
+ -3.508, -0.506, 7.143, 0.237,
+ -4.822, 0.042, 6.601, -2.856,
+ -5.027, 1.480, 7.064, 0.985,
+ -6.370, 2.045, 6.652, 0.915,
+ -2.162, -0.690, 9.149, 1.100,
+ -3.442, -1.963, 6.836, -0.081,
+ -5.916, -0.747, 7.065, -2.345,
+ -4.965, 1.556, 8.497, 0.504,
+ -6.439, 2.230, 5.246, 1.451,
+ -2.161, -2.469, 6.802, -1.171,
+ -2.239, -3.925, 6.320, -1.434,
+ -0.847, -4.318, 5.821, 0.098,
+ -0.434, -3.433, 4.670, -1.446,
+ -0.123, -2.195, 5.505, 0.182,
+ 0.644, -2.789, 6.671, 0.865,
+ -3.167, -4.083, 5.248, -0.098,
+ 0.101, -4.119, 6.854, -0.001,
+ 0.775, -3.876, 4.059, 1.061,
+ -1.398, -1.625, 5.904, 0.230,
+ 0.844, -3.774, 2.675, 1.313,
+ 1.977, -2.824, 2.319, -0.112,
+ 2.192, -2.785, 0.813, -0.981,
+ 2.375, -4.197, 0.271, -0.355,
+ 1.232, -5.093, 0.734, 0.632,
+ 1.414, -6.539, 0.322, 0.576,
+ 1.678, -1.527, 2.819, -1.187,
+ 3.421, -1.999, 0.496, -1.770,
+ 3.605, -4.750, 0.735, 1.099,
+ 1.135, -5.078, 2.167, 0.854,
+ 1.289, -6.691, -1.084, -0.487,
+ -1.057, 3.106, 22.602, -1.297,
+ -2.280, 3.902, 22.120, 0.376,
+ -3.316, 2.893, 21.621, 0.932,
+ -2.756, 2.092, 20.470, 1.680,
+ -1.839, 1.204, 21.305, 0.615,
+ -2.737, 0.837, 22.471, 0.899,
+ -1.952, 4.784, 21.048, -0.521,
+ -3.617, 1.972, 22.654, 0.133,
+ -3.744, 1.267, 19.859, 0.081,
+ -0.709, 2.024, 21.704, 1.420,
+ -3.690, 1.156, 18.475, -0.850,
+ -3.434, -0.300, 18.119, -0.249,
+ -3.508, -0.506, 16.613, 1.434,
+ -4.822, 0.042, 16.071, -2.466,
+ -5.027, 1.480, 16.534, -1.045,
+ -6.370, 2.045, 16.122, 1.707,
+ -2.162, -0.690, 18.619, -2.023,
+ -3.442, -1.963, 16.336, -0.304,
+ -5.916, -0.747, 16.535, 0.979,
+ -4.965, 1.556, 17.967, -1.165,
+ -6.439, 2.230, 14.716, 0.929,
+ -2.161, -2.469, 16.302, -0.234,
+ -2.239, -3.925, 15.820, -0.228,
+ -0.847, -4.318, 15.321, 1.844,
+ -0.434, -3.433, 14.170, 1.132,
+ -0.123, -2.195, 15.005, 0.211,
+ 0.644, -2.789, 16.171, -0.632,
+ -3.167, -4.083, 14.748, -0.519,
+ 0.101, -4.119, 16.354, 0.173,
+ 0.775, -3.876, 13.559, 1.243,
+ -1.398, -1.625, 15.404, -0.187,
+ 0.844, -3.774, 12.175, -1.332,
+ 1.977, -2.824, 11.819, -1.616,
+ 2.192, -2.785, 10.313, 1.320,
+ 2.375, -4.197, 9.771, 0.237,
+ 1.232, -5.093, 10.234, 0.851,
+ 1.414, -6.539, 9.822, 1.816,
+ 1.678, -1.527, 12.319, -1.657,
+ 3.421, -1.999, 10.036, 1.559,
+ 3.605, -4.750, 10.235, 0.831,
+ 1.135, -5.078, 11.667, 0.060,
+ 1.289, -6.691, 8.416, 1.066,
+ 3.219, -0.637, 10.002, 2.111,
+ 4.519, 0.024, 9.520, -0.874,
+ 4.163, 1.425, 9.021, -1.012,
+ 3.190, 1.341, 7.870, -0.250,
+ 1.962, 0.991, 8.705, -1.359,
+ 2.093, 1.952, 9.871, -0.126,
+ 5.119, -0.701, 8.448, 0.995,
+ 3.517, 2.147, 10.054, 0.941,
+ 2.970, 2.609, 7.259, -0.562,
+ 2.107, -0.398, 9.104, -0.038,
+ 2.847, 2.618, 5.875, 0.398,
+ 1.457, 3.124, 5.519, 0.481,
+ 1.316, 3.291, 4.013, -0.187,
+ 2.447, 4.155, 3.471, -0.429,
+ 3.795, 3.614, 3.934, -0.432,
+ 4.956, 4.494, 3.522, -0.788,
+ 0.483, 2.217, 6.019, -0.923,
+ 0.021, 3.962, 3.636, -0.316,
+ 2.311, 5.497, 3.935, -1.917,
+ 3.830, 3.522, 5.367, -0.302,
+ 5.150, 4.461, 2.116, -1.615
+ };
+ static const glm::length_t _1agaSize = sizeof(_1aga) / (4 * sizeof(double));
+
+ outTestData.resize(_1agaSize);
+ for(glm::length_t i = 0; i < _1agaSize; ++i)
+ for(glm::length_t d = 0; d < static_cast<glm::length_t>(vec::length()); ++d)
+ outTestData[i][d] = static_cast<typename vec::value_type>(_1aga[i * 4 + d]);
+ }
+
+ // All reference values computed separately using symbolic precision
+ // https://github.com/sgrottel/exp-pca-precision
+ // This applies to all functions named: `_1aga::expected*()`
+
+ GLM_INLINE glm::dmat4 const& expectedCovarData()
+ {
+ static const glm::dmat4 covar4x4d(
+ 9.62434068027210898322, -0.00006657369614512471, -4.29321376568405099761, 0.01879374187452758846,
+ -0.00006657369614512471, 9.62443937868480681175, 5.35113872637944076871, -0.11569259145880574080,
+ -4.29321376568405099761, 5.35113872637944076871, 35.62848549634668415820, 0.90874239254220201545,
+ 0.01879374187452758846, -0.11569259145880574080, 0.90874239254220201545, 1.09705971856890904803
+ );
+ return covar4x4d;
+ }
+
+ template<glm::length_t D>
+ GLM_INLINE glm::vec<D, double, glm::defaultp> const& expectedEigenvalues();
+ template<>
+ GLM_INLINE glm::dvec2 const& expectedEigenvalues<2>()
+ {
+ static const glm::dvec2 evals2(
+ 9.62447289926297399961763301774251330057894539467032275382255,
+ 9.62430715969394210015560961264297422776572580714373620309355
+ );
+ return evals2;
+ }
+ template<>
+ GLM_INLINE glm::dvec3 const& expectedEigenvalues<3>()
+ {
+ static const glm::dvec3 evals3(
+ 37.3274494274683425233695502581182052836449738530676689472257,
+ 9.62431434161498823505729817436585077939509766554969096873168,
+ 7.92550178622027216422369326567668971675332732240052872097887
+ );
+ return evals3;
+ }
+ template<>
+ GLM_INLINE glm::dvec4 const& expectedEigenvalues<4>()
+ {
+ static const glm::dvec4 evals4(
+ 37.3477389918792213596879452204499702406947817221901007885630,
+ 9.62470688921105696017807313860277172063600080413412567999700,
+ 7.94017075281634999342344275928070533134615133171969063657713,
+ 1.06170863996588365446060186982477896078741484440002343404155
+ );
+ return evals4;
+ }
+
+ template<glm::length_t D>
+ GLM_INLINE glm::mat<D, D, double, glm::defaultp> const& expectedEigenvectors();
+ template<>
+ GLM_INLINE glm::dmat2 const& expectedEigenvectors<2>()
+ {
+ static const glm::dmat2 evecs2(
+ glm::dvec2(
+ -0.503510847492551904906870957742619139443409162857537237123308,
+ 1
+ ),
+ glm::dvec2(
+ 1.98605453086051402895741763848787613048533838388005162794043,
+ 1
+ )
+ );
+ return evecs2;
+ }
+ template<>
+ GLM_INLINE glm::dmat3 const& expectedEigenvectors<3>()
+ {
+ static const glm::dmat3 evecs3(
+ glm::dvec3(
+ -0.154972738414395866005286433008304444294405085038689821864654,
+ 0.193161285869815165989799191097521722568079378840201629578695,
+ 1
+ ),
+ glm::dvec3(
+ -158565.112775416943154745839952575022429933119522746586149868,
+ -127221.506282351944358932458687410410814983610301927832439675,
+ 1
+ ),
+ glm::dvec3(
+ 2.52702248596556806145700361724323960543858113426446460406536,
+ -3.14959802931313870497377546974185300816008580801457419079412,
+ 1
+ )
+ );
+ return evecs3;
+ }
+ template<>
+ GLM_INLINE glm::dmat4 const& expectedEigenvectors<4>()
+ {
+ static const glm::dmat4 evecs4(
+ glm::dvec4(
+ -6.35322390281037045217295803597357821705371650876122113027264,
+ 7.91546394153385394517767054617789939529794642646629201212056,
+ 41.0301543819240679808549819457450130787045236815736490549663,
+ 1
+ ),
+ glm::dvec4(
+ -114.622418941087829756565311692197154422302604224781253861297,
+ -92.2070185807065289900871215218752013659402949497379896153118,
+ 0.0155846091025912430932734548933329458404665760587569100867246,
+ 1
+ ),
+ glm::dvec4(
+ 13.1771887761559019483954743159026938257325190511642952175789,
+ -16.3688257459634877666638419310116970616615816436949741766895,
+ 5.17386502341472097227408249233288958059579189051394773143190,
+ 1
+ ),
+ glm::dvec4(
+ -0.0192777078948229800494895064532553117703859768210647632969276,
+ 0.0348034950916108873629241563077465542944938906271231198634442,
+ -0.0340715609308469289267379681032545422644143611273049912226126,
+ 1
+ )
+ );
+ return evecs4;
+ }
+
+} // namespace _1aga
+
+// Compute center of gravity
+template<typename vec>
+vec computeCenter(const std::vector<vec>& testData)
+{
+ double c[4];
+ std::fill(c, c + vec::length(), 0.0);
+
+ typename std::vector<vec>::const_iterator e = testData.end();
+ for(typename std::vector<vec>::const_iterator i = testData.begin(); i != e; ++i)
+ for(glm::length_t d = 0; d < static_cast<glm::length_t>(vec::length()); ++d)
+ c[d] += static_cast<double>((*i)[d]);
+
+ vec cVec(0);
+ for(glm::length_t d = 0; d < static_cast<glm::length_t>(vec::length()); ++d)
+ cVec[d] = static_cast<typename vec::value_type>(c[d] / static_cast<double>(testData.size()));
+ return cVec;
+}
+
+// Test sorting of Eigenvalue&Eigenvector lists. Use exhaustive search.
+template<glm::length_t D, typename T, glm::qualifier Q>
+int testEigenvalueSort()
+{
+ // Test input data: four arbitrary values
+ static const glm::vec<D, T, Q> refVal(
+ glm::vec<4, T, Q>(
+ 10, 8, 6, 4
+ )
+ );
+ // Test input data: four arbitrary vectors, which can be matched to the above values
+ static const glm::mat<D, D, T, Q> refVec(
+ glm::mat<4, 4, T, Q>(
+ 10, 20, 5, 40,
+ 8, 16, 4, 32,
+ 6, 12, 3, 24,
+ 4, 8, 2, 16
+ )
+ );
+ // Permutations of test input data for exhaustive check, based on `D` (1 <= D <= 4)
+ static const int permutationCount[] = {
+ 0,
+ 1,
+ 2,
+ 6,
+ 24
+ };
+	// The permutations to perform, based on `D` (1 <= D <= 4)
+ static const glm::ivec4 permutation[] = {
+ glm::ivec4(0, 1, 2, 3),
+ glm::ivec4(1, 0, 2, 3), // last for D = 2
+ glm::ivec4(0, 2, 1, 3),
+ glm::ivec4(1, 2, 0, 3),
+ glm::ivec4(2, 0, 1, 3),
+ glm::ivec4(2, 1, 0, 3), // last for D = 3
+ glm::ivec4(0, 1, 3, 2),
+ glm::ivec4(1, 0, 3, 2),
+ glm::ivec4(0, 2, 3, 1),
+ glm::ivec4(1, 2, 3, 0),
+ glm::ivec4(2, 0, 3, 1),
+ glm::ivec4(2, 1, 3, 0),
+ glm::ivec4(0, 3, 1, 2),
+ glm::ivec4(1, 3, 0, 2),
+ glm::ivec4(0, 3, 2, 1),
+ glm::ivec4(1, 3, 2, 0),
+ glm::ivec4(2, 3, 0, 1),
+ glm::ivec4(2, 3, 1, 0),
+ glm::ivec4(3, 0, 1, 2),
+ glm::ivec4(3, 1, 0, 2),
+ glm::ivec4(3, 0, 2, 1),
+ glm::ivec4(3, 1, 2, 0),
+ glm::ivec4(3, 2, 0, 1),
+ glm::ivec4(3, 2, 1, 0) // last for D = 4
+ };
+
+ // initial sanity check
+ if(!vectorEpsilonEqual(refVal, refVal, myEpsilon<T>()))
+ return failReport(__LINE__);
+ if(!matrixEpsilonEqual(refVec, refVec, myEpsilon<T>()))
+ return failReport(__LINE__);
+
+ // Exhaustive search through all permutations
+ for(int p = 0; p < permutationCount[D]; ++p)
+ {
+ glm::vec<D, T, Q> testVal;
+ glm::mat<D, D, T, Q> testVec;
+ for(int i = 0; i < D; ++i)
+ {
+ testVal[i] = refVal[permutation[p][i]];
+ testVec[i] = refVec[permutation[p][i]];
+ }
+
+ glm::sortEigenvalues(testVal, testVec);
+
+ if (!vectorEpsilonEqual(testVal, refVal, myEpsilon<T>()))
+ return failReport(__LINE__);
+ if (!matrixEpsilonEqual(testVec, refVec, myEpsilon<T>()))
+ return failReport(__LINE__);
+ }
+
+ return 0;
+}
+
+// Test covariance matrix creation functions
+template<glm::length_t D, typename T, glm::qualifier Q>
+int testCovar(
+#if GLM_HAS_CXX11_STL == 1
+ glm::length_t dataSize, unsigned int randomEngineSeed
+#else // GLM_HAS_CXX11_STL == 1
+ glm::length_t, unsigned int
+#endif // GLM_HAS_CXX11_STL == 1
+)
+{
+ typedef glm::vec<D, T, Q> vec;
+ typedef glm::mat<D, D, T, Q> mat;
+
+ // #1: test expected result with fixed data set
+ std::vector<vec> testData;
+ _1aga::fillTestData(testData);
+
+ // compute center of gravity
+ vec center = computeCenter(testData);
+
+ mat covarMat = glm::computeCovarianceMatrix(testData.data(), testData.size(), center);
+ if(!matrixEpsilonEqual(covarMat, mat(_1aga::expectedCovarData()), myEpsilon<T>()))
+ {
+ fprintf(stderr, "Reconstructed covarMat:\n%s\n", glm::to_string(covarMat).c_str());
+ return failReport(__LINE__);
+ }
+
+	// #2: test function variant consistency with random data
+#if GLM_HAS_CXX11_STL == 1
+ std::default_random_engine rndEng(randomEngineSeed);
+ std::normal_distribution<T> normalDist;
+ testData.resize(dataSize);
+ // some common offset of all data
+ T offset[D];
+ for(glm::length_t d = 0; d < D; ++d)
+ offset[d] = normalDist(rndEng);
+ // init data
+ for(glm::length_t i = 0; i < dataSize; ++i)
+ for(glm::length_t d = 0; d < D; ++d)
+ testData[i][d] = offset[d] + normalDist(rndEng);
+ center = computeCenter(testData);
+
+ std::vector<vec> centeredTestData;
+ centeredTestData.reserve(testData.size());
+ typename std::vector<vec>::const_iterator e = testData.end();
+ for(typename std::vector<vec>::const_iterator i = testData.begin(); i != e; ++i)
+ centeredTestData.push_back((*i) - center);
+
+ mat c1 = glm::computeCovarianceMatrix(centeredTestData.data(), centeredTestData.size());
+ mat c2 = glm::computeCovarianceMatrix<D, T, Q>(centeredTestData.begin(), centeredTestData.end());
+ mat c3 = glm::computeCovarianceMatrix(testData.data(), testData.size(), center);
+ mat c4 = glm::computeCovarianceMatrix<D, T, Q>(testData.rbegin(), testData.rend(), center);
+
+ if(!matrixEpsilonEqual(c1, c2, myEpsilon<T>()))
+ return failReport(__LINE__);
+ if(!matrixEpsilonEqual(c1, c3, myEpsilon<T>()))
+ return failReport(__LINE__);
+ if(!matrixEpsilonEqual(c1, c4, myEpsilon<T>()))
+ return failReport(__LINE__);
+#endif // GLM_HAS_CXX11_STL == 1
+ return 0;
+}
+
+// Computes eigenvalues and eigenvectors from well-known covariance matrix
+template<glm::length_t D, typename T, glm::qualifier Q>
+int testEigenvectors(T epsilon)
+{
+ typedef glm::vec<D, T, Q> vec;
+ typedef glm::mat<D, D, T, Q> mat;
+
+ // test expected result with fixed data set
+ std::vector<vec> testData;
+ mat covarMat(_1aga::expectedCovarData());
+
+ vec eigenvalues;
+ mat eigenvectors;
+ unsigned int c = glm::findEigenvaluesSymReal(covarMat, eigenvalues, eigenvectors);
+ if(c != D)
+ return failReport(__LINE__);
+ glm::sortEigenvalues(eigenvalues, eigenvectors);
+
+ if (!vectorEpsilonEqual(eigenvalues, vec(_1aga::expectedEigenvalues<D>()), epsilon))
+ return failReport(__LINE__);
+
+ for (int i = 0; i < D; ++i)
+ {
+ vec act = glm::normalize(eigenvectors[i]);
+ vec exp = glm::normalize(_1aga::expectedEigenvectors<D>()[i]);
+ if (!sameSign(act[0], exp[0])) exp = -exp;
+ if (!vectorEpsilonEqual(act, exp, epsilon))
+ return failReport(__LINE__);
+ }
+
+ return 0;
+}
+
+// A simple small smoke test:
+// - a uniformly sampled block
+// - reconstruct main axes
+// - check order of eigenvalues equals order of extends of block in direction of main axes
+int smokeTest()
+{
+ using glm::vec3;
+ using glm::mat3;
+ std::vector<vec3> pts;
+ pts.reserve(11 * 15 * 7);
+
+ for(int x = -5; x <= 5; ++x)
+ for(int y = -7; y <= 7; ++y)
+ for(int z = -3; z <= 3; ++z)
+ pts.push_back(vec3(x, y, z));
+
+ mat3 covar = glm::computeCovarianceMatrix(pts.data(), pts.size());
+ mat3 eVec;
+ vec3 eVal;
+ int eCnt = glm::findEigenvaluesSymReal(covar, eVal, eVec);
+ if(eCnt != 3)
+ return failReport(__LINE__);
+
+	// sort eVec by descending eVal
+ if(eVal[0] < eVal[1])
+ {
+ std::swap(eVal[0], eVal[1]);
+ std::swap(eVec[0], eVec[1]);
+ }
+ if(eVal[0] < eVal[2])
+ {
+ std::swap(eVal[0], eVal[2]);
+ std::swap(eVec[0], eVec[2]);
+ }
+ if(eVal[1] < eVal[2])
+ {
+ std::swap(eVal[1], eVal[2]);
+ std::swap(eVec[1], eVec[2]);
+ }
+
+ if(!vectorEpsilonEqual(glm::abs(eVec[0]), vec3(0, 1, 0), myEpsilon<float>()))
+ return failReport(__LINE__);
+ if(!vectorEpsilonEqual(glm::abs(eVec[1]), vec3(1, 0, 0), myEpsilon<float>()))
+ return failReport(__LINE__);
+ if(!vectorEpsilonEqual(glm::abs(eVec[2]), vec3(0, 0, 1), myEpsilon<float>()))
+ return failReport(__LINE__);
+
+ return 0;
+}
+
+#if GLM_HAS_CXX11_STL == 1
+int rndTest(unsigned int randomEngineSeed)
+{
+ std::default_random_engine rndEng(randomEngineSeed);
+ std::normal_distribution<double> normalDist;
+
+ // construct orthonormal system
+ glm::dvec3 x(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ double l = glm::length(x);
+ while(l < myEpsilon<double>())
+ x = glm::dvec3(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ x = glm::normalize(x);
+ glm::dvec3 y(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ l = glm::length(y);
+ while(l < myEpsilon<double>())
+ y = glm::dvec3(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ while(glm::abs(glm::dot(x, y)) < myEpsilon<double>())
+ {
+ y = glm::dvec3(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ while(l < myEpsilon<double>())
+ y = glm::dvec3(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ }
+ y = glm::normalize(y);
+ glm::dvec3 z = glm::normalize(glm::cross(x, y));
+ y = glm::normalize(glm::cross(z, x));
+
+ // generate input point data
+ std::vector<glm::dvec3> ptData;
+ static const int pattern[] = {
+ 8, 0, 0,
+ 4, 1, 2,
+ 0, 2, 0,
+ 0, 0, 4
+ };
+ glm::dvec3 offset(normalDist(rndEng), normalDist(rndEng), normalDist(rndEng));
+ for(int p = 0; p < 4; ++p)
+ for(int xs = 1; xs >= -1; xs -= 2)
+ for(int ys = 1; ys >= -1; ys -= 2)
+ for(int zs = 1; zs >= -1; zs -= 2)
+ ptData.push_back(
+ offset
+ + x * static_cast<double>(pattern[p * 3 + 0] * xs)
+ + y * static_cast<double>(pattern[p * 3 + 1] * ys)
+ + z * static_cast<double>(pattern[p * 3 + 2] * zs));
+
+ // perform PCA:
+ glm::dvec3 center = computeCenter(ptData);
+ glm::dmat3 covarMat = glm::computeCovarianceMatrix(ptData.data(), ptData.size(), center);
+ glm::dvec3 evals;
+ glm::dmat3 evecs;
+ int evcnt = glm::findEigenvaluesSymReal(covarMat, evals, evecs);
+ if(evcnt != 3)
+ return failReport(__LINE__);
+ glm::sortEigenvalues(evals, evecs);
+
+ if (!sameSign(evecs[0][0], x[0])) evecs[0] = -evecs[0];
+ if(!vectorEpsilonEqual(x, evecs[0], myEpsilon<double>()))
+ return failReport(__LINE__);
+ if (!sameSign(evecs[2][0], y[0])) evecs[2] = -evecs[2];
+ if (!vectorEpsilonEqual(y, evecs[2], myEpsilon<double>()))
+ return failReport(__LINE__);
+ if (!sameSign(evecs[1][0], z[0])) evecs[1] = -evecs[1];
+ if (!vectorEpsilonEqual(z, evecs[1], myEpsilon<double>()))
+ return failReport(__LINE__);
+
+ return 0;
+}
+#endif // GLM_HAS_CXX11_STL == 1
+
+int main()
+{
+ int error(0);
+
+ // A small smoke test to fail early with most problems
+ if(smokeTest())
+ return failReport(__LINE__);
+
+ // test sorting utility.
+ if(testEigenvalueSort<2, float, glm::defaultp>() != 0)
+ error = failReport(__LINE__);
+ if(testEigenvalueSort<2, double, glm::defaultp>() != 0)
+ error = failReport(__LINE__);
+ if(testEigenvalueSort<3, float, glm::defaultp>() != 0)
+ error = failReport(__LINE__);
+ if(testEigenvalueSort<3, double, glm::defaultp>() != 0)
+ error = failReport(__LINE__);
+ if(testEigenvalueSort<4, float, glm::defaultp>() != 0)
+ error = failReport(__LINE__);
+ if(testEigenvalueSort<4, double, glm::defaultp>() != 0)
+ error = failReport(__LINE__);
+ if (error != 0)
+ return error;
+
+ // Note: the random engine uses a fixed seed to create consistent and reproducible test data
+ // test covariance matrix computation from different data sources
+ if(testCovar<2, float, glm::defaultp>(100, 12345) != 0)
+ error = failReport(__LINE__);
+ if(testCovar<2, double, glm::defaultp>(100, 42) != 0)
+ error = failReport(__LINE__);
+ if(testCovar<3, float, glm::defaultp>(100, 2021) != 0)
+ error = failReport(__LINE__);
+ if(testCovar<3, double, glm::defaultp>(100, 815) != 0)
+ error = failReport(__LINE__);
+ if(testCovar<4, float, glm::defaultp>(100, 3141) != 0)
+ error = failReport(__LINE__);
+ if(testCovar<4, double, glm::defaultp>(100, 174) != 0)
+ error = failReport(__LINE__);
+ if (error != 0)
+ return error;
+
+ // test PCA eigen vector reconstruction
+ // Expected epsilon precision evaluated separately:
+ // https://github.com/sgrottel/exp-pca-precision
+ if(testEigenvectors<2, float, glm::defaultp>(0.002f) != 0)
+ error = failReport(__LINE__);
+ if(testEigenvectors<2, double, glm::defaultp>(0.00000000001) != 0)
+ error = failReport(__LINE__);
+ if(testEigenvectors<3, float, glm::defaultp>(0.00001f) != 0)
+ error = failReport(__LINE__);
+ if(testEigenvectors<3, double, glm::defaultp>(0.0000000001) != 0)
+ error = failReport(__LINE__);
+ if(testEigenvectors<4, float, glm::defaultp>(0.0001f) != 0)
+ error = failReport(__LINE__);
+ if(testEigenvectors<4, double, glm::defaultp>(0.0000001) != 0)
+ error = failReport(__LINE__);
+ if(error != 0)
+ return error;
+
+ // Final tests with randomized data
+#if GLM_HAS_CXX11_STL == 1
+ if(rndTest(12345) != 0)
+ error = failReport(__LINE__);
+ if(rndTest(42) != 0)
+ error = failReport(__LINE__);
+ if (error != 0)
+ return error;
+#endif // GLM_HAS_CXX11_STL == 1
+
+ return error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_perpendicular.cpp b/3rdparty/glm/source/test/gtx/gtx_perpendicular.cpp
new file mode 100644
index 0000000..d14cfee
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_perpendicular.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/perpendicular.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_polar_coordinates.cpp b/3rdparty/glm/source/test/gtx/gtx_polar_coordinates.cpp
new file mode 100644
index 0000000..da2fe53
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_polar_coordinates.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/polar_coordinates.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_projection.cpp b/3rdparty/glm/source/test/gtx/gtx_projection.cpp
new file mode 100644
index 0000000..8f9f772
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_projection.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/projection.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_quaternion.cpp b/3rdparty/glm/source/test/gtx/gtx_quaternion.cpp
new file mode 100644
index 0000000..80cbbac
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_quaternion.cpp
@@ -0,0 +1,107 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/epsilon.hpp>
+#include <glm/gtc/type_ptr.hpp>
+#include <glm/gtc/matrix_transform.hpp>
+#include <glm/gtx/transform.hpp>
+#include <glm/gtx/quaternion.hpp>
+#include <glm/gtx/compatibility.hpp>
+#include <glm/ext.hpp>
+
+int test_quat_fastMix()
+{
+ int Error = 0;
+
+ glm::quat A = glm::angleAxis(0.0f, glm::vec3(0, 0, 1));
+ glm::quat B = glm::angleAxis(glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::quat C = glm::fastMix(A, B, 0.5f);
+ glm::quat D = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 1));
+
+ Error += glm::epsilonEqual(C.x, D.x, 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(C.y, D.y, 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(C.z, D.z, 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(C.w, D.w, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int test_quat_shortMix()
+{
+ int Error(0);
+
+ glm::quat A = glm::angleAxis(0.0f, glm::vec3(0, 0, 1));
+ glm::quat B = glm::angleAxis(glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::quat C = glm::shortMix(A, B, 0.5f);
+ glm::quat D = glm::angleAxis(glm::pi<float>() * 0.25f, glm::vec3(0, 0, 1));
+
+ Error += glm::epsilonEqual(C.x, D.x, 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(C.y, D.y, 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(C.z, D.z, 0.01f) ? 0 : 1;
+ Error += glm::epsilonEqual(C.w, D.w, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int test_orientation()
+{
+ int Error = 0;
+
+ {
+ glm::quat q(1.0f, 0.0f, 0.0f, 1.0f);
+ float p = glm::roll(q);
+ Error += glm::epsilonEqual(p, glm::pi<float>() * 0.5f, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ glm::quat q(1.0f, 0.0f, 0.0f, 1.0f);
+ float p = glm::pitch(q);
+ Error += glm::epsilonEqual(p, 0.f, 0.0001f) ? 0 : 1;
+ }
+
+ {
+ glm::quat q(1.0f, 0.0f, 0.0f, 1.0f);
+ float p = glm::yaw(q);
+ Error += glm::epsilonEqual(p, 0.f, 0.0001f) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_rotation()
+{
+ int Error(0);
+
+ glm::vec3 v(1, 0, 0);
+ glm::vec3 u(0, 1, 0);
+
+ glm::quat Rotation = glm::rotation(v, u);
+
+ float Angle = glm::angle(Rotation);
+
+ Error += glm::abs(Angle - glm::pi<float>() * 0.5f) < glm::epsilon<float>() ? 0 : 1;
+
+ return Error;
+}
+
+int test_log()
+{
+ int Error(0);
+
+ glm::quat q;
+ glm::quat p = glm::log(q);
+ glm::quat r = glm::exp(p);
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_log();
+ Error += test_rotation();
+ Error += test_orientation();
+ Error += test_quat_fastMix();
+ Error += test_quat_shortMix();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_random.cpp b/3rdparty/glm/source/test/gtx/gtx_random.cpp
new file mode 100644
index 0000000..e562c31
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_random.cpp
@@ -0,0 +1,99 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// OpenGL Mathematics Copyright (c) 2005 - 2012 G-Truc Creation (www.g-truc.net)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Created : 2011-05-31
+// Updated : 2011-05-31
+// Licence : This source is under MIT licence
+// File : test/gtx/random.cpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include <glm/glm.hpp>
+#include <glm/gtx/random.hpp>
+#include <glm/gtx/epsilon.hpp>
+#include <iostream>
+
+int test_signedRand1()
+{
+ int Error = 0;
+
+ {
+ float ResultFloat = 0.0f;
+ double ResultDouble = 0.0f;
+ for(std::size_t i = 0; i < 100000; ++i)
+ {
+ ResultFloat += glm::signedRand1<float>();
+ ResultDouble += glm::signedRand1<double>();
+ }
+
+ Error += glm::equalEpsilon(ResultFloat, 0.0f, 0.0001f);
+ Error += glm::equalEpsilon(ResultDouble, 0.0, 0.0001);
+ }
+
+ return Error;
+}
+
+int test_normalizedRand2()
+{
+ int Error = 0;
+
+ {
+ std::size_t Max = 100000;
+ float ResultFloat = 0.0f;
+ double ResultDouble = 0.0f;
+ for(std::size_t i = 0; i < Max; ++i)
+ {
+ ResultFloat += glm::length(glm::normalizedRand2<float>());
+ ResultDouble += glm::length(glm::normalizedRand2<double>());
+ }
+
+ Error += glm::equalEpsilon(ResultFloat, float(Max), 0.000001f) ? 0 : 1;
+ Error += glm::equalEpsilon(ResultDouble, double(Max), 0.000001) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int test_normalizedRand3()
+{
+ int Error = 0;
+
+ {
+ std::size_t Max = 100000;
+ float ResultFloatA = 0.0f;
+ float ResultFloatB = 0.0f;
+ float ResultFloatC = 0.0f;
+ double ResultDoubleA = 0.0f;
+ double ResultDoubleB = 0.0f;
+ double ResultDoubleC = 0.0f;
+ for(std::size_t i = 0; i < Max; ++i)
+ {
+ ResultFloatA += glm::length(glm::normalizedRand3<float>());
+ ResultDoubleA += glm::length(glm::normalizedRand3<double>());
+ ResultFloatB += glm::length(glm::normalizedRand3(2.0f, 2.0f));
+ ResultDoubleB += glm::length(glm::normalizedRand3(2.0, 2.0));
+ ResultFloatC += glm::length(glm::normalizedRand3(1.0f, 3.0f));
+ ResultDoubleC += glm::length(glm::normalizedRand3(1.0, 3.0));
+ }
+
+ Error += glm::equalEpsilon(ResultFloatA, float(Max), 0.0001f) ? 0 : 1;
+ Error += glm::equalEpsilon(ResultDoubleA, double(Max), 0.0001) ? 0 : 1;
+ Error += glm::equalEpsilon(ResultFloatB, float(Max * 2), 0.0001f) ? 0 : 1;
+ Error += glm::equalEpsilon(ResultDoubleB, double(Max * 2), 0.0001) ? 0 : 1;
+ Error += (ResultFloatC >= float(Max) && ResultFloatC <= float(Max * 3)) ? 0 : 1;
+ Error += (ResultDoubleC >= double(Max) && ResultDoubleC <= double(Max * 3)) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_signedRand1();
+ Error += test_normalizedRand2();
+ Error += test_normalizedRand3();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_range.cpp b/3rdparty/glm/source/test/gtx/gtx_range.cpp
new file mode 100644
index 0000000..434731b
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_range.cpp
@@ -0,0 +1,83 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/glm.hpp>
+
+#if GLM_HAS_RANGE_FOR
+
+#include <glm/gtx/range.hpp>
+
+int test_vec()
+{
+ int Error = 0;
+
+ {
+ glm::ivec3 const v(1, 2, 3);
+
+ int count = 0;
+ glm::ivec3 Result(0);
+ for(int x : v)
+ {
+ Result[count] = x;
+ count++;
+ }
+ Error += count == 3 ? 0 : 1;
+ Error += v == Result ? 0 : 1;
+ }
+
+ {
+ glm::ivec3 v(1, 2, 3);
+ for(int& x : v)
+ x = 0;
+ Error += glm::all(glm::equal(v, glm::ivec3(0))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_mat()
+{
+ int Error = 0;
+
+ {
+ glm::mat4x3 m(1.0f);
+
+ int count = 0;
+ float Sum = 0.0f;
+ for(float x : m)
+ {
+ count++;
+ Sum += x;
+ }
+ Error += count == 12 ? 0 : 1;
+ Error += glm::equal(Sum, 3.0f, 0.001f) ? 0 : 1;
+ }
+
+ {
+ glm::mat4x3 m(1.0f);
+
+ for (float& x : m) { x = 0; }
+ glm::vec4 v(1, 1, 1, 1);
+ Error += glm::all(glm::equal(m*v, glm::vec3(0, 0, 0), glm::epsilon<float>())) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+ Error += test_vec();
+ Error += test_mat();
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif//GLM_HAS_RANGE_FOR
diff --git a/3rdparty/glm/source/test/gtx/gtx_rotate_normalized_axis.cpp b/3rdparty/glm/source/test/gtx/gtx_rotate_normalized_axis.cpp
new file mode 100644
index 0000000..d4eecdf
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_rotate_normalized_axis.cpp
@@ -0,0 +1,9 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/rotate_normalized_axis.hpp>
+
+int main()
+{
+ int Error(0);
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_rotate_vector.cpp b/3rdparty/glm/source/test/gtx/gtx_rotate_vector.cpp
new file mode 100644
index 0000000..becd63f
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_rotate_vector.cpp
@@ -0,0 +1,77 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/rotate_vector.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_relational.hpp>
+
+int test_rotate()
+{
+ int Error = 0;
+
+ glm::vec2 A = glm::rotate(glm::vec2(1, 0), glm::pi<float>() * 0.5f);
+ glm::vec3 B = glm::rotate(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::vec4 C = glm::rotate(glm::vec4(1, 0, 0, 1), glm::pi<float>() * 0.5f, glm::vec3(0, 0, 1));
+ glm::vec3 D = glm::rotateX(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f);
+ glm::vec4 E = glm::rotateX(glm::vec4(1, 0, 0, 1), glm::pi<float>() * 0.5f);
+ glm::vec3 F = glm::rotateY(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f);
+ glm::vec4 G = glm::rotateY(glm::vec4(1, 0, 0, 1), glm::pi<float>() * 0.5f);
+ glm::vec3 H = glm::rotateZ(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f);
+ glm::vec4 I = glm::rotateZ(glm::vec4(1, 0, 0,1 ), glm::pi<float>() * 0.5f);
+ glm::mat4 O = glm::orientation(glm::normalize(glm::vec3(1)), glm::vec3(0, 0, 1));
+
+ return Error;
+}
+
+int test_rotateX()
+{
+ int Error = 0;
+
+ glm::vec3 D = glm::rotateX(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f);
+ glm::vec4 E = glm::rotateX(glm::vec4(1, 0, 0, 1), glm::pi<float>() * 0.5f);
+
+ return Error;
+}
+
+int test_rotateY()
+{
+ int Error = 0;
+
+ glm::vec3 F = glm::rotateY(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f);
+ glm::vec4 G = glm::rotateY(glm::vec4(1, 0, 0, 1), glm::pi<float>() * 0.5f);
+
+ return Error;
+}
+
+
+int test_rotateZ()
+{
+ int Error = 0;
+
+ glm::vec3 H = glm::rotateZ(glm::vec3(1, 0, 0), glm::pi<float>() * 0.5f);
+ glm::vec4 I = glm::rotateZ(glm::vec4(1, 0, 0,1 ), glm::pi<float>() * 0.5f);
+
+ return Error;
+}
+
+int test_orientation()
+{
+ int Error = 0;
+
+ glm::mat4 O = glm::orientation(glm::normalize(glm::vec3(1)), glm::vec3(0, 0, 1));
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_rotate();
+ Error += test_rotateX();
+ Error += test_rotateY();
+ Error += test_rotateZ();
+ Error += test_orientation();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_scalar_multiplication.cpp b/3rdparty/glm/source/test/gtx/gtx_scalar_multiplication.cpp
new file mode 100644
index 0000000..4aa96d6
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_scalar_multiplication.cpp
@@ -0,0 +1,37 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/glm.hpp>
+
+#if GLM_HAS_TEMPLATE_ALIASES && !(GLM_COMPILER & GLM_COMPILER_GCC)
+#include <glm/gtx/scalar_multiplication.hpp>
+
+int main()
+{
+ int Error(0);
+ glm::vec3 v(0.5, 3.1, -9.1);
+
+ Error += glm::all(glm::equal(v, 1.0 * v, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v, 1 * v, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(v, 1u * v, glm::epsilon<float>())) ? 0 : 1;
+
+ glm::mat3 m(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ glm::vec3 w = 0.5f * m * v;
+
+ Error += glm::all(glm::equal((m*v)/2, w, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(m*(v/2), w, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal((m/2)*v, w, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal((0.5*m)*v, w, glm::epsilon<float>())) ? 0 : 1;
+ Error += glm::all(glm::equal(0.5*(m*v), w, glm::epsilon<float>())) ? 0 : 1;
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/gtx/gtx_scalar_relational.cpp b/3rdparty/glm/source/test/gtx/gtx_scalar_relational.cpp
new file mode 100644
index 0000000..fc6a09a
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_scalar_relational.cpp
@@ -0,0 +1,174 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/scalar_relational.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/glm.hpp>
+
+static int test_lessThan()
+{
+ int Error = 0;
+
+ Error += glm::lessThan(0, 1) ? 0 : 1;
+ Error += glm::lessThan(1, 0) ? 1 : 0;
+ Error += glm::lessThan(0, 0) ? 1 : 0;
+ Error += glm::lessThan(1, 1) ? 1 : 0;
+ Error += glm::lessThan(0.0f, 1.0f) ? 0 : 1;
+ Error += glm::lessThan(1.0f, 0.0f) ? 1 : 0;
+ Error += glm::lessThan(0.0f, 0.0f) ? 1 : 0;
+ Error += glm::lessThan(1.0f, 1.0f) ? 1 : 0;
+ Error += glm::lessThan(0.0, 1.0) ? 0 : 1;
+ Error += glm::lessThan(1.0, 0.0) ? 1 : 0;
+ Error += glm::lessThan(0.0, 0.0) ? 1 : 0;
+ Error += glm::lessThan(1.0, 1.0) ? 1 : 0;
+
+ return Error;
+}
+
+static int test_lessThanEqual()
+{
+ int Error = 0;
+
+ Error += glm::lessThanEqual(0, 1) ? 0 : 1;
+ Error += glm::lessThanEqual(1, 0) ? 1 : 0;
+ Error += glm::lessThanEqual(0, 0) ? 0 : 1;
+ Error += glm::lessThanEqual(1, 1) ? 0 : 1;
+ Error += glm::lessThanEqual(0.0f, 1.0f) ? 0 : 1;
+ Error += glm::lessThanEqual(1.0f, 0.0f) ? 1 : 0;
+ Error += glm::lessThanEqual(0.0f, 0.0f) ? 0 : 1;
+ Error += glm::lessThanEqual(1.0f, 1.0f) ? 0 : 1;
+ Error += glm::lessThanEqual(0.0, 1.0) ? 0 : 1;
+ Error += glm::lessThanEqual(1.0, 0.0) ? 1 : 0;
+ Error += glm::lessThanEqual(0.0, 0.0) ? 0 : 1;
+ Error += glm::lessThanEqual(1.0, 1.0) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_greaterThan()
+{
+ int Error = 0;
+
+ Error += glm::greaterThan(0, 1) ? 1 : 0;
+ Error += glm::greaterThan(1, 0) ? 0 : 1;
+ Error += glm::greaterThan(0, 0) ? 1 : 0;
+ Error += glm::greaterThan(1, 1) ? 1 : 0;
+ Error += glm::greaterThan(0.0f, 1.0f) ? 1 : 0;
+ Error += glm::greaterThan(1.0f, 0.0f) ? 0 : 1;
+ Error += glm::greaterThan(0.0f, 0.0f) ? 1 : 0;
+ Error += glm::greaterThan(1.0f, 1.0f) ? 1 : 0;
+ Error += glm::greaterThan(0.0, 1.0) ? 1 : 0;
+ Error += glm::greaterThan(1.0, 0.0) ? 0 : 1;
+ Error += glm::greaterThan(0.0, 0.0) ? 1 : 0;
+ Error += glm::greaterThan(1.0, 1.0) ? 1 : 0;
+
+ return Error;
+}
+
+static int test_greaterThanEqual()
+{
+ int Error = 0;
+
+ Error += glm::greaterThanEqual(0, 1) ? 1 : 0;
+ Error += glm::greaterThanEqual(1, 0) ? 0 : 1;
+ Error += glm::greaterThanEqual(0, 0) ? 0 : 1;
+ Error += glm::greaterThanEqual(1, 1) ? 0 : 1;
+ Error += glm::greaterThanEqual(0.0f, 1.0f) ? 1 : 0;
+ Error += glm::greaterThanEqual(1.0f, 0.0f) ? 0 : 1;
+ Error += glm::greaterThanEqual(0.0f, 0.0f) ? 0 : 1;
+ Error += glm::greaterThanEqual(1.0f, 1.0f) ? 0 : 1;
+ Error += glm::greaterThanEqual(0.0, 1.0) ? 1 : 0;
+ Error += glm::greaterThanEqual(1.0, 0.0) ? 0 : 1;
+ Error += glm::greaterThanEqual(0.0, 0.0) ? 0 : 1;
+ Error += glm::greaterThanEqual(1.0, 1.0) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_equal()
+{
+ int Error = 0;
+
+ Error += glm::equal(0, 1) ? 1 : 0;
+ Error += glm::equal(1, 0) ? 1 : 0;
+ Error += glm::equal(0, 0) ? 0 : 1;
+ Error += glm::equal(1, 1) ? 0 : 1;
+ Error += glm::equal(0.0f, 1.0f, glm::epsilon<float>()) ? 1 : 0;
+ Error += glm::equal(1.0f, 0.0f, glm::epsilon<float>()) ? 1 : 0;
+ Error += glm::equal(0.0f, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(1.0f, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::equal(0.0, 1.0, glm::epsilon<double>()) ? 1 : 0;
+ Error += glm::equal(1.0, 0.0, glm::epsilon<double>()) ? 1 : 0;
+ Error += glm::equal(0.0, 0.0, glm::epsilon<double>()) ? 0 : 1;
+ Error += glm::equal(1.0, 1.0, glm::epsilon<double>()) ? 0 : 1;
+
+ return Error;
+}
+
+static int test_notEqual()
+{
+ int Error = 0;
+
+ Error += glm::notEqual(0, 1) ? 0 : 1;
+ Error += glm::notEqual(1, 0) ? 0 : 1;
+ Error += glm::notEqual(0, 0) ? 1 : 0;
+ Error += glm::notEqual(1, 1) ? 1 : 0;
+ Error += glm::notEqual(0.0f, 1.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::notEqual(1.0f, 0.0f, glm::epsilon<float>()) ? 0 : 1;
+ Error += glm::notEqual(0.0f, 0.0f, glm::epsilon<float>()) ? 1 : 0;
+ Error += glm::notEqual(1.0f, 1.0f, glm::epsilon<float>()) ? 1 : 0;
+ Error += glm::notEqual(0.0, 1.0, glm::epsilon<double>()) ? 0 : 1;
+ Error += glm::notEqual(1.0, 0.0, glm::epsilon<double>()) ? 0 : 1;
+ Error += glm::notEqual(0.0, 0.0, glm::epsilon<double>()) ? 1 : 0;
+ Error += glm::notEqual(1.0, 1.0, glm::epsilon<double>()) ? 1 : 0;
+
+ return Error;
+}
+
+static int test_any()
+{
+ int Error = 0;
+
+ Error += glm::any(true) ? 0 : 1;
+ Error += glm::any(false) ? 1 : 0;
+
+ return Error;
+}
+
+static int test_all()
+{
+ int Error = 0;
+
+ Error += glm::all(true) ? 0 : 1;
+ Error += glm::all(false) ? 1 : 0;
+
+ return Error;
+}
+
+static int test_not()
+{
+ int Error = 0;
+
+ Error += glm::not_(true) ? 1 : 0;
+ Error += glm::not_(false) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_lessThan();
+ Error += test_lessThanEqual();
+ Error += test_greaterThan();
+ Error += test_greaterThanEqual();
+ Error += test_equal();
+ Error += test_notEqual();
+ Error += test_any();
+ Error += test_all();
+ Error += test_not();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_simd_mat4.cpp b/3rdparty/glm/source/test/gtx/gtx_simd_mat4.cpp
new file mode 100644
index 0000000..28d7ec5
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_simd_mat4.cpp
@@ -0,0 +1,324 @@
+///////////////////////////////////////////////////////////////////////////////////
+/// OpenGL Mathematics (glm.g-truc.net)
+///
+/// Copyright (c) 2005 - 2012 G-Truc Creation (www.g-truc.net)
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to deal
+/// in the Software without restriction, including without limitation the rights
+/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+/// copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in
+/// all copies or substantial portions of the Software.
+///
+/// Restrictions:
+/// By making use of the Software for military purposes, you choose to make
+/// a Bunny unhappy.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+/// THE SOFTWARE.
+///
+/// @file test/gtx/gtx_simd_mat4.cpp
+/// @date 2010-09-16 / 2014-11-25
+/// @author Christophe Riccio
+///////////////////////////////////////////////////////////////////////////////////
+
+#include <glm/glm.hpp>
+#include <glm/gtc/matrix_transform.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtc/random.hpp>
+#include <glm/gtx/simd_vec4.hpp>
+#include <glm/gtx/simd_mat4.hpp>
+#include <cstdio>
+#include <ctime>
+#include <vector>
+
+#if(GLM_ARCH != GLM_ARCH_PURE)
+
+std::vector<float> test_detA(std::vector<glm::mat4> const & Data)
+{
+ std::vector<float> Test(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Test.size() - 1; ++i)
+ Test[i] = glm::determinant(Data[i]);
+
+ std::clock_t TimeEnd = clock();
+ printf("Det A: %ld\n", TimeEnd - TimeStart);
+
+ return Test;
+}
+
+std::vector<float> test_detB(std::vector<glm::mat4> const & Data)
+{
+ std::vector<float> Test(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Test.size() - 1; ++i)
+ {
+ _mm_prefetch((char*)&Data[i + 1], _MM_HINT_T0);
+ glm::simdMat4 m(Data[i]);
+ glm::simdVec4 d(glm::detail::sse_slow_det_ps((__m128 const * const)&m));
+ glm::vec4 v;//(d);
+ Test[i] = v.x;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Det B: %ld\n", TimeEnd - TimeStart);
+
+ return Test;
+}
+
+std::vector<float> test_detC(std::vector<glm::mat4> const & Data)
+{
+ std::vector<float> Test(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Test.size() - 1; ++i)
+ {
+ _mm_prefetch((char*)&Data[i + 1], _MM_HINT_T0);
+ glm::simdMat4 m(Data[i]);
+ glm::simdVec4 d(glm::detail::sse_det_ps((__m128 const * const)&m));
+ glm::vec4 v;//(d);
+ Test[i] = v.x;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Det C: %ld\n", TimeEnd - TimeStart);
+
+ return Test;
+}
+
+std::vector<float> test_detD(std::vector<glm::mat4> const & Data)
+{
+ std::vector<float> Test(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Test.size() - 1; ++i)
+ {
+ _mm_prefetch((char*)&Data[i + 1], _MM_HINT_T0);
+ glm::simdMat4 m(Data[i]);
+ glm::simdVec4 d(glm::detail::sse_detd_ps((__m128 const * const)&m));
+ glm::vec4 v;//(d);
+ Test[i] = v.x;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Det D: %ld\n", TimeEnd - TimeStart);
+
+ return Test;
+}
+
+void test_invA(std::vector<glm::mat4> const & Data, std::vector<glm::mat4> & Out)
+{
+ //std::vector<float> Test(Data.size());
+ Out.resize(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Out.size() - 1; ++i)
+ {
+ Out[i] = glm::inverse(Data[i]);
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Inv A: %ld\n", TimeEnd - TimeStart);
+}
+
+void test_invC(std::vector<glm::mat4> const & Data, std::vector<glm::mat4> & Out)
+{
+ //std::vector<float> Test(Data.size());
+ Out.resize(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Out.size() - 1; ++i)
+ {
+ _mm_prefetch((char*)&Data[i + 1], _MM_HINT_T0);
+ glm::simdMat4 m(Data[i]);
+ glm::simdMat4 o;
+ glm::detail::sse_inverse_fast_ps((__m128 const * const)&m, (__m128 *)&o);
+ Out[i] = *(glm::mat4*)&o;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Inv C: %ld\n", TimeEnd - TimeStart);
+}
+
+void test_invD(std::vector<glm::mat4> const & Data, std::vector<glm::mat4> & Out)
+{
+ //std::vector<float> Test(Data.size());
+ Out.resize(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Out.size() - 1; ++i)
+ {
+ _mm_prefetch((char*)&Data[i + 1], _MM_HINT_T0);
+ glm::simdMat4 m(Data[i]);
+ glm::simdMat4 o;
+ glm::detail::sse_inverse_ps((__m128 const * const)&m, (__m128 *)&o);
+ Out[i] = *(glm::mat4*)&o;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Inv D: %ld\n", TimeEnd - TimeStart);
+}
+
+void test_mulA(std::vector<glm::mat4> const & Data, std::vector<glm::mat4> & Out)
+{
+ //std::vector<float> Test(Data.size());
+ Out.resize(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Out.size() - 1; ++i)
+ {
+ Out[i] = Data[i] * Data[i];
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Mul A: %ld\n", TimeEnd - TimeStart);
+}
+
+void test_mulD(std::vector<glm::mat4> const & Data, std::vector<glm::mat4> & Out)
+{
+ //std::vector<float> Test(Data.size());
+ Out.resize(Data.size());
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t i = 0; i < Out.size() - 1; ++i)
+ {
+ _mm_prefetch((char*)&Data[i + 1], _MM_HINT_T0);
+ glm::simdMat4 m(Data[i]);
+ glm::simdMat4 o;
+ glm::detail::sse_mul_ps((__m128 const * const)&m, (__m128 const * const)&m, (__m128*)&o);
+ Out[i] = *(glm::mat4*)&o;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("Mul D: %ld\n", TimeEnd - TimeStart);
+}
+
+int test_compute_glm()
+{
+ return 0;
+}
+
+int test_compute_gtx()
+{
+ std::vector<glm::vec4> Output(1000000);
+
+ std::clock_t TimeStart = clock();
+
+ for(std::size_t k = 0; k < Output.size(); ++k)
+ {
+ float i = float(k) / 1000.f + 0.001f;
+ glm::vec3 A = glm::normalize(glm::vec3(i));
+ glm::vec3 B = glm::cross(A, glm::normalize(glm::vec3(1, 1, 2)));
+ glm::mat4 C = glm::rotate(glm::mat4(1.0f), i, B);
+ glm::mat4 D = glm::scale(C, glm::vec3(0.8f, 1.0f, 1.2f));
+ glm::mat4 E = glm::translate(D, glm::vec3(1.4f, 1.2f, 1.1f));
+ glm::mat4 F = glm::perspective(i, 1.5f, 0.1f, 1000.f);
+ glm::mat4 G = glm::inverse(F * E);
+ glm::vec3 H = glm::unProject(glm::vec3(i), G, F, E[3]);
+ glm::vec3 I = glm::any(glm::isnan(glm::project(H, G, F, E[3]))) ? glm::vec3(2) : glm::vec3(1);
+ glm::mat4 J = glm::lookAt(glm::normalize(glm::max(B, glm::vec3(0.001f))), H, I);
+ glm::mat4 K = glm::transpose(J);
+ glm::quat L = glm::normalize(glm::quat_cast(K));
+ glm::vec4 M = L * glm::smoothstep(K[3], J[3], glm::vec4(i));
+ glm::mat4 N = glm::mat4(glm::normalize(glm::max(M, glm::vec4(0.001f))), K[3], J[3], glm::vec4(i));
+ glm::mat4 O = N * glm::inverse(N);
+ glm::vec4 P = O * glm::reflect(N[3], glm::vec4(A, 1.0f));
+ glm::vec4 Q = glm::vec4(glm::dot(M, P));
+ glm::vec4 R = glm::quat(Q.w, glm::vec3(Q)) * P;
+ Output[k] = R;
+ }
+
+ std::clock_t TimeEnd = clock();
+ printf("test_compute_gtx: %ld\n", TimeEnd - TimeStart);
+
+ return 0;
+}
+
+int main()
+{
+ int Error = 0;
+
+ std::vector<glm::mat4> Data(64 * 64 * 1);
+ for(std::size_t i = 0; i < Data.size(); ++i)
+ Data[i] = glm::mat4(
+ glm::vec4(glm::linearRand(glm::vec4(-2.0f), glm::vec4(2.0f))),
+ glm::vec4(glm::linearRand(glm::vec4(-2.0f), glm::vec4(2.0f))),
+ glm::vec4(glm::linearRand(glm::vec4(-2.0f), glm::vec4(2.0f))),
+ glm::vec4(glm::linearRand(glm::vec4(-2.0f), glm::vec4(2.0f))));
+
+ {
+ std::vector<glm::mat4> TestInvA;
+ test_invA(Data, TestInvA);
+ }
+ {
+ std::vector<glm::mat4> TestInvC;
+ test_invC(Data, TestInvC);
+ }
+ {
+ std::vector<glm::mat4> TestInvD;
+ test_invD(Data, TestInvD);
+ }
+
+ {
+ std::vector<glm::mat4> TestA;
+ test_mulA(Data, TestA);
+ }
+ {
+ std::vector<glm::mat4> TestD;
+ test_mulD(Data, TestD);
+ }
+
+ {
+ std::vector<float> TestDetA = test_detA(Data);
+ std::vector<float> TestDetB = test_detB(Data);
+ std::vector<float> TestDetD = test_detD(Data);
+ std::vector<float> TestDetC = test_detC(Data);
+
+ for(std::size_t i = 0; i < TestDetA.size(); ++i)
+ if(TestDetA[i] != TestDetB[i] && TestDetC[i] != TestDetB[i] && TestDetC[i] != TestDetD[i])
+ return 1;
+ }
+
+ // shuffle test
+ glm::simdVec4 A(1.0f, 2.0f, 3.0f, 4.0f);
+ glm::simdVec4 B(5.0f, 6.0f, 7.0f, 8.0f);
+ //__m128 C = _mm_shuffle_ps(A.Data, B.Data, _MM_SHUFFLE(1, 0, 1, 0));
+
+ Error += test_compute_glm();
+ Error += test_compute_gtx();
+ float Det = glm::determinant(glm::simdMat4(1.0));
+ Error += Det == 1.0f ? 0 : 1;
+
+ glm::simdMat4 D = glm::matrixCompMult(glm::simdMat4(1.0), glm::simdMat4(1.0));
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
+
+#endif//(GLM_ARCH != GLM_ARCH_PURE)
diff --git a/3rdparty/glm/source/test/gtx/gtx_simd_vec4.cpp b/3rdparty/glm/source/test/gtx/gtx_simd_vec4.cpp
new file mode 100644
index 0000000..e71a60b
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_simd_vec4.cpp
@@ -0,0 +1,71 @@
+///////////////////////////////////////////////////////////////////////////////////
+/// OpenGL Mathematics (glm.g-truc.net)
+///
+/// Copyright (c) 2005 - 2012 G-Truc Creation (www.g-truc.net)
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to deal
+/// in the Software without restriction, including without limitation the rights
+/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+/// copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in
+/// all copies or substantial portions of the Software.
+///
+/// Restrictions:
+/// By making use of the Software for military purposes, you choose to make
+/// a Bunny unhappy.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+/// THE SOFTWARE.
+///
+/// @file test/gtx/gtx_simd_vec4.cpp
+/// @date 2010-09-16 / 2014-11-25
+/// @author Christophe Riccio
+///////////////////////////////////////////////////////////////////////////////////
+
+#include <glm/glm.hpp>
+#include <glm/gtx/simd_vec4.hpp>
+#include <cstdio>
+
+#if(GLM_ARCH != GLM_ARCH_PURE)
+
+int main()
+{
+ glm::simdVec4 A1(0.0f, 0.1f, 0.2f, 0.3f);
+ glm::simdVec4 B1(0.4f, 0.5f, 0.6f, 0.7f);
+ glm::simdVec4 C1 = A1 + B1;
+ glm::simdVec4 D1 = A1.swizzle<glm::X, glm::Z, glm::Y, glm::W>();
+ glm::simdVec4 E1(glm::vec4(1.0f));
+ glm::vec4 F1 = glm::vec4_cast(E1);
+ //glm::vec4 G1(E1);
+
+ //printf("A1(%2.3f, %2.3f, %2.3f, %2.3f)\n", A1.x, A1.y, A1.z, A1.w);
+ //printf("B1(%2.3f, %2.3f, %2.3f, %2.3f)\n", B1.x, B1.y, B1.z, B1.w);
+ //printf("C1(%2.3f, %2.3f, %2.3f, %2.3f)\n", C1.x, C1.y, C1.z, C1.w);
+ //printf("D1(%2.3f, %2.3f, %2.3f, %2.3f)\n", D1.x, D1.y, D1.z, D1.w);
+
+ __m128 value = _mm_set1_ps(0.0f);
+ __m128 data = _mm_cmpeq_ps(value, value);
+ __m128 add0 = _mm_add_ps(data, data);
+
+ glm::simdVec4 GNI(add0);
+
+ return 0;
+}
+
+#else
+
+int main()
+{
+ int Error = 0;
+
+ return Error;
+}
+
+#endif//(GLM_ARCH != GLM_ARCH_PURE)
diff --git a/3rdparty/glm/source/test/gtx/gtx_spline.cpp b/3rdparty/glm/source/test/gtx/gtx_spline.cpp
new file mode 100644
index 0000000..c93ee17
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_spline.cpp
@@ -0,0 +1,100 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <glm/gtx/spline.hpp>
+
+namespace catmullRom
+{
+ int test()
+ {
+ int Error(0);
+
+ glm::vec2 Result2 = glm::catmullRom(
+ glm::vec2(0.0f, 0.0f),
+ glm::vec2(1.0f, 0.0f),
+ glm::vec2(1.0f, 1.0f),
+ glm::vec2(0.0f, 1.0f), 0.5f);
+
+ glm::vec3 Result3 = glm::catmullRom(
+ glm::vec3(0.0f, 0.0f, 0.0f),
+ glm::vec3(1.0f, 0.0f, 0.0f),
+ glm::vec3(1.0f, 1.0f, 0.0f),
+ glm::vec3(0.0f, 1.0f, 0.0f), 0.5f);
+
+ glm::vec4 Result4 = glm::catmullRom(
+ glm::vec4(0.0f, 0.0f, 0.0f, 1.0f),
+ glm::vec4(1.0f, 0.0f, 0.0f, 1.0f),
+ glm::vec4(1.0f, 1.0f, 0.0f, 1.0f),
+ glm::vec4(0.0f, 1.0f, 0.0f, 1.0f), 0.5f);
+
+ return Error;
+ }
+}//catmullRom
+
+namespace hermite
+{
+ int test()
+ {
+ int Error(0);
+
+ glm::vec2 Result2 = glm::hermite(
+ glm::vec2(0.0f, 0.0f),
+ glm::vec2(1.0f, 0.0f),
+ glm::vec2(1.0f, 1.0f),
+ glm::vec2(0.0f, 1.0f), 0.5f);
+
+ glm::vec3 Result3 = glm::hermite(
+ glm::vec3(0.0f, 0.0f, 0.0f),
+ glm::vec3(1.0f, 0.0f, 0.0f),
+ glm::vec3(1.0f, 1.0f, 0.0f),
+ glm::vec3(0.0f, 1.0f, 0.0f), 0.5f);
+
+ glm::vec4 Result4 = glm::hermite(
+ glm::vec4(0.0f, 0.0f, 0.0f, 1.0f),
+ glm::vec4(1.0f, 0.0f, 0.0f, 1.0f),
+ glm::vec4(1.0f, 1.0f, 0.0f, 1.0f),
+ glm::vec4(0.0f, 1.0f, 0.0f, 1.0f), 0.5f);
+
+ return Error;
+ }
+}//catmullRom
+
+namespace cubic
+{
+ int test()
+ {
+ int Error(0);
+
+ glm::vec2 Result2 = glm::cubic(
+ glm::vec2(0.0f, 0.0f),
+ glm::vec2(1.0f, 0.0f),
+ glm::vec2(1.0f, 1.0f),
+ glm::vec2(0.0f, 1.0f), 0.5f);
+
+ glm::vec3 Result3 = glm::cubic(
+ glm::vec3(0.0f, 0.0f, 0.0f),
+ glm::vec3(1.0f, 0.0f, 0.0f),
+ glm::vec3(1.0f, 1.0f, 0.0f),
+ glm::vec3(0.0f, 1.0f, 0.0f), 0.5f);
+
+ glm::vec4 Result = glm::cubic(
+ glm::vec4(0.0f, 0.0f, 0.0f, 1.0f),
+ glm::vec4(1.0f, 0.0f, 0.0f, 1.0f),
+ glm::vec4(1.0f, 1.0f, 0.0f, 1.0f),
+ glm::vec4(0.0f, 1.0f, 0.0f, 1.0f), 0.5f);
+
+ return Error;
+ }
+}//catmullRom
+
+int main()
+{
+ int Error(0);
+
+ Error += catmullRom::test();
+ Error += hermite::test();
+ Error += cubic::test();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_string_cast.cpp b/3rdparty/glm/source/test/gtx/gtx_string_cast.cpp
new file mode 100644
index 0000000..b04c870
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_string_cast.cpp
@@ -0,0 +1,155 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/glm.hpp>
+#include <glm/gtx/string_cast.hpp>
+#include <limits>
+
+int test_string_cast_vector()
+{
+ int Error = 0;
+
+ {
+ glm::vec2 A1(1, 2);
+ std::string A2 = glm::to_string(A1);
+ Error += A2 != std::string("vec2(1.000000, 2.000000)") ? 1 : 0;
+
+ glm::vec3 B1(1, 2, 3);
+ std::string B2 = glm::to_string(B1);
+ Error += B2 != std::string("vec3(1.000000, 2.000000, 3.000000)") ? 1 : 0;
+
+ glm::vec4 C1(1, 2, 3, 4);
+ std::string C2 = glm::to_string(C1);
+ Error += C2 != std::string("vec4(1.000000, 2.000000, 3.000000, 4.000000)") ? 1 : 0;
+
+ glm::dvec2 J1(1, 2);
+ std::string J2 = glm::to_string(J1);
+ Error += J2 != std::string("dvec2(1.000000, 2.000000)") ? 1 : 0;
+
+ glm::dvec3 K1(1, 2, 3);
+ std::string K2 = glm::to_string(K1);
+ Error += K2 != std::string("dvec3(1.000000, 2.000000, 3.000000)") ? 1 : 0;
+
+ glm::dvec4 L1(1, 2, 3, 4);
+ std::string L2 = glm::to_string(L1);
+ Error += L2 != std::string("dvec4(1.000000, 2.000000, 3.000000, 4.000000)") ? 1 : 0;
+ }
+
+ {
+ glm::bvec2 M1(false, true);
+ std::string M2 = glm::to_string(M1);
+ Error += M2 != std::string("bvec2(false, true)") ? 1 : 0;
+
+ glm::bvec3 O1(false, true, false);
+ std::string O2 = glm::to_string(O1);
+ Error += O2 != std::string("bvec3(false, true, false)") ? 1 : 0;
+
+ glm::bvec4 P1(false, true, false, true);
+ std::string P2 = glm::to_string(P1);
+ Error += P2 != std::string("bvec4(false, true, false, true)") ? 1 : 0;
+ }
+
+ {
+ glm::ivec2 D1(1, 2);
+ std::string D2 = glm::to_string(D1);
+ Error += D2 != std::string("ivec2(1, 2)") ? 1 : 0;
+
+ glm::ivec3 E1(1, 2, 3);
+ std::string E2 = glm::to_string(E1);
+ Error += E2 != std::string("ivec3(1, 2, 3)") ? 1 : 0;
+
+ glm::ivec4 F1(1, 2, 3, 4);
+ std::string F2 = glm::to_string(F1);
+ Error += F2 != std::string("ivec4(1, 2, 3, 4)") ? 1 : 0;
+ }
+
+ {
+ glm::i8vec2 D1(1, 2);
+ std::string D2 = glm::to_string(D1);
+ Error += D2 != std::string("i8vec2(1, 2)") ? 1 : 0;
+
+ glm::i8vec3 E1(1, 2, 3);
+ std::string E2 = glm::to_string(E1);
+ Error += E2 != std::string("i8vec3(1, 2, 3)") ? 1 : 0;
+
+ glm::i8vec4 F1(1, 2, 3, 4);
+ std::string F2 = glm::to_string(F1);
+ Error += F2 != std::string("i8vec4(1, 2, 3, 4)") ? 1 : 0;
+ }
+
+ {
+ glm::i16vec2 D1(1, 2);
+ std::string D2 = glm::to_string(D1);
+ Error += D2 != std::string("i16vec2(1, 2)") ? 1 : 0;
+
+ glm::i16vec3 E1(1, 2, 3);
+ std::string E2 = glm::to_string(E1);
+ Error += E2 != std::string("i16vec3(1, 2, 3)") ? 1 : 0;
+
+ glm::i16vec4 F1(1, 2, 3, 4);
+ std::string F2 = glm::to_string(F1);
+ Error += F2 != std::string("i16vec4(1, 2, 3, 4)") ? 1 : 0;
+ }
+
+ {
+ glm::i64vec2 D1(1, 2);
+ std::string D2 = glm::to_string(D1);
+ Error += D2 != std::string("i64vec2(1, 2)") ? 1 : 0;
+
+ glm::i64vec3 E1(1, 2, 3);
+ std::string E2 = glm::to_string(E1);
+ Error += E2 != std::string("i64vec3(1, 2, 3)") ? 1 : 0;
+
+ glm::i64vec4 F1(1, 2, 3, 4);
+ std::string F2 = glm::to_string(F1);
+ Error += F2 != std::string("i64vec4(1, 2, 3, 4)") ? 1 : 0;
+ }
+
+ return Error;
+}
+
+int test_string_cast_matrix()
+{
+ int Error = 0;
+
+ glm::mat2x2 A1(1.000000, 2.000000, 3.000000, 4.000000);
+ std::string A2 = glm::to_string(A1);
+ Error += A2 != std::string("mat2x2((1.000000, 2.000000), (3.000000, 4.000000))") ? 1 : 0;
+
+ return Error;
+}
+
+int test_string_cast_quaternion()
+{
+ int Error = 0;
+
+ glm::quat Q0 = glm::quat(1.0f, 2.0f, 3.0f, 4.0f);
+ std::string S0 = glm::to_string(Q0);
+ Error += S0 != std::string("quat(1.000000, {2.000000, 3.000000, 4.000000})") ? 1 : 0;
+
+ return Error;
+
+}
+
+int test_string_cast_dual_quaternion()
+{
+ int Error = 0;
+
+ glm::dualquat Q0 = glm::dualquat(glm::quat(1.0f, 2.0f, 3.0f, 4.0f), glm::quat(5.0f, 6.0f, 7.0f, 8.0f));
+ std::string S0 = glm::to_string(Q0);
+ Error += S0 != std::string("dualquat((1.000000, {2.000000, 3.000000, 4.000000}), (5.000000, {6.000000, 7.000000, 8.000000}))") ? 1 : 0;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_string_cast_vector();
+ Error += test_string_cast_matrix();
+ Error += test_string_cast_quaternion();
+ Error += test_string_cast_dual_quaternion();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_texture.cpp b/3rdparty/glm/source/test/gtx/gtx_texture.cpp
new file mode 100644
index 0000000..0b98ed7
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_texture.cpp
@@ -0,0 +1,22 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/vec2.hpp>
+#include <glm/gtx/texture.hpp>
+
+int test_levels()
+{
+ int Error = 0;
+
+ int const Levels = glm::levels(glm::ivec2(3, 2));
+ Error += Levels == 2 ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error = 0;
+
+ Error += test_levels();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_type_aligned.cpp b/3rdparty/glm/source/test/gtx/gtx_type_aligned.cpp
new file mode 100644
index 0000000..8d045c0
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_type_aligned.cpp
@@ -0,0 +1,114 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/type_aligned.hpp>
+#include <cstdio>
+
+int test_decl()
+{
+ int Error(0);
+
+ {
+ struct S1
+ {
+ glm::aligned_vec4 B;
+ };
+
+ struct S2
+ {
+ glm::vec4 B;
+ };
+
+ std::printf("vec4 - Aligned: %d, unaligned: %d\n", static_cast<int>(sizeof(S1)), static_cast<int>(sizeof(S2)));
+
+ Error += sizeof(S1) >= sizeof(S2) ? 0 : 1;
+ }
+
+ {
+ struct S1
+ {
+ bool A;
+ glm::vec3 B;
+ };
+
+ struct S2
+ {
+ bool A;
+ glm::aligned_vec3 B;
+ };
+
+ std::printf("vec3 - Aligned: %d, unaligned: %d\n", static_cast<int>(sizeof(S1)), static_cast<int>(sizeof(S2)));
+
+ Error += sizeof(S1) <= sizeof(S2) ? 0 : 1;
+ }
+
+ {
+ struct S1
+ {
+ bool A;
+ glm::aligned_vec4 B;
+ };
+
+ struct S2
+ {
+ bool A;
+ glm::vec4 B;
+ };
+
+ std::printf("vec4 - Aligned: %d, unaligned: %d\n", static_cast<int>(sizeof(S1)), static_cast<int>(sizeof(S2)));
+
+ Error += sizeof(S1) >= sizeof(S2) ? 0 : 1;
+ }
+
+ {
+ struct S1
+ {
+ bool A;
+ glm::aligned_dvec4 B;
+ };
+
+ struct S2
+ {
+ bool A;
+ glm::dvec4 B;
+ };
+
+ std::printf("dvec4 - Aligned: %d, unaligned: %d\n", static_cast<int>(sizeof(S1)), static_cast<int>(sizeof(S2)));
+
+ Error += sizeof(S1) >= sizeof(S2) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template<typename genType>
+void print(genType const& Mat0)
+{
+ std::printf("mat4(\n");
+ std::printf("\tvec4(%2.9f, %2.9f, %2.9f, %2.9f)\n", static_cast<double>(Mat0[0][0]), static_cast<double>(Mat0[0][1]), static_cast<double>(Mat0[0][2]), static_cast<double>(Mat0[0][3]));
+ std::printf("\tvec4(%2.9f, %2.9f, %2.9f, %2.9f)\n", static_cast<double>(Mat0[1][0]), static_cast<double>(Mat0[1][1]), static_cast<double>(Mat0[1][2]), static_cast<double>(Mat0[1][3]));
+ std::printf("\tvec4(%2.9f, %2.9f, %2.9f, %2.9f)\n", static_cast<double>(Mat0[2][0]), static_cast<double>(Mat0[2][1]), static_cast<double>(Mat0[2][2]), static_cast<double>(Mat0[2][3]));
+ std::printf("\tvec4(%2.9f, %2.9f, %2.9f, %2.9f))\n\n", static_cast<double>(Mat0[3][0]), static_cast<double>(Mat0[3][1]), static_cast<double>(Mat0[3][2]), static_cast<double>(Mat0[3][3]));
+}
+
+int perf_mul()
+{
+ int Error = 0;
+
+ glm::mat4 A(1.0f);
+ glm::mat4 B(1.0f);
+
+ glm::mat4 C = A * B;
+
+ print(C);
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_decl();
+ Error += perf_mul();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/gtx/gtx_type_trait.cpp b/3rdparty/glm/source/test/gtx/gtx_type_trait.cpp
new file mode 100644
index 0000000..9b96a36
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_type_trait.cpp
@@ -0,0 +1,13 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/vec4.hpp>
+#include <glm/gtx/type_trait.hpp>
+
+int main()
+{
+ int Error = 0;
+
+
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_vec_swizzle.cpp b/3rdparty/glm/source/test/gtx/gtx_vec_swizzle.cpp
new file mode 100644
index 0000000..0b0c8b8
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_vec_swizzle.cpp
@@ -0,0 +1,11 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/vec_swizzle.hpp>
+
+int main()
+{
+ int Error = 0;
+
+
+ return Error;
+}
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_vector_angle.cpp b/3rdparty/glm/source/test/gtx/gtx_vector_angle.cpp
new file mode 100644
index 0000000..4e8172b
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_vector_angle.cpp
@@ -0,0 +1,59 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtc/constants.hpp>
+#include <glm/gtx/vector_angle.hpp>
+#include <limits>
+
+int test_angle()
+{
+ int Error = 0;
+
+ float AngleA = glm::angle(glm::vec2(1, 0), glm::normalize(glm::vec2(1, 1)));
+ Error += glm::epsilonEqual(AngleA, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ float AngleB = glm::angle(glm::vec3(1, 0, 0), glm::normalize(glm::vec3(1, 1, 0)));
+ Error += glm::epsilonEqual(AngleB, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ float AngleC = glm::angle(glm::vec4(1, 0, 0, 0), glm::normalize(glm::vec4(1, 1, 0, 0)));
+ Error += glm::epsilonEqual(AngleC, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int test_orientedAngle_vec2()
+{
+ int Error = 0;
+
+ float AngleA = glm::orientedAngle(glm::vec2(1, 0), glm::normalize(glm::vec2(1, 1)));
+ Error += glm::epsilonEqual(AngleA, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ float AngleB = glm::orientedAngle(glm::vec2(0, 1), glm::normalize(glm::vec2(1, 1)));
+ Error += glm::epsilonEqual(AngleB, -glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ float AngleC = glm::orientedAngle(glm::normalize(glm::vec2(1, 1)), glm::vec2(0, 1));
+ Error += glm::epsilonEqual(AngleC, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int test_orientedAngle_vec3()
+{
+ int Error = 0;
+
+ float AngleA = glm::orientedAngle(glm::vec3(1, 0, 0), glm::normalize(glm::vec3(1, 1, 0)), glm::vec3(0, 0, 1));
+ Error += glm::epsilonEqual(AngleA, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ float AngleB = glm::orientedAngle(glm::vec3(0, 1, 0), glm::normalize(glm::vec3(1, 1, 0)), glm::vec3(0, 0, 1));
+ Error += glm::epsilonEqual(AngleB, -glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+ float AngleC = glm::orientedAngle(glm::normalize(glm::vec3(1, 1, 0)), glm::vec3(0, 1, 0), glm::vec3(0, 0, 1));
+ Error += glm::epsilonEqual(AngleC, glm::pi<float>() * 0.25f, 0.01f) ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_angle();
+ Error += test_orientedAngle_vec2();
+ Error += test_orientedAngle_vec3();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_vector_query.cpp b/3rdparty/glm/source/test/gtx/gtx_vector_query.cpp
new file mode 100644
index 0000000..729f9e1
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_vector_query.cpp
@@ -0,0 +1,82 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/vec2.hpp>
+#include <glm/vec3.hpp>
+#include <glm/vec4.hpp>
+#include <glm/gtx/vector_query.hpp>
+
+int test_areCollinear()
+{
+ int Error(0);
+
+ {
+ bool TestA = glm::areCollinear(glm::vec2(-1), glm::vec2(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+ }
+
+ {
+ bool TestA = glm::areCollinear(glm::vec3(-1), glm::vec3(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+ }
+
+ {
+ bool TestA = glm::areCollinear(glm::vec4(-1), glm::vec4(1), 0.00001f);
+ Error += TestA ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int test_areOrthogonal()
+{
+ int Error(0);
+
+ bool TestA = glm::areOrthogonal(glm::vec2(1, 0), glm::vec2(0, 1), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int test_isNormalized()
+{
+ int Error(0);
+
+ bool TestA = glm::isNormalized(glm::vec4(1, 0, 0, 0), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int test_isNull()
+{
+ int Error(0);
+
+ bool TestA = glm::isNull(glm::vec4(0), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int test_areOrthonormal()
+{
+ int Error(0);
+
+ bool TestA = glm::areOrthonormal(glm::vec2(1, 0), glm::vec2(0, 1), 0.00001f);
+ Error += TestA ? 0 : 1;
+
+ return Error;
+}
+
+int main()
+{
+ int Error(0);
+
+ Error += test_areCollinear();
+ Error += test_areOrthogonal();
+ Error += test_isNormalized();
+ Error += test_isNull();
+ Error += test_areOrthonormal();
+
+ return Error;
+}
+
+
diff --git a/3rdparty/glm/source/test/gtx/gtx_wrap.cpp b/3rdparty/glm/source/test/gtx/gtx_wrap.cpp
new file mode 100644
index 0000000..2354cc8
--- /dev/null
+++ b/3rdparty/glm/source/test/gtx/gtx_wrap.cpp
@@ -0,0 +1,191 @@
+#define GLM_ENABLE_EXPERIMENTAL
+#include <glm/gtx/wrap.hpp>
+#include <glm/ext/scalar_relational.hpp>
+#include <glm/ext/vector_relational.hpp>
+
+namespace clamp
+{
+ int test()
+ {
+ int Error(0);
+
+ float A = glm::clamp(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::clamp(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::clamp(1.0f);
+ Error += glm::equal(C, 1.0f, 0.00001f) ? 0 : 1;
+
+ float D = glm::clamp(-0.5f);
+ Error += glm::equal(D, 0.0f, 0.00001f) ? 0 : 1;
+
+ float E = glm::clamp(1.5f);
+ Error += glm::equal(E, 1.0f, 0.00001f) ? 0 : 1;
+
+ glm::vec2 K = glm::clamp(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::clamp(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::clamp(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::clamp(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace clamp
+
+namespace repeat
+{
+ int test()
+ {
+ int Error(0);
+
+ float A = glm::repeat(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::repeat(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::repeat(1.0f);
+ Error += glm::equal(C, 0.0f, 0.00001f) ? 0 : 1;
+
+ float D = glm::repeat(-0.5f);
+ Error += glm::equal(D, 0.5f, 0.00001f) ? 0 : 1;
+
+ float E = glm::repeat(1.5f);
+ Error += glm::equal(E, 0.5f, 0.00001f) ? 0 : 1;
+
+ float F = glm::repeat(0.9f);
+ Error += glm::equal(F, 0.9f, 0.00001f) ? 0 : 1;
+
+ glm::vec2 K = glm::repeat(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::repeat(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::repeat(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::repeat(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace repeat
+
+namespace mirrorClamp
+{
+ int test()
+ {
+ int Error(0);
+
+ float A = glm::mirrorClamp(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::mirrorClamp(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::mirrorClamp(1.1f);
+ Error += glm::equal(C, 0.1f, 0.00001f) ? 0 : 1;
+
+ float D = glm::mirrorClamp(-0.5f);
+ Error += glm::equal(D, 0.5f, 0.00001f) ? 0 : 1;
+
+ float E = glm::mirrorClamp(1.5f);
+ Error += glm::equal(E, 0.5f, 0.00001f) ? 0 : 1;
+
+ float F = glm::mirrorClamp(0.9f);
+ Error += glm::equal(F, 0.9f, 0.00001f) ? 0 : 1;
+
+ float G = glm::mirrorClamp(3.1f);
+ Error += glm::equal(G, 0.1f, 0.00001f) ? 0 : 1;
+
+ float H = glm::mirrorClamp(-3.1f);
+ Error += glm::equal(H, 0.1f, 0.00001f) ? 0 : 1;
+
+ float I = glm::mirrorClamp(-0.9f);
+ Error += glm::equal(I, 0.9f, 0.00001f) ? 0 : 1;
+
+ glm::vec2 K = glm::mirrorClamp(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::mirrorClamp(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::mirrorClamp(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::mirrorClamp(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace mirrorClamp
+
+namespace mirrorRepeat
+{
+ int test()
+ {
+ int Error(0);
+
+ float A = glm::mirrorRepeat(0.5f);
+ Error += glm::equal(A, 0.5f, 0.00001f) ? 0 : 1;
+
+ float B = glm::mirrorRepeat(0.0f);
+ Error += glm::equal(B, 0.0f, 0.00001f) ? 0 : 1;
+
+ float C = glm::mirrorRepeat(1.0f);
+ Error += glm::equal(C, 1.0f, 0.00001f) ? 0 : 1;
+
+ float D = glm::mirrorRepeat(-0.5f);
+ Error += glm::equal(D, 0.5f, 0.00001f) ? 0 : 1;
+
+ float E = glm::mirrorRepeat(1.5f);
+ Error += glm::equal(E, 0.5f, 0.00001f) ? 0 : 1;
+
+ float F = glm::mirrorRepeat(0.9f);
+ Error += glm::equal(F, 0.9f, 0.00001f) ? 0 : 1;
+
+ float G = glm::mirrorRepeat(3.0f);
+ Error += glm::equal(G, 1.0f, 0.00001f) ? 0 : 1;
+
+ float H = glm::mirrorRepeat(-3.0f);
+ Error += glm::equal(H, 1.0f, 0.00001f) ? 0 : 1;
+
+ float I = glm::mirrorRepeat(-1.0f);
+ Error += glm::equal(I, 1.0f, 0.00001f) ? 0 : 1;
+
+ glm::vec2 K = glm::mirrorRepeat(glm::vec2(0.5f));
+ Error += glm::all(glm::equal(K, glm::vec2(0.5f), glm::vec2(0.00001f))) ? 0 : 1;
+
+ glm::vec3 L = glm::mirrorRepeat(glm::vec3(0.5f));
+ Error += glm::all(glm::equal(L, glm::vec3(0.5f), glm::vec3(0.00001f))) ? 0 : 1;
+
+ glm::vec4 M = glm::mirrorRepeat(glm::vec4(0.5f));
+ Error += glm::all(glm::equal(M, glm::vec4(0.5f), glm::vec4(0.00001f))) ? 0 : 1;
+
+ glm::vec1 N = glm::mirrorRepeat(glm::vec1(0.5f));
+ Error += glm::all(glm::equal(N, glm::vec1(0.5f), glm::vec1(0.00001f))) ? 0 : 1;
+
+ return Error;
+ }
+}//namespace mirrorRepeat
+
+int main()
+{
+ int Error(0);
+
+ Error += clamp::test();
+ Error += repeat::test();
+ Error += mirrorClamp::test();
+ Error += mirrorRepeat::test();
+
+ return Error;
+}
diff --git a/3rdparty/glm/source/test/perf/CMakeLists.txt b/3rdparty/glm/source/test/perf/CMakeLists.txt
new file mode 100644
index 0000000..19c7050
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/CMakeLists.txt
@@ -0,0 +1,6 @@
+glmCreateTestGTC(perf_matrix_div)
+glmCreateTestGTC(perf_matrix_inverse)
+glmCreateTestGTC(perf_matrix_mul)
+glmCreateTestGTC(perf_matrix_mul_vector)
+glmCreateTestGTC(perf_matrix_transpose)
+glmCreateTestGTC(perf_vector_mul_matrix)
diff --git a/3rdparty/glm/source/test/perf/perf_matrix_div.cpp b/3rdparty/glm/source/test/perf/perf_matrix_div.cpp
new file mode 100644
index 0000000..630188d
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/perf_matrix_div.cpp
@@ -0,0 +1,153 @@
+#define GLM_FORCE_INLINE
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <vector>
+#include <chrono>
+#include <cstdio>
+
+template <typename matType>
+static void test_mat_div_mat(matType const& M, std::vector<matType> const& I, std::vector<matType>& O)
+{
+ for (std::size_t i = 0, n = I.size(); i < n; ++i)
+ O[i] = M / I[i];
+}
+
+template <typename matType>
+static int launch_mat_div_mat(std::vector<matType>& O, matType const& Transform, matType const& Scale, std::size_t Samples)
+{
+ typedef typename matType::value_type T;
+
+ std::vector<matType> I(Samples);
+ O.resize(Samples);
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ I[i] = Scale * static_cast<T>(i) + Scale;
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+ test_mat_div_mat<matType>(Transform, I, O);
+ std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+
+ return static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat2_div_mat2(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4);
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_div_mat<packedMatType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_div_mat<alignedMatType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat3_div_mat3(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05, 0.01);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_div_mat<packedMatType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_div_mat<alignedMatType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat4_div_mat4(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+ packedMatType const Scale(0.01, 0.02, 0.05, 0.04, 0.02, 0.08, 0.05, 0.01, 0.08, 0.03, 0.05, 0.06, 0.02, 0.03, 0.07, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_div_mat<packedMatType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_div_mat<alignedMatType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int main()
+{
+ std::size_t const Samples = 100000;
+
+ int Error = 0;
+
+ std::printf("mat2 / mat2:\n");
+ Error += comp_mat2_div_mat2<glm::mat2, glm::aligned_mat2>(Samples);
+
+ std::printf("dmat2 / dmat2:\n");
+ Error += comp_mat2_div_mat2<glm::dmat2, glm::aligned_dmat2>(Samples);
+
+ std::printf("mat3 / mat3:\n");
+ Error += comp_mat3_div_mat3<glm::mat3, glm::aligned_mat3>(Samples);
+
+ std::printf("dmat3 / dmat3:\n");
+ Error += comp_mat3_div_mat3<glm::dmat3, glm::aligned_dmat3>(Samples);
+
+ std::printf("mat4 / mat4:\n");
+ Error += comp_mat4_div_mat4<glm::mat4, glm::aligned_mat4>(Samples);
+
+ std::printf("dmat4 / dmat4:\n");
+ Error += comp_mat4_div_mat4<glm::dmat4, glm::aligned_dmat4>(Samples);
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/perf/perf_matrix_inverse.cpp b/3rdparty/glm/source/test/perf/perf_matrix_inverse.cpp
new file mode 100644
index 0000000..1a989ae
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/perf_matrix_inverse.cpp
@@ -0,0 +1,150 @@
+#define GLM_FORCE_INLINE
+#include <glm/matrix.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <vector>
+#include <chrono>
+#include <cstdio>
+
+template <typename matType>
+static void test_mat_inverse(std::vector<matType> const& I, std::vector<matType>& O)
+{
+ for (std::size_t i = 0, n = I.size(); i < n; ++i)
+ O[i] = glm::inverse(I[i]);
+}
+
+template <typename matType>
+static int launch_mat_inverse(std::vector<matType>& O, matType const& Scale, std::size_t Samples)
+{
+ typedef typename matType::value_type T;
+
+ std::vector<matType> I(Samples);
+ O.resize(Samples);
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ I[i] = Scale * static_cast<T>(i) + Scale;
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+ test_mat_inverse<matType>(I, O);
+ std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+
+ return static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat2_inverse(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_inverse<packedMatType>(SISD, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_inverse<alignedMatType>(SIMD, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat3_inverse(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05, 0.01);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_inverse<packedMatType>(SISD, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_inverse<alignedMatType>(SIMD, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat4_inverse(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Scale(0.01, 0.02, 0.05, 0.04, 0.02, 0.08, 0.05, 0.01, 0.08, 0.03, 0.05, 0.06, 0.02, 0.03, 0.07, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_inverse<packedMatType>(SISD, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_inverse<alignedMatType>(SIMD, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int main()
+{
+ std::size_t const Samples = 100000;
+
+ int Error = 0;
+
+ std::printf("glm::inverse(mat2):\n");
+ Error += comp_mat2_inverse<glm::mat2, glm::aligned_mat2>(Samples);
+
+ std::printf("glm::inverse(dmat2):\n");
+ Error += comp_mat2_inverse<glm::dmat2, glm::aligned_dmat2>(Samples);
+
+ std::printf("glm::inverse(mat3):\n");
+ Error += comp_mat3_inverse<glm::mat3, glm::aligned_mat3>(Samples);
+
+ std::printf("glm::inverse(dmat3):\n");
+ Error += comp_mat3_inverse<glm::dmat3, glm::aligned_dmat3>(Samples);
+
+ std::printf("glm::inverse(mat4):\n");
+ Error += comp_mat4_inverse<glm::mat4, glm::aligned_mat4>(Samples);
+
+ std::printf("glm::inverse(dmat4):\n");
+ Error += comp_mat4_inverse<glm::dmat4, glm::aligned_dmat4>(Samples);
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/perf/perf_matrix_mul.cpp b/3rdparty/glm/source/test/perf/perf_matrix_mul.cpp
new file mode 100644
index 0000000..d6b1f10
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/perf_matrix_mul.cpp
@@ -0,0 +1,154 @@
+#define GLM_FORCE_INLINE
+#include <glm/ext/matrix_float2x2.hpp>
+#include <glm/ext/matrix_double2x2.hpp>
+#include <glm/ext/matrix_float3x3.hpp>
+#include <glm/ext/matrix_double3x3.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <vector>
+#include <chrono>
+#include <cstdio>
+
+template <typename matType>
+static void test_mat_mul_mat(matType const& M, std::vector<matType> const& I, std::vector<matType>& O)
+{
+ for (std::size_t i = 0, n = I.size(); i < n; ++i)
+ O[i] = M * I[i];
+}
+
+template <typename matType>
+static int launch_mat_mul_mat(std::vector<matType>& O, matType const& Transform, matType const& Scale, std::size_t Samples)
+{
+ typedef typename matType::value_type T;
+
+ std::vector<matType> I(Samples);
+ O.resize(Samples);
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ I[i] = Scale * static_cast<T>(i);
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+ test_mat_mul_mat<matType>(Transform, I, O);
+ std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+
+ return static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat2_mul_mat2(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4);
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_mul_mat<packedMatType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_mul_mat<alignedMatType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat3_mul_mat3(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05, 0.01);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_mul_mat<packedMatType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_mul_mat<alignedMatType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat4_mul_mat4(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_mul_mat<packedMatType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_mul_mat<alignedMatType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ std::size_t const Samples = 100000;
+
+ int Error = 0;
+
+ std::printf("mat2 * mat2:\n");
+ Error += comp_mat2_mul_mat2<glm::mat2, glm::aligned_mat2>(Samples);
+
+ std::printf("dmat2 * dmat2:\n");
+ Error += comp_mat2_mul_mat2<glm::dmat2, glm::aligned_dmat2>(Samples);
+
+ std::printf("mat3 * mat3:\n");
+ Error += comp_mat3_mul_mat3<glm::mat3, glm::aligned_mat3>(Samples);
+
+ std::printf("dmat3 * dmat3:\n");
+ Error += comp_mat3_mul_mat3<glm::dmat3, glm::aligned_dmat3>(Samples);
+
+ std::printf("mat4 * mat4:\n");
+ Error += comp_mat4_mul_mat4<glm::mat4, glm::aligned_mat4>(Samples);
+
+ std::printf("dmat4 * dmat4:\n");
+ Error += comp_mat4_mul_mat4<glm::dmat4, glm::aligned_dmat4>(Samples);
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/perf/perf_matrix_mul_vector.cpp b/3rdparty/glm/source/test/perf/perf_matrix_mul_vector.cpp
new file mode 100644
index 0000000..8e555f8
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/perf_matrix_mul_vector.cpp
@@ -0,0 +1,154 @@
+#define GLM_FORCE_INLINE
+#include <glm/ext/matrix_float2x2.hpp>
+#include <glm/ext/matrix_double2x2.hpp>
+#include <glm/ext/matrix_float3x3.hpp>
+#include <glm/ext/matrix_double3x3.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <vector>
+#include <chrono>
+#include <cstdio>
+
+template <typename matType, typename vecType>
+static void test_mat_mul_vec(matType const& M, std::vector<vecType> const& I, std::vector<vecType>& O)
+{
+ for (std::size_t i = 0, n = I.size(); i < n; ++i)
+ O[i] = M * I[i];
+}
+
+template <typename matType, typename vecType>
+static int launch_mat_mul_vec(std::vector<vecType>& O, matType const& Transform, vecType const& Scale, std::size_t Samples)
+{
+ typedef typename matType::value_type T;
+
+ std::vector<vecType> I(Samples);
+ O.resize(Samples);
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ I[i] = Scale * static_cast<T>(i);
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+ test_mat_mul_vec<matType, vecType>(Transform, I, O);
+ std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+
+ return static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
+}
+
+template <typename packedMatType, typename packedVecType, typename alignedMatType, typename alignedVecType>
+static int comp_mat2_mul_vec2(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4);
+ packedVecType const Scale(0.01, 0.02);
+
+ std::vector<packedVecType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_mul_vec<packedMatType, packedVecType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedVecType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_mul_vec<alignedMatType, alignedVecType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedVecType const A = SISD[i];
+ packedVecType const B = packedVecType(SIMD[i]);
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename packedVecType, typename alignedMatType, typename alignedVecType>
+static int comp_mat3_mul_vec3(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ packedVecType const Scale(0.01, 0.02, 0.05);
+
+ std::vector<packedVecType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_mul_vec<packedMatType, packedVecType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedVecType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_mul_vec<alignedMatType, alignedVecType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedVecType const A = SISD[i];
+ packedVecType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename packedVecType, typename alignedMatType, typename alignedVecType>
+static int comp_mat4_mul_vec4(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+ packedVecType const Scale(0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedVecType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_mul_vec<packedMatType, packedVecType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedVecType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_mul_vec<alignedMatType, alignedVecType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedVecType const A = SISD[i];
+ packedVecType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ std::size_t const Samples = 100000;
+
+ int Error = 0;
+
+ std::printf("mat2 * vec2:\n");
+ Error += comp_mat2_mul_vec2<glm::mat2, glm::vec2, glm::aligned_mat2, glm::aligned_vec2>(Samples);
+
+ std::printf("dmat2 * dvec2:\n");
+ Error += comp_mat2_mul_vec2<glm::dmat2, glm::dvec2,glm::aligned_dmat2, glm::aligned_dvec2>(Samples);
+
+ std::printf("mat3 * vec3:\n");
+ Error += comp_mat3_mul_vec3<glm::mat3, glm::vec3, glm::aligned_mat3, glm::aligned_vec3>(Samples);
+
+ std::printf("dmat3 * dvec3:\n");
+ Error += comp_mat3_mul_vec3<glm::dmat3, glm::dvec3, glm::aligned_dmat3, glm::aligned_dvec3>(Samples);
+
+ std::printf("mat4 * vec4:\n");
+ Error += comp_mat4_mul_vec4<glm::mat4, glm::vec4, glm::aligned_mat4, glm::aligned_vec4>(Samples);
+
+ std::printf("dmat4 * dvec4:\n");
+ Error += comp_mat4_mul_vec4<glm::dmat4, glm::dvec4, glm::aligned_dmat4, glm::aligned_dvec4>(Samples);
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/perf/perf_matrix_transpose.cpp b/3rdparty/glm/source/test/perf/perf_matrix_transpose.cpp
new file mode 100644
index 0000000..2fdc782
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/perf_matrix_transpose.cpp
@@ -0,0 +1,150 @@
+#define GLM_FORCE_INLINE
+#include <glm/matrix.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <vector>
+#include <chrono>
+#include <cstdio>
+
+template <typename matType>
+static void test_mat_transpose(std::vector<matType> const& I, std::vector<matType>& O)
+{
+ for (std::size_t i = 0, n = I.size(); i < n; ++i)
+ O[i] = glm::transpose(I[i]);
+}
+
+template <typename matType>
+static int launch_mat_transpose(std::vector<matType>& O, matType const& Scale, std::size_t Samples)
+{
+ typedef typename matType::value_type T;
+
+ std::vector<matType> I(Samples);
+ O.resize(Samples);
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ I[i] = Scale * static_cast<T>(i) + Scale;
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+ test_mat_transpose<matType>(I, O);
+ std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+
+ return static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat2_transpose(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_transpose<packedMatType>(SISD, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_transpose<alignedMatType>(SIMD, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat3_transpose(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Scale(0.01, 0.02, 0.03, 0.05, 0.01, 0.02, 0.03, 0.05, 0.01);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_transpose<packedMatType>(SISD, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_transpose<alignedMatType>(SIMD, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename alignedMatType>
+static int comp_mat4_transpose(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Scale(0.01, 0.02, 0.05, 0.04, 0.02, 0.08, 0.05, 0.01, 0.08, 0.03, 0.05, 0.06, 0.02, 0.03, 0.07, 0.05);
+
+ std::vector<packedMatType> SISD;
+ std::printf("- SISD: %d us\n", launch_mat_transpose<packedMatType>(SISD, Scale, Samples));
+
+ std::vector<alignedMatType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_mat_transpose<alignedMatType>(SIMD, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedMatType const A = SISD[i];
+ packedMatType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ assert(!Error);
+ }
+
+ return Error;
+}
+
+int main()
+{
+ std::size_t const Samples = 100000;
+
+ int Error = 0;
+
+ std::printf("glm::transpose(mat2):\n");
+ Error += comp_mat2_transpose<glm::mat2, glm::aligned_mat2>(Samples);
+
+ std::printf("glm::transpose(dmat2):\n");
+ Error += comp_mat2_transpose<glm::dmat2, glm::aligned_dmat2>(Samples);
+
+ std::printf("glm::transpose(mat3):\n");
+ Error += comp_mat3_transpose<glm::mat3, glm::aligned_mat3>(Samples);
+
+ std::printf("glm::transpose(dmat3):\n");
+ Error += comp_mat3_transpose<glm::dmat3, glm::aligned_dmat3>(Samples);
+
+ std::printf("glm::transpose(mat4):\n");
+ Error += comp_mat4_transpose<glm::mat4, glm::aligned_mat4>(Samples);
+
+ std::printf("glm::transpose(dmat4):\n");
+ Error += comp_mat4_transpose<glm::dmat4, glm::aligned_dmat4>(Samples);
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/test/perf/perf_vector_mul_matrix.cpp b/3rdparty/glm/source/test/perf/perf_vector_mul_matrix.cpp
new file mode 100644
index 0000000..20991df
--- /dev/null
+++ b/3rdparty/glm/source/test/perf/perf_vector_mul_matrix.cpp
@@ -0,0 +1,154 @@
+#define GLM_FORCE_INLINE
+#include <glm/ext/matrix_float2x2.hpp>
+#include <glm/ext/matrix_double2x2.hpp>
+#include <glm/ext/matrix_float3x3.hpp>
+#include <glm/ext/matrix_double3x3.hpp>
+#include <glm/ext/matrix_float4x4.hpp>
+#include <glm/ext/matrix_double4x4.hpp>
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/ext/matrix_relational.hpp>
+#include <glm/ext/vector_float4.hpp>
+#if GLM_CONFIG_SIMD == GLM_ENABLE
+#include <glm/gtc/type_aligned.hpp>
+#include <vector>
+#include <chrono>
+#include <cstdio>
+
+template <typename matType, typename vecType>
+static void test_vec_mul_mat(matType const& M, std::vector<vecType> const& I, std::vector<vecType>& O)
+{
+ for (std::size_t i = 0, n = I.size(); i < n; ++i)
+ O[i] = I[i] * M;
+}
+
+template <typename matType, typename vecType>
+static int launch_vec_mul_mat(std::vector<vecType>& O, matType const& Transform, vecType const& Scale, std::size_t Samples)
+{
+ typedef typename matType::value_type T;
+
+ std::vector<vecType> I(Samples);
+ O.resize(Samples);
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ I[i] = Scale * static_cast<T>(i);
+
+ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
+ test_vec_mul_mat<matType, vecType>(Transform, I, O);
+ std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+
+ return static_cast<int>(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
+}
+
+template <typename packedMatType, typename packedVecType, typename alignedMatType, typename alignedVecType>
+static int comp_vec2_mul_mat2(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4);
+ packedVecType const Scale(0.01, 0.02);
+
+ std::vector<packedVecType> SISD;
+ std::printf("- SISD: %d us\n", launch_vec_mul_mat<packedMatType, packedVecType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedVecType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_vec_mul_mat<alignedMatType, alignedVecType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedVecType const A = SISD[i];
+ packedVecType const B = packedVecType(SIMD[i]);
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename packedVecType, typename alignedMatType, typename alignedVecType>
+static int comp_vec3_mul_mat3(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ packedVecType const Scale(0.01, 0.02, 0.05);
+
+ std::vector<packedVecType> SISD;
+ std::printf("- SISD: %d us\n", launch_vec_mul_mat<packedMatType, packedVecType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedVecType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_vec_mul_mat<alignedMatType, alignedVecType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedVecType const A = SISD[i];
+ packedVecType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+template <typename packedMatType, typename packedVecType, typename alignedMatType, typename alignedVecType>
+static int comp_vec4_mul_mat4(std::size_t Samples)
+{
+ typedef typename packedMatType::value_type T;
+
+ int Error = 0;
+
+ packedMatType const Transform(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+ packedVecType const Scale(0.01, 0.02, 0.03, 0.05);
+
+ std::vector<packedVecType> SISD;
+ std::printf("- SISD: %d us\n", launch_vec_mul_mat<packedMatType, packedVecType>(SISD, Transform, Scale, Samples));
+
+ std::vector<alignedVecType> SIMD;
+ std::printf("- SIMD: %d us\n", launch_vec_mul_mat<alignedMatType, alignedVecType>(SIMD, Transform, Scale, Samples));
+
+ for(std::size_t i = 0; i < Samples; ++i)
+ {
+ packedVecType const A = SISD[i];
+ packedVecType const B = SIMD[i];
+ Error += glm::all(glm::equal(A, B, static_cast<T>(0.001))) ? 0 : 1;
+ }
+
+ return Error;
+}
+
+int main()
+{
+ std::size_t const Samples = 100000;
+
+ int Error = 0;
+
+ std::printf("vec2 * mat2:\n");
+ Error += comp_vec2_mul_mat2<glm::mat2, glm::vec2, glm::aligned_mat2, glm::aligned_vec2>(Samples);
+
+ std::printf("dvec2 * dmat2:\n");
+ Error += comp_vec2_mul_mat2<glm::dmat2, glm::dvec2,glm::aligned_dmat2, glm::aligned_dvec2>(Samples);
+
+ std::printf("vec3 * mat3:\n");
+ Error += comp_vec3_mul_mat3<glm::mat3, glm::vec3, glm::aligned_mat3, glm::aligned_vec3>(Samples);
+
+ std::printf("dvec3 * dmat3:\n");
+ Error += comp_vec3_mul_mat3<glm::dmat3, glm::dvec3, glm::aligned_dmat3, glm::aligned_dvec3>(Samples);
+
+ std::printf("vec4 * mat4:\n");
+ Error += comp_vec4_mul_mat4<glm::mat4, glm::vec4, glm::aligned_mat4, glm::aligned_vec4>(Samples);
+
+ std::printf("dvec4 * dmat4:\n");
+ Error += comp_vec4_mul_mat4<glm::dmat4, glm::dvec4, glm::aligned_dmat4, glm::aligned_dvec4>(Samples);
+
+ return Error;
+}
+
+#else
+
+int main()
+{
+ return 0;
+}
+
+#endif
diff --git a/3rdparty/glm/source/util/autoexp.txt b/3rdparty/glm/source/util/autoexp.txt
new file mode 100644
index 0000000..5a07bed
--- /dev/null
+++ b/3rdparty/glm/source/util/autoexp.txt
@@ -0,0 +1,28 @@
+[Visualizer]
+
+glm::detail::tvec2<*>{
+ preview (
+ #(#($c.x,$c.y))
+ )
+ children (
+ #([x]: $c.x,[y]: $c.y)
+ )
+}
+
+glm::detail::tvec3<*>{
+ preview (
+ #($e.x,$e.y,$e.z)
+ )
+ children (
+ #([x]: $e.x,[y]: $e.y,[z]: $e.z)
+ )
+}
+
+glm::detail::tvec4<*>{
+ preview (
+ #($c.x,$c.y,$c.z,$c.w)
+ )
+ children (
+ #([x]: $e.x,[y]: $e.y,[z]: $e.z,[w]: $e.w)
+ )
+}
diff --git a/3rdparty/glm/source/util/autoexp.vc2010.dat b/3rdparty/glm/source/util/autoexp.vc2010.dat
new file mode 100644
index 0000000..e28d31d
--- /dev/null
+++ b/3rdparty/glm/source/util/autoexp.vc2010.dat
@@ -0,0 +1,3896 @@
+; AutoExp.Dat - templates for automatically expanding data
+; Copyright(c) Microsoft Corporation. All Rights Reserved.
+;---------------------------------------------------------------
+;
+; While debugging, Data Tips and items in the Watch and Variable
+; windows are automatically expanded to show their most important
+; elements. The expansion follows the format given by the rules
+; in this file. You can add rules for your types or change the
+; predefined rules.
+;
+; For good examples, read the rules in this file.
+;
+; To find what the debugger considers the type of a variable to
+; be, add it to the Watch window and look at the Type column.
+;
+; An AutoExpand rule is a line with the name of a type, an equals
+; sign, and text with replaceable parts in angle brackets. The
+; part in angle brackets names a member of the type and an
+; optional Watch format specifier.
+;
+; AutoExpand rules use the following syntax. The equals sign (=),
+; angle brackets (<>), and comma are taken literally. Square
+; brackets ([]) indicate optional items.
+;
+; type=[text]<member[,format]>...
+;
+; type Name of the type (may be followed by <*> for template
+; types such as the ATL types listed below).
+;
+; text Any text. Usually the name of the member to display,
+; or a shorthand name for the member.
+;
+; member Name of a member to display.
+;
+; format Watch format specifier. One of the following:
+;
+; Letter Description Sample Display
+; ------ -------------------------- ------------ -------------
+; d,i Signed decimal integer 0xF000F065,d -268373915
+; u Unsigned decimal integer 0x0065,u 101
+; o Unsigned octal integer 0xF065,o 0170145
+; x,X Hexadecimal integer 61541,X 0X0000F065
+; l,h long or short prefix for 00406042,hx 0x0c22
+; d, i, u, o, x, X
+; f Signed floating-point 3./2.,f 1.500000
+; e Signed scientific-notation 3./2.,e 1.500000e+000
+; g Shorter of e and f 3./2.,g 1.5
+; c Single character 0x0065,c 'e'
+; s Zero-terminated string pVar,s "Hello world"
+; su Unicode string pVar,su "Hello world"
+;
+; For details of other format specifiers see Help under:
+; "format specifiers/watch variable"
+;
+; The special format <,t> specifies the name of the most-derived
+; type of the object. This is especially useful with pointers or
+; references to a base class.
+;
+; If there is no rule for a class, the base classes are checked for
+; a matching rule.
+;
+; There are some special entries allowed in the AutoExpand section:
+; $BUILTIN is used to display more complex types that need to do more
+; than just show a member variable or two.
+; $ADDIN allows external DLLs to be added to display even more complex
+; types via the EE Add-in API. The first argument is the DLL name, the
+; second argument is the name of the export from the DLL to use. For
+; further information on this API see the sample called EEAddIn.
+;
+; WARNING: if hexadecimal mode is on in the watch window, all numbers here are
+; evaluated in hex, e.g. 42 becomes 0x42
+
+[AutoExpand]
+
+; from windef.h
+tagPOINT =x=<x> y=<y>
+tagRECT =top=<top> bottom=<bottom> left=<left> right=<right>
+
+; from winuser.h
+tagMSG =msg=<message,x> wp=<wParam,x> lp=<lParam,x>
+
+; intrinsics
+__m64 =<m64_i64,x>
+__m128=$BUILTIN(M128)
+__m128i=$BUILTIN(M128I)
+__m128d=$BUILTIN(M128D)
+
+; from afxwin.h
+CDC =hDC=<m_hDC> attrib=<m_hAttribDC>
+CPaintDC =<,t> hWnd=<m_hWnd>
+CPoint =x=<x> y=<y>
+CRect =top=<top> bottom=<bottom> left=<left> right=<right>
+CSize =cx=<cx> cy=<cy>
+CWnd =<,t> hWnd=<m_hWnd>
+CWinApp =<,t> <m_pszAppName,s>
+CWinThread =<,t> h=<m_hThread> proc=<m_pfnThreadProc>
+
+; from afxcoll.h
+CPtrList =cnt=<m_nCount>
+
+; from afxstat_.h
+CProcessLocalObject =<,t>
+CThreadLocalObject =<,t>
+
+; from afx.h
+CArchiveException =cause=<m_cause>
+CFile =hFile=<m_hFile> name=<m_strFileName.m_pchData,s>
+CFileException =cause=<m_cause> OS Error=m_lOsError
+CMemFile =pos=<m_nPosition> size=<m_nFileSize>
+CObject =<,t>
+CRuntimeClass =<m_lpszClassName,s>
+CStdioFile =FILE*=<m_pStream> name=<m_strFilename.m_pchData,s>
+CTimeSpan =time=<m_time>
+CTime =time=<m_time>
+
+; from afxcoll.h
+CByteArray =count=<m_nCount>
+CStringList =count=<m_nCount>
+; same for all CXXXArray classes
+; same for CXXXList
+; same for CMapXXToXX
+
+; various string classes from MFC & ATL
+
+_com_error=<m_hresult,hr>
+_bstr_t=<m_Data->m_wstr,su> (<m_Data->m_RefCount,u>)
+_com_ptr_t<*>=<m_pInterface>
+_LARGE_INTEGER=<QuadPart>
+_ULARGE_INTEGER=<QuadPart>
+ATL::CComPtr<*>=<p>
+
+ATL::CComQIPtr<*>=<p>
+
+tagVARIANT=$BUILTIN(VARIANT)
+VARIANT=$BUILTIN(VARIANT)
+_GUID=$BUILTIN(GUID)
+
+; see EEAddIn sample for how to use these
+;_SYSTEMTIME=$ADDIN(EEAddIn.dll,AddIn_SystemTime)
+;_FILETIME=$ADDIN(EEAddIn.dll,AddIn_FileTime)
+
+[Visualizer]
+; This section contains visualizers for STL and ATL containers
+; DO NOT MODIFY
+ATL::CStringT<char,*>|CSimpleStringT<char,*>|ATL::CSimpleStringT<char,*>{
+ preview ([$e.m_pszData,s])
+ stringview ([$e.m_pszData,sb])
+}
+ATL::CStringT<wchar_t,*>|CSimpleStringT<wchar_t,*>|ATL::CSimpleStringT<wchar_t,*>|ATL::CStringT<unsigned short,*>|CSimpleStringT<unsigned short,*>|ATL::CSimpleStringT<unsigned short,*>{
+ preview ([$e.m_pszData,su])
+ stringview ([$e.m_pszData,sub])
+}
+ATL::CComBSTR{
+ preview ([$e.m_str,su])
+ stringview ([$e.m_str,sub])
+}
+
+
+; Many visualizers use nested #()s.
+; Why not use #(foo, bar) instead of #(#(foo), #(bar))?
+; The former alphabetically sorts its fields, while the latter does not.
+
+;------------------------------------------------------------------------------
+; std::pair from <utility>
+;------------------------------------------------------------------------------
+std::pair<*>{
+ ; pair is previewed with "(<first>, <second>)".
+ preview (
+ #(
+ "(",
+ $e.first,
+ ", ",
+ $e.second,
+ ")"
+ )
+ )
+
+ ; We gloss over the fact that first and second are actually stored in _Pair_base.
+ children (
+ #(
+ #(first : $e.first),
+ #(second : $e.second)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::plus, etc. from <functional>
+;------------------------------------------------------------------------------
+; STL functors are previewed with their names.
+; They have no state, so they have no children.
+std::plus<*>{
+ preview ( "plus" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::minus<*>{
+ preview ( "minus" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::multiplies<*>{
+ preview ( "multiplies" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::divides<*>{
+ preview ( "divides" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::modulus<*>{
+ preview ( "modulus" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::negate<*>{
+ preview ( "negate" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::equal_to<*>{
+ preview ( "equal_to" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::not_equal_to<*>{
+ preview ( "not_equal_to" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::greater<*>{
+ preview ( "greater" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::less<*>{
+ preview ( "less" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::greater_equal<*>{
+ preview ( "greater_equal" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::less_equal<*>{
+ preview ( "less_equal" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::logical_and<*>{
+ preview ( "logical_and" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::logical_or<*>{
+ preview ( "logical_or" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::logical_not<*>{
+ preview ( "logical_not" )
+ children ( #array(expr: 0, size: 0) )
+}
+
+;------------------------------------------------------------------------------
+; std::not1() from <functional>
+; std::not2() from <functional>
+;------------------------------------------------------------------------------
+; STL negators are previewed with "not[12](<stored functor>)".
+; They have a child with the fake name of [pred], so that the
+; stored functor can be inspected.
+std::unary_negate<*>{
+ preview (
+ #(
+ "not1(",
+ $e._Functor,
+ ")"
+ )
+ )
+
+ children (
+ #([pred] : $e._Functor)
+ )
+}
+std::binary_negate<*>{
+ preview (
+ #(
+ "not2(",
+ $e._Functor,
+ ")"
+ )
+ )
+
+ children (
+ #([pred] : $e._Functor)
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::bind1st() from <functional>
+; std::bind2nd() from <functional>
+;------------------------------------------------------------------------------
+; STL binders are previewed with "bind1st(<op>, <value>)" or "bind2nd(<op>, <value>)".
+; We gloss over the fact that they derive from unary_function.
+std::binder1st<*>{
+ preview (
+ #(
+ "bind1st(",
+ $e.op,
+ ", ",
+ $e.value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #(op : $e.op),
+ #(value : $e.value)
+ )
+ )
+}
+std::binder2nd<*>{
+ preview (
+ #(
+ "bind2nd(",
+ $e.op,
+ ", ",
+ $e.value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #(op : $e.op),
+ #(value : $e.value)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::ptr_fun() from <functional>
+;------------------------------------------------------------------------------
+; STL function pointer adaptors are previewed with "ptr_fun(<stored function pointer>)".
+; Function pointers have no children, so the adaptors have no children.
+std::pointer_to_unary_function<*>|std::pointer_to_binary_function<*>{
+ preview (
+ #(
+ "ptr_fun(",
+ $e._Pfun,
+ ")"
+ )
+ )
+
+ children ( #array(expr: 0, size: 0) )
+}
+
+;------------------------------------------------------------------------------
+; std::mem_fun() from <functional>
+; std::mem_fun_ref() from <functional>
+;------------------------------------------------------------------------------
+; See ptr_fun().
+std::mem_fun_t<*>|std::mem_fun1_t<*>|std::const_mem_fun_t<*>|std::const_mem_fun1_t<*>{
+ preview (
+ #(
+ "mem_fun(",
+ $e._Pmemfun,
+ ")"
+ )
+ )
+
+ children ( #array(expr: 0, size: 0) )
+}
+std::mem_fun_ref_t<*>|std::mem_fun1_ref_t<*>|std::const_mem_fun_ref_t<*>|std::const_mem_fun1_ref_t<*>{
+ preview (
+ #(
+ "mem_fun_ref(",
+ $e._Pmemfun,
+ ")"
+ )
+ )
+
+ children ( #array(expr: 0, size: 0) )
+}
+
+;------------------------------------------------------------------------------
+; std::auto_ptr from <memory>
+;------------------------------------------------------------------------------
+std::auto_ptr<*>{
+ ; An empty auto_ptr is previewed with "empty".
+ ; Otherwise, it is previewed with "auto_ptr <object>".
+ preview (
+ #if ($e._Myptr == 0) (
+ "empty"
+ ) #else (
+ #(
+ "auto_ptr ",
+ *$e._Myptr
+ )
+ )
+ )
+
+ ; An empty auto_ptr has no children.
+ ; Otherwise, it has a single child, its stored pointer, with a fake name of [ptr].
+ children (
+ #if ($e._Myptr == 0) (
+ #array(expr: 0, size: 0)
+ ) #else (
+ #([ptr] : $e._Myptr)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::basic_string from <string>
+;------------------------------------------------------------------------------
+; basic_string is previewed with its stored string.
+; It has [size] and [capacity] children, followed by [0], [1], [2], etc. children
+; displaying its stored characters.
+; The ($e._Myres) < ($e._BUF_SIZE) test determines whether the Small String Optimization
+; is in effect.
+; NOTE: The parentheses in ($e._Myres) < ($e._BUF_SIZE) are necessary.
+std::basic_string<char,*>{
+ preview ( #if (($e._Myres) < ($e._BUF_SIZE)) ( [$e._Bx._Buf,s] ) #else ( [$e._Bx._Ptr,s] ))
+ stringview ( #if (($e._Myres) < ($e._BUF_SIZE)) ( [$e._Bx._Buf,sb] ) #else ( [$e._Bx._Ptr,sb] ))
+
+ children (
+ #(
+ #([size] : $e._Mysize),
+ #([capacity] : $e._Myres),
+ #if (($e._Myres) < ($e._BUF_SIZE)) (
+ #array(expr: $e._Bx._Buf[$i], size: $e._Mysize)
+ ) #else (
+ #array(expr: $e._Bx._Ptr[$i], size: $e._Mysize)
+ )
+ )
+ )
+}
+std::basic_string<unsigned short,*>|std::basic_string<wchar_t,*>{
+ preview ( #if (($e._Myres) < ($e._BUF_SIZE)) ( [$e._Bx._Buf,su] ) #else ( [$e._Bx._Ptr,su] ))
+ stringview ( #if (($e._Myres) < ($e._BUF_SIZE)) ( [$e._Bx._Buf,sub] ) #else ( [$e._Bx._Ptr,sub] ))
+
+ children (
+ #(
+ #([size] : $e._Mysize),
+ #([capacity] : $e._Myres),
+ #if (($e._Myres) < ($e._BUF_SIZE)) (
+ #array(expr: $e._Bx._Buf[$i], size: $e._Mysize)
+ ) #else (
+ #array(expr: $e._Bx._Ptr[$i], size: $e._Mysize)
+ )
+ )
+ )
+}
+std::_String_iterator<char,*>|std::_String_const_iterator<char,*>{
+ preview ( [$e._Ptr,s] )
+ stringview ( [$e._Ptr,sb] )
+ children ( #([ptr] : $e._Ptr) )
+}
+std::_String_iterator<unsigned short,*>|std::_String_const_iterator<unsigned short,*>|std::_String_iterator<wchar_t,*>|std::_String_const_iterator<wchar_t,*>{
+ preview ( [$e._Ptr,su] )
+ stringview ( [$e._Ptr,sub] )
+ children ( #([ptr] : $e._Ptr) )
+}
+
+;------------------------------------------------------------------------------
+; std::vector<bool> from <vector>
+;------------------------------------------------------------------------------
+; Despite its packed representation, vector<bool> is visualized like vector<T>.
+std::vector<bool,*>{
+ preview (
+ #(
+ "[",
+ $e._Mysize,
+ "](",
+ #array(
+ expr: (bool)(($e._Myvec._Myfirst[$i / _VBITS] >> ($i % _VBITS)) & 1),
+ size: $e._Mysize
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size] : $e._Mysize),
+ #([capacity] : ($e._Myvec._Myend - $e._Myvec._Myfirst) * _VBITS),
+ #array(
+ expr: (bool)(($e._Myvec._Myfirst[$i / _VBITS] >> ($i % _VBITS)) & 1),
+ size: $e._Mysize
+ )
+ )
+ )
+}
+std::_Vb_reference<*>|std::_Vb_iterator<*>|std::_Vb_const_iterator<*>{
+ preview (
+ (bool)((*$e._Myptr >> $e._Myoff) & 1)
+ )
+
+ children (
+ #(
+ #([ptr] : $e._Myptr),
+ #([offset] : $e._Myoff)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::vector from <vector>
+;------------------------------------------------------------------------------
+; vector is previewed with "[<size>](<elements>)".
+; It has [size] and [capacity] children, followed by its elements.
+; The other containers follow its example.
+std::vector<*>{
+ preview (
+ #(
+ "[",
+ $e._Mylast - $e._Myfirst,
+ "](",
+ #array(
+ expr: $e._Myfirst[$i],
+ size: $e._Mylast - $e._Myfirst
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size] : $e._Mylast - $e._Myfirst),
+ #([capacity] : $e._Myend - $e._Myfirst),
+ #array(
+ expr: $e._Myfirst[$i],
+ size: $e._Mylast - $e._Myfirst
+ )
+ )
+ )
+}
+std::_Vector_iterator<*>|std::_Vector_const_iterator<*>{
+ preview (
+ *$e._Ptr
+ )
+
+ children (
+ #([ptr] : $e._Ptr)
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::deque from <deque>
+;------------------------------------------------------------------------------
+std::deque<*>{
+ preview (
+ #(
+ "[",
+ $e._Mysize,
+ "](",
+ #array(
+ expr: $e._Map[(($i + $e._Myoff) / $e._EEN_DS) % $e._Mapsize][($i + $e._Myoff) % $e._EEN_DS],
+ size: $e._Mysize
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #array(
+ expr: $e._Map[(($i + $e._Myoff) / $e._EEN_DS) % $e._Mapsize][($i + $e._Myoff) % $e._EEN_DS],
+ size: $e._Mysize
+ )
+ )
+ )
+}
+std::_Deque_iterator<*,*>|std::_Deque_const_iterator<*,*>{
+ preview (
+ #if ($e._Myoff >= ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Myoff + ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Mysize) (
+ "end"
+ ) #else (
+ ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Map[($e._Myoff / ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_EEN_DS) % ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Mapsize][$e._Myoff % ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_EEN_DS]
+ )
+ )
+
+ children (
+ #if ($e._Myoff >= ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Myoff + ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Mysize) (
+ #array(expr: 0, size: 0)
+ ) #else (
+ #(
+ #([index] : $e._Myoff - ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Myoff),
+ #([ptr] : &((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Map[($e._Myoff / ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_EEN_DS) % ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_Mapsize][$e._Myoff % ((std::deque<$T1,$T2> *)$e._Myproxy->_Mycont)->_EEN_DS] )
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::list from <list>
+;------------------------------------------------------------------------------
+std::list<*>{
+ preview (
+ #(
+ "[",
+ $e._Mysize,
+ "](",
+ #list(
+ head: $e._Myhead->_Next,
+ size: $e._Mysize,
+ next: _Next
+ ) : $e._Myval,
+ ")"
+ )
+ )
+
+ children (
+ #list(
+ head: $e._Myhead->_Next,
+ size: $e._Mysize,
+ next: _Next
+ ) : $e._Myval
+ )
+}
+std::_List_iterator<*>|std::_List_const_iterator<*>{
+ preview ( $e._Ptr->_Myval )
+ children ( #([ptr] : &$e._Ptr->_Myval) )
+}
+
+;------------------------------------------------------------------------------
+; std::queue from <queue>
+; std::stack from <stack>
+;------------------------------------------------------------------------------
+std::queue<*>|std::stack<*>{
+ preview ( $e.c )
+ children ( #(c : $e.c) )
+}
+
+;------------------------------------------------------------------------------
+; std::priority_queue from <queue>
+;------------------------------------------------------------------------------
+std::priority_queue<*>{
+ preview ( $e.c )
+
+ children (
+ #(
+ #(c [heap]: $e.c),
+ #(comp : $e.comp)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::map from <map>
+; std::multimap from <map>
+; std::set from <set>
+; std::multiset from <set>
+;------------------------------------------------------------------------------
+std::map<*>|std::multimap<*>|std::set<*>|std::multiset<*>{
+ preview (
+ #(
+ "[",
+ $e._Mysize,
+ "](",
+ #tree(
+ head: $e._Myhead->_Parent,
+ skip: $e._Myhead,
+ left: _Left,
+ right: _Right,
+ size: $e._Mysize
+ ) : $e._Myval,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([comp] : $e.comp),
+ #tree(
+ head: $e._Myhead->_Parent,
+ skip: $e._Myhead,
+ left: _Left,
+ right: _Right,
+ size: $e._Mysize
+ ) : $e._Myval
+ )
+ )
+}
+std::_Tree_iterator<*>|std::_Tree_const_iterator<*>{
+ preview ( $e._Ptr->_Myval )
+ children ( #([ptr] : &$e._Ptr->_Myval) )
+}
+
+;------------------------------------------------------------------------------
+; std::bitset from <bitset>
+;------------------------------------------------------------------------------
+std::bitset<*>{
+ preview (
+ #(
+ "[",
+ $e._EEN_BITS,
+ "](",
+ #array(
+ expr: [($e._Array[$i / $e._Bitsperword] >> ($i % $e._Bitsperword)) & 1,d],
+ size: $e._EEN_BITS
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #array(
+ expr: [($e._Array[$i / $e._Bitsperword] >> ($i % $e._Bitsperword)) & 1,d],
+ size: $e._EEN_BITS
+ )
+ )
+}
+std::bitset<*>::reference{
+ preview (
+ [($e._Pbitset->_Array[$i / $e._Pbitset->_Bitsperword] >> ($e._Mypos % $e._Pbitset->_Bitsperword)) & 1,d]
+ )
+
+ children (
+ #(
+ #([bitset] : $e._Pbitset),
+ #([pos] : $e._Mypos)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::reverse_iterator from <iterator>
+;------------------------------------------------------------------------------
+std::reverse_iterator<std::_String_iterator<*> >|std::reverse_iterator<std::_String_const_iterator<*> >{
+ preview (
+ #(
+ "reverse_iterator to ",
+ $e.current._Ptr[-1]
+ )
+ )
+
+ children (
+ #(
+ #([to] : $e.current._Ptr - 1),
+ #(current : $e.current)
+ )
+ )
+}
+std::reverse_iterator<std::_Vb_iterator<*> >|std::reverse_iterator<std::_Vb_const_iterator<*> >{
+ preview (
+ #(
+ "reverse_iterator to ",
+ #if ($e.current._Myoff != 0) (
+ (bool)((*$e.current._Myptr >> ($e.current._Myoff - 1)) & 1)
+ ) #else (
+ (bool)(($e.current._Myptr[-1] >> (_VBITS - 1)) & 1)
+ )
+ )
+ )
+
+ children (
+ #if ($e.current._Myoff != 0) (
+ #(
+ #([to ptr] : $e.current._Myptr),
+ #([to offset] : $e.current._Myoff - 1),
+ #(current : $e.current)
+ )
+ ) #else (
+ #(
+ #([to ptr] : $e.current._Myptr - 1),
+ #([to offset] : _VBITS - 1),
+ #(current : $e.current)
+ )
+ )
+ )
+}
+std::reverse_iterator<std::_Vector_iterator<*> >|std::reverse_iterator<std::_Vector_const_iterator<*> >{
+ preview (
+ #(
+ "reverse_iterator to ",
+ $e.current._Ptr[-1]
+ )
+ )
+
+ children (
+ #(
+ #([to] : $e.current._Ptr - 1),
+ #(current : $e.current)
+ )
+ )
+}
+std::reverse_iterator<std::_Deque_iterator<*,*> >|std::reverse_iterator<std::_Deque_const_iterator<*,*> >{
+ preview (
+ #(
+ "reverse_iterator to ",
+ #if ($e.current._Myoff == ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Myoff) (
+ "end"
+ ) #else (
+ ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Map[(($e.current._Myoff - 1) / ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_EEN_DS) % ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Mapsize][($e.current._Myoff - 1) % ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_EEN_DS]
+ )
+ )
+ )
+
+ children (
+ #if ($e.current._Myoff == ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Myoff) (
+ #(current : $e.current)
+ ) #else (
+ #(
+ #([to index] : ($e.current._Myoff - 1) - ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Myoff),
+ #([to ptr] : &((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Map[(($e.current._Myoff - 1) / ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_EEN_DS) % ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_Mapsize][($e.current._Myoff - 1) % ((std::deque<$T1,$T2> *)$e.current._Myproxy->_Mycont)->_EEN_DS] ),
+ #(current : $e.current)
+ )
+ )
+ )
+}
+std::reverse_iterator<std::_List_iterator<*> >|std::reverse_iterator<std::_List_const_iterator<*> >{
+ preview (
+ #(
+ "reverse_iterator to ",
+ $e.current._Ptr->_Prev->_Myval
+ )
+ )
+
+ children (
+ #(
+ #([to] : &$e.current._Ptr->_Prev->_Myval),
+ #(current : $e.current)
+ )
+ )
+}
+std::reverse_iterator<std::_Array_iterator<*> >|std::reverse_iterator<std::_Array_const_iterator<*> >{
+ preview (
+ #(
+ "reverse_iterator to ",
+ #if ($e.current._EEN_IDL == 0) (
+ $e.current._Ptr[-1]
+ ) #else (
+ #if ($e.current._Idx == 0) (
+ "end"
+ ) #else (
+ $e.current._Ptr[$e.current._Idx - 1]
+ )
+ )
+ )
+ )
+
+ children (
+ #if ($e.current._EEN_IDL == 0) (
+ #(
+ #([to] : $e.current._Ptr - 1),
+ #(current : $e.current)
+ )
+ ) #else (
+ #if ($e.current._Idx == 0) (
+ #(current : $e.current)
+ ) #else (
+ #(
+ #([to] : $e.current._Ptr + $e.current._Idx - 1),
+ #(current : $e.current)
+ )
+ )
+ )
+ )
+}
+std::reverse_iterator<*>{
+ preview (
+ #(
+ "reverse_iterator current ",
+ $e.current
+ )
+ )
+
+ children (
+ #(current : $e.current)
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::complex from <complex>
+;------------------------------------------------------------------------------
+std::complex<*>{
+ preview (
+ #if ($e._Val[1] == 0) (
+ ; Purely real.
+ $e._Val[0]
+ ) #else (
+ #if ($e._Val[0] == 0) (
+ ; Purely imaginary.
+ #if ($e._Val[1] < 0) (
+ #("-i*", -$e._Val[1])
+ ) #else (
+ #("i*", $e._Val[1])
+ )
+ ) #else (
+ ; Mixed.
+ #if ($e._Val[1] < 0) (
+ #($e._Val[0], "-i*", -$e._Val[1])
+ ) #else (
+ #($e._Val[0], "+i*", $e._Val[1])
+ )
+ )
+ )
+ )
+
+ children (
+ #(
+ #(real : $e._Val[0]),
+ #(imag : $e._Val[1])
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::valarray from <valarray>
+;------------------------------------------------------------------------------
+std::valarray<*>{
+ preview (
+ #(
+ "[",
+ $e._Mysize,
+ "](",
+ #array(
+ expr: $e._Myptr[$i],
+ size: $e._Mysize
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #array(
+ expr: $e._Myptr[$i],
+ size: $e._Mysize
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::reference_wrapper from <functional>
+;------------------------------------------------------------------------------
+std::tr1::reference_wrapper<*>{
+ preview (
+ #if ($e._Callee._EEN_INDIRECT == 1) (
+ ; For ordinary T, reference_wrapper<T> stores a T * _Callee._Ptr
+ ; which is non-null. Actual references are previewed with what they
+ ; refer to, so reference_wrapper<T> is previewed with dereferencing its
+ ; stored pointer.
+ *$e._Callee._Ptr
+ ) #else (
+ ; When T is a pointer to data member type, reference_wrapper<T>
+ ; stores a T _Callee._Object directly.
+ $e._Callee._Object
+ )
+ )
+
+ children (
+ #if ($e._Callee._EEN_INDIRECT == 1) (
+ ; Actual references have the same children as what they refer to.
+ ; Unfortunately, there appears to be no way to imitate this exactly.
+ ; Therefore, we make reference_wrapper<T> appear to have a single
+ ; child, its stored pointer, with a fake name of [ptr].
+ #([ptr] : $e._Callee._Ptr)
+ ) #else (
+ ; When T is a pointer to data member type, T has no children,
+ ; so we make reference_wrapper<T> appear to have no children.
+ #array(expr: 0, size: 0)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::shared_ptr from <memory>
+;------------------------------------------------------------------------------
+std::tr1::_Ref_count<*>{
+ preview ( "default" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ref_count_del<*>{
+ preview ( "custom deleter" )
+ children ( #([deleter] : $e._Dtor) )
+}
+std::tr1::_Ref_count_del_alloc<*>{
+ preview ( "custom deleter, custom allocator" )
+ children (
+ #(
+ #([deleter] : $e._Dtor),
+ #([allocator] : $e._Myal)
+ )
+ )
+}
+std::tr1::_Ref_count_obj<*>{
+ preview ( "make_shared" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ref_count_obj_alloc<*>{
+ preview ( "allocate_shared" )
+ children ( #([allocator] : $e._Myal) )
+}
+std::tr1::shared_ptr<*>{
+ preview (
+ ; shared_ptr<T> stores a T * _Ptr .
+ #if ($e._Ptr == 0) (
+ ; A default-constructed shared_ptr has a null _Ptr and a null _Rep,
+ ; and is formally said to be empty.
+ ; A shared_ptr constructed from a null pointer has a null _Ptr
+ ; and a NON-null _Rep . It is formally said to own the null pointer.
+ ; We preview both with "empty".
+ "empty"
+ ) #else (
+ ; Raw pointers are previewed with "<pointer value> <object>".
+ ; auto_ptr is previewed with "auto_ptr <object>".
+ ; Following these examples, shared_ptr is previewed with
+ ; "shared_ptr <object> [N strong refs, M weak refs]".
+ #(
+ "shared_ptr ",
+ *$e._Ptr,
+ " [",
+ $e._Rep->_Uses,
+ #if ($e._Rep->_Uses == 1) (" strong ref") #else (" strong refs"),
+ #if ($e._Rep->_Weaks - 1 > 0) (
+ #(
+ ", ",
+ $e._Rep->_Weaks - 1,
+ #if ($e._Rep->_Weaks - 1 == 1) (" weak ref") #else (" weak refs")
+ )
+ ),
+ "] [",
+ *$e._Rep,
+ "]"
+ )
+ ; Note: _Rep->_Uses counts how many shared_ptrs share ownership of the object,
+ ; so we directly display it as the strong reference count.
+ ; _Rep->_Weaks counts how many shared_ptrs and weak_ptrs share ownership of
+ ; the "representation object" (or "control block"). All of the shared_ptrs are
+ ; counted as a single owner. That is, _Weaks is initialized to 1, and when
+ ; _Uses falls to 0, _Weaks is decremented. This avoids incrementing and decrementing
+ ; _Weaks every time that a shared_ptr gains or loses ownership. Therefore,
+ ; _Weaks - 1 is the weak reference count, the number of weak_ptrs that are observing
+ ; the shared object.
+ )
+ )
+
+ children (
+ #if ($e._Ptr == 0) (
+ ; We make empty shared_ptrs (and shared_ptrs that own
+ ; the null pointer) appear to have no children.
+ #array(expr: 0, size: 0)
+ ) #else (
+ #(
+ ; We make shared_ptr appear to have two children:
+
+ ; Its stored pointer, with a fake name of [ptr].
+ #([ptr] : $e._Ptr),
+
+ ; Its deleter and allocator, which may be default or custom.
+ #([deleter and allocator] : *$e._Rep)
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::weak_ptr from <memory>
+;------------------------------------------------------------------------------
+std::tr1::weak_ptr<*>{
+ preview (
+ #if ($e._Ptr == 0) (
+ "empty"
+ ) #elif ($e._Rep->_Uses == 0) (
+ ; weak_ptr is just like shared_ptr, except that a weak_ptr can be expired.
+ #(
+ "expired [",
+ *$e._Rep,
+ "]"
+ )
+ ) #else (
+ #(
+ "weak_ptr ",
+ *$e._Ptr,
+ " [",
+ $e._Rep->_Uses,
+ #if ($e._Rep->_Uses == 1) (" strong ref") #else (" strong refs"),
+ #if ($e._Rep->_Weaks - 1 > 0) (
+ #(
+ ", ",
+ $e._Rep->_Weaks - 1,
+ #if ($e._Rep->_Weaks - 1 == 1) (" weak ref") #else (" weak refs")
+ )
+ ),
+ "] [",
+ *$e._Rep,
+ "]"
+ )
+ )
+ )
+
+ children (
+ #if ($e._Ptr == 0) (
+ #array(expr: 0, size: 0)
+ ) #elif ($e._Rep->_Uses == 0) (
+ ; When a weak_ptr is expired, we show its deleter and allocator.
+ ; The deleter has already been used, but the control block has not yet been deallocated.
+ #([deleter and allocator] : *$e._Rep)
+ ) #else (
+ #(
+ #([ptr] : $e._Ptr),
+ #([deleter and allocator] : *$e._Rep)
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::mem_fn() from <functional>
+;------------------------------------------------------------------------------
+; Note that when mem_fn() is given a data member pointer, it returns a _Call_wrapper<_Callable_pmd<*> > .
+; Data member pointers themselves don't have useful previews, so we don't attempt to visualize this.
+; When mem_fn() is given a member function pointer, it returns a _Mem_fn[N], which we can visualize.
+std::tr1::_Mem_fn1<*>|std::tr1::_Mem_fn2<*>|std::tr1::_Mem_fn3<*>|std::tr1::_Mem_fn4<*>|std::tr1::_Mem_fn5<*>|std::tr1::_Mem_fn6<*>|std::tr1::_Mem_fn7<*>|std::tr1::_Mem_fn8<*>|std::tr1::_Mem_fn9<*>|std::tr1::_Mem_fn10<*>{
+ preview (
+ ; We preview the functor returned by mem_fn() with "mem_fn(<stored member function pointer>)".
+ #(
+ "mem_fn(",
+ $e._Callee._Object,
+ ")"
+ )
+ )
+
+ children (
+ ; Member function pointers have no children.
+ #array(expr: 0, size: 0)
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::bind() from <functional>
+;------------------------------------------------------------------------------
+; bind() placeholders are previewed with their names.
+; They have no state, so they have no children.
+std::tr1::_Ph<1>{
+ preview ( "_1" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<2>{
+ preview ( "_2" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<3>{
+ preview ( "_3" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<4>{
+ preview ( "_4" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<5>{
+ preview ( "_5" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<6>{
+ preview ( "_6" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<7>{
+ preview ( "_7" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<8>{
+ preview ( "_8" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<9>{
+ preview ( "_9" )
+ children ( #array(expr: 0, size: 0) )
+}
+std::tr1::_Ph<10>{
+ preview ( "_10" )
+ children ( #array(expr: 0, size: 0) )
+}
+
+; The functor returned by bind(f, t1, t2) is previewed with "bind(f, t1, t2)".
+; It has children with the fake names of [f], [t1], [t2], etc.
+std::tr1::_Bind<*,*,std::tr1::_Bind0<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind0<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind1<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind1<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind2<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind2<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind3<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind3<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind4<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind4<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind5<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind5<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ", ", $e._Bx._Vx4,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3),
+ #([t5] : $e._Bx._Vx4)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind6<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind6<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ", ", $e._Bx._Vx4,
+ ", ", $e._Bx._Vx5,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3),
+ #([t5] : $e._Bx._Vx4),
+ #([t6] : $e._Bx._Vx5)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind7<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind7<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ", ", $e._Bx._Vx4,
+ ", ", $e._Bx._Vx5,
+ ", ", $e._Bx._Vx6,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3),
+ #([t5] : $e._Bx._Vx4),
+ #([t6] : $e._Bx._Vx5),
+ #([t7] : $e._Bx._Vx6)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind8<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind8<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ", ", $e._Bx._Vx4,
+ ", ", $e._Bx._Vx5,
+ ", ", $e._Bx._Vx6,
+ ", ", $e._Bx._Vx7,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3),
+ #([t5] : $e._Bx._Vx4),
+ #([t6] : $e._Bx._Vx5),
+ #([t7] : $e._Bx._Vx6),
+ #([t8] : $e._Bx._Vx7)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind9<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind9<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ", ", $e._Bx._Vx4,
+ ", ", $e._Bx._Vx5,
+ ", ", $e._Bx._Vx6,
+ ", ", $e._Bx._Vx7,
+ ", ", $e._Bx._Vx8,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3),
+ #([t5] : $e._Bx._Vx4),
+ #([t6] : $e._Bx._Vx5),
+ #([t7] : $e._Bx._Vx6),
+ #([t8] : $e._Bx._Vx7),
+ #([t9] : $e._Bx._Vx8)
+ )
+ )
+}
+std::tr1::_Bind<*,*,std::tr1::_Bind10<*> >|std::tr1::_Bind_fty<*,*,std::tr1::_Bind10<*> >{
+ preview (
+ #(
+ "bind(", $e._Bx._Callee._Object,
+ ", ", $e._Bx._Vx0,
+ ", ", $e._Bx._Vx1,
+ ", ", $e._Bx._Vx2,
+ ", ", $e._Bx._Vx3,
+ ", ", $e._Bx._Vx4,
+ ", ", $e._Bx._Vx5,
+ ", ", $e._Bx._Vx6,
+ ", ", $e._Bx._Vx7,
+ ", ", $e._Bx._Vx8,
+ ", ", $e._Bx._Vx9,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([f] : $e._Bx._Callee._Object),
+ #([t1] : $e._Bx._Vx0),
+ #([t2] : $e._Bx._Vx1),
+ #([t3] : $e._Bx._Vx2),
+ #([t4] : $e._Bx._Vx3),
+ #([t5] : $e._Bx._Vx4),
+ #([t6] : $e._Bx._Vx5),
+ #([t7] : $e._Bx._Vx6),
+ #([t8] : $e._Bx._Vx7),
+ #([t9] : $e._Bx._Vx8),
+ #([t10] : $e._Bx._Vx9)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::function from <functional>
+;------------------------------------------------------------------------------
+std::tr1::_Impl_no_alloc0<*>|std::tr1::_Impl_no_alloc1<*>|std::tr1::_Impl_no_alloc2<*>|std::tr1::_Impl_no_alloc3<*>|std::tr1::_Impl_no_alloc4<*>|std::tr1::_Impl_no_alloc5<*>|std::tr1::_Impl_no_alloc6<*>|std::tr1::_Impl_no_alloc7<*>|std::tr1::_Impl_no_alloc8<*>|std::tr1::_Impl_no_alloc9<*>|std::tr1::_Impl_no_alloc10<*>{
+ preview ( $e._Callee._Object )
+ children ( #([functor] : $e._Callee._Object) )
+}
+std::tr1::_Impl0<*>|std::tr1::_Impl1<*>|std::tr1::_Impl2<*>|std::tr1::_Impl3<*>|std::tr1::_Impl4<*>|std::tr1::_Impl5<*>|std::tr1::_Impl6<*>|std::tr1::_Impl7<*>|std::tr1::_Impl8<*>|std::tr1::_Impl9<*>|std::tr1::_Impl10<*>{
+ preview ( $e._Callee._Object )
+ children (
+ #(
+ #([functor] : $e._Callee._Object),
+ #([allocator] : $e._Myal)
+ )
+ )
+}
+std::tr1::function<*>{
+ preview (
+ #if ($e._Impl == 0) (
+ ; Detecting empty functions is trivial.
+ "empty"
+ ) #else (
+ *$e._Impl
+ )
+ )
+
+ children (
+ #if ($e._Impl == 0) (
+ ; We make empty functions appear to have no children.
+ #array(expr: 0, size: 0)
+ ) #else (
+ #([functor and allocator] : *$e._Impl)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::tuple from <tuple>
+;------------------------------------------------------------------------------
+; tuple is visualized like pair, except that we have to give fake names to tuple's children.
+std::tr1::tuple<std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ "()"
+ )
+
+ children (
+ #array(expr: 0, size: 0)
+ )
+}
+std::tr1::tuple<*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value,
+ [3] : $e._Impl._Tail._Tail._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,*,*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value,
+ [3] : $e._Impl._Tail._Tail._Tail._Value,
+ [4] : $e._Impl._Tail._Tail._Tail._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,*,*,*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value,
+ [3] : $e._Impl._Tail._Tail._Tail._Value,
+ [4] : $e._Impl._Tail._Tail._Tail._Tail._Value,
+ [5] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,*,*,*,*,std::tr1::_Nil,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value,
+ [3] : $e._Impl._Tail._Tail._Tail._Value,
+ [4] : $e._Impl._Tail._Tail._Tail._Tail._Value,
+ [5] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ [6] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,*,*,*,*,*,std::tr1::_Nil,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value,
+ [3] : $e._Impl._Tail._Tail._Tail._Value,
+ [4] : $e._Impl._Tail._Tail._Tail._Tail._Value,
+ [5] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ [6] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ [7] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value
+ )
+ )
+}
+std::tr1::tuple<*,*,*,*,*,*,*,*,*,std::tr1::_Nil>{
+ preview (
+ #(
+ "(", $e._Impl._Value,
+ ", ", $e._Impl._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ ")"
+ )
+ )
+
+ children (
+ #(
+ [0] : $e._Impl._Value,
+ [1] : $e._Impl._Tail._Value,
+ [2] : $e._Impl._Tail._Tail._Value,
+ [3] : $e._Impl._Tail._Tail._Tail._Value,
+ [4] : $e._Impl._Tail._Tail._Tail._Tail._Value,
+ [5] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+ [6] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ [7] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+ [8] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value
+ )
+ )
+}
+; std::tr1::tuple of 10 elements (maximum arity, so no _Nil terminator in the
+; pattern). Same cons-list layout as above: element i is reached through i
+; repetitions of ._Tail. Preview prints "(e0, ..., e9)"; children expose [0]..[9].
+std::tr1::tuple<*,*,*,*,*,*,*,*,*,*>{
+    preview (
+        #(
+            "(", $e._Impl._Value,
+            ", ", $e._Impl._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            ", ", $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            ")"
+        )
+    )
+
+    children (
+        #(
+            [0] : $e._Impl._Value,
+            [1] : $e._Impl._Tail._Value,
+            [2] : $e._Impl._Tail._Tail._Value,
+            [3] : $e._Impl._Tail._Tail._Tail._Value,
+            [4] : $e._Impl._Tail._Tail._Tail._Tail._Value,
+            [5] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Value,
+            [6] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            [7] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            [8] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value,
+            [9] : $e._Impl._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Value
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::array from <array>
+;------------------------------------------------------------------------------
+std::tr1::array<*>{
+    preview (
+        ; An actual array is previewed with its address.
+        ; array<T, N> is previewed like vector<T>.
+        ; _EEN_SIZE is a helper constant emitted for the expression evaluator;
+        ; it holds the element count N — presumably equal to the template
+        ; argument (TODO confirm against the <array> header).
+        #(
+            "[",
+            $e._EEN_SIZE,
+            "](",
+            #array(expr: $e._Elems[$i], size: $e._EEN_SIZE),
+            ")"
+        )
+    )
+
+    children (
+        ; Just like an actual array.
+        #array(expr: $e._Elems[$i], size: $e._EEN_SIZE)
+    )
+}
+; Iterators into std::tr1::array. _EEN_IDL reflects the iterator-debugging
+; level: when it is 0 the iterator is just a raw pointer (_Ptr); otherwise it
+; also carries an index (_Idx) into its parent array of size _EEN_SIZE.
+std::_Array_iterator<*>|std::_Array_const_iterator<*>{
+    preview (
+        #if ($e._EEN_IDL == 0) (
+            *$e._Ptr
+        ) #else (
+            #if ($e._Idx == $e._EEN_SIZE) (
+                ; array iterators are represented by _Ptr + _Idx,
+                ; and they know how large their parent arrays are. Therefore, detecting
+                ; end iterators is trivial.
+                "end"
+            ) #else (
+                ; Like vector iterators, array iterators are previewed with what they point to.
+                $e._Ptr[$e._Idx]
+            )
+        )
+    )
+
+    children (
+        #if ($e._EEN_IDL == 0) (
+            #([ptr] : $e._Ptr)
+        ) #else (
+            #if ($e._Idx == $e._EEN_SIZE) (
+                ; We make end iterators appear to have no children.
+                #array(expr: 0, size: 0)
+            ) #else (
+                ; An array iterator is conceptually a pointer, so we make it appear to store one.
+                #([ptr] : $e._Ptr + $e._Idx)
+            )
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; stdext::hash_map from <hash_map>
+; stdext::hash_multimap from <hash_map>
+; stdext::hash_set from <hash_set>
+; stdext::hash_multiset from <hash_set>
+;------------------------------------------------------------------------------
+; The stdext hash containers store their elements in an embedded std::list
+; (_List); we preview them like "[size](e0, e1, ...)" by walking that list's
+; nodes from _Myhead->_Next via each node's _Next link.
+stdext::hash_map<*>|stdext::hash_multimap<*>|stdext::hash_set<*>|stdext::hash_multiset<*>{
+    preview (
+        #(
+            "[",
+            $e._List._Mysize,
+            "](",
+            #list(
+                head: $e._List._Myhead->_Next,
+                size: $e._List._Mysize,
+                next: _Next
+            ) : $e._Myval,
+            ")"
+        )
+    )
+
+    children (
+        ; Children are the stored values (pairs for maps, keys for sets).
+        #list(
+            head: $e._List._Myhead->_Next,
+            size: $e._List._Mysize,
+            next: _Next
+        ) : $e._Myval
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::unordered_map from <unordered_map>
+; std::tr1::unordered_multimap from <unordered_map>
+; std::tr1::unordered_set from <unordered_set>
+; std::tr1::unordered_multiset from <unordered_set>
+;------------------------------------------------------------------------------
+; std::hash is a stateless function object; show a fixed label and no children.
+std::hash<*>{
+    preview ( "hash" )
+    children ( #array(expr: 0, size: 0) )
+}
+; The TR1 unordered containers share the embedded-list layout used by the
+; stdext hash containers above; additionally expose the stored hash and
+; equality function objects as [hash] and [equal] children.
+std::tr1::unordered_map<*>|std::tr1::unordered_multimap<*>|std::tr1::unordered_set<*>|std::tr1::unordered_multiset<*>{
+    preview (
+        #(
+            "[",
+            $e._List._Mysize,
+            "](",
+            #list(
+                head: $e._List._Myhead->_Next,
+                size: $e._List._Mysize,
+                next: _Next
+            ) : $e._Myval,
+            ")"
+        )
+    )
+
+    children (
+        #(
+            #([hash] : $e.comp._Hashobj),
+            #([equal] : $e.comp._Keyeqobj),
+            #list(
+                head: $e._List._Myhead->_Next,
+                size: $e._List._Mysize,
+                next: _Next
+            ) : $e._Myval
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::basic_regex from <regex>
+;------------------------------------------------------------------------------
+std::tr1::basic_regex<*>{
+    preview (
+        #if ($e._Rep == 0) (
+            ; Default construction creates an empty basic_regex.
+            "empty"
+        ) #elif ($e._EEN_VIS == 1) (
+            ; By default, _ENHANCED_REGEX_VISUALIZER is defined to be 1 in debug and 0 in ship.
+            ; When it is 1, basic_regex stores the string from which it was constructed.
+            ; When it is 0, basic_regex stores only the resulting finite state machine.
+            $e._Visualization
+        ) #else (
+            ; basic_regex contains many static const flags, which would be shown in the preview by default.
+            ; Its actual members are _Rep and _Traits. _Rep holds the finite state machine, so we
+            ; use it to preview basic_regex. (It does contain some human-readable information.)
+            *$e._Rep
+        )
+    )
+
+    children (
+        #if ($e._Rep == 0) (
+            ; We make empty basic_regexes appear to have no children.
+            #array(expr: 0, size: 0)
+        ) #elif ($e._EEN_VIS == 1) (
+            ; We want to hide those static const flags.
+            ; We also want to give _Visualization a fake name.
+            #(
+                #([str] : $e._Visualization),
+                #(_Rep : $e._Rep),
+                #(_Traits : $e._Traits)
+            )
+        ) #else (
+            ; We want to hide those static const flags.
+            #(
+                _Rep : $e._Rep,
+                _Traits : $e._Traits
+            )
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::sub_match from <regex>
+;------------------------------------------------------------------------------
+std::tr1::sub_match<char const *>|std::tr1::sub_match<wchar_t const *>|std::tr1::sub_match<unsigned short const *>|std::tr1::sub_match<char *>|std::tr1::sub_match<wchar_t *>|std::tr1::sub_match<unsigned short *>{
+    preview (
+        ; It would be nice if we could preview sub_match with its str().
+        ; However, visualizers cannot handle strings represented by pointer pairs.
+        ; Therefore, our preview contains more limited information.
+        #if ($e.matched) (
+            ; If this sub_match participated in a match,
+            ; we preview it with its length().
+            $e.second - $e.first
+        ) #else (
+            ; Otherwise, we preview it with its matched bool (i.e. "false").
+            ; (Why not length() (i.e. "0")? It's meaningful to have
+            ; matched == true and length() == 0.)
+            "false"
+        )
+    )
+
+    children (
+        #(
+            ; sub_match's three data members are public, but we list them here
+            ; (a) to display matched before first and second, and
+            ; (b) to gloss over the fact that sub_match derives from std::pair.
+            #(matched : $e.matched),
+            #(first : $e.first),
+            #(second : $e.second)
+        )
+    )
+}
+std::tr1::sub_match<std::_String_const_iterator<*> >|std::tr1::sub_match<std::_String_iterator<*> >{
+    preview (
+        #if ($e.matched) (
+            ; We visualize ssub_match and wssub_match just like csub_match and wcsub_match,
+            ; except that when determining the length(), we can't subtract iterators.
+            ; We have to subtract their stored pointers.
+            $e.second._Ptr - $e.first._Ptr
+        ) #else (
+            ; Not matched: show the matched bool instead of a length.
+            "false"
+        )
+    )
+
+    children (
+        ; Same child layout as the pointer-based sub_match visualizer above.
+        #(
+            #(matched : $e.matched),
+            #(first : $e.first),
+            #(second : $e.second)
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::match_results from <regex>
+;------------------------------------------------------------------------------
+std::tr1::match_results<*>{
+    preview (
+        ; A match_results object is empty iff its vector _Matches is empty.
+        #if ($e._Matches._Myfirst == $e._Matches._Mylast) (
+            "empty"
+        ) #else (
+            ; We preview a non-empty match_results object with its vector.
+            $e._Matches
+        )
+    )
+
+    children (
+        #if ($e._Matches._Myfirst == $e._Matches._Mylast) (
+            ; We make empty match_results appear to have no children.
+            #array(expr: 0, size: 0)
+        ) #else (
+            ; As match_results has operator[](), prefix(), and suffix() member functions,
+            ; we make it appear to directly contain [0], [1], [2], etc. elements,
+            ; as well as [prefix] and [suffix] elements.
+            #(
+                #array(expr: $e._Matches._Myfirst[$i], size: $e._Matches._Mylast - $e._Matches._Myfirst),
+                #([prefix] : $e._Prefix),
+                #([suffix] : $e._Suffix)
+            )
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::regex_iterator from <regex>
+;------------------------------------------------------------------------------
+std::tr1::regex_iterator<*>{
+    preview (
+        #if ($e._MyRe == 0) (
+            ; We represent end-of-sequence regex_iterators with null regex pointers.
+            "end"
+        ) #else (
+            ; Dereferenceable regex_iterators return match_results when dereferenced,
+            ; so we'll preview them with that.
+            $e._MyVal
+        )
+    )
+
+    children (
+        #if ($e._MyRe == 0) (
+            ; We make end-of-sequence regex_iterators appear to have no children.
+            #array(expr: 0, size: 0)
+        ) #else (
+            ; For ease of understanding, we make dereferenceable regex_iterators
+            ; appear to have data members with the "for exposition only" names from TR1.
+            #(
+                #([begin] : $e._Begin),
+                #([end] : $e._End),
+                #([pregex] : $e._MyRe),
+                #([flags] : $e._Flags),
+                #([match] : $e._MyVal)
+            )
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::tr1::regex_token_iterator from <regex>
+;------------------------------------------------------------------------------
+std::tr1::regex_token_iterator<*>{
+    preview (
+        #if ($e._Res == 0) (
+            ; We represent end-of-sequence regex_token_iterators with null result pointers.
+            "end"
+        ) #else (
+            ; Dereferenceable regex_token_iterators return *result when dereferenced,
+            ; so we'll preview them with that.
+            *$e._Res
+        )
+    )
+
+    children (
+        #if ($e._Res == 0) (
+            ; We make end-of-sequence regex_token_iterators appear to have no children.
+            #array(expr: 0, size: 0)
+        ) #else (
+            ; For ease of understanding, we make dereferenceable regex_token_iterators
+            ; appear to have data members with the "for exposition only" names from TR1.
+            #(
+                #([position] : $e._Pos),
+                #([result] : $e._Res),
+                #([suffix] : $e._Suffix),
+                #([N] : $e._Cur),
+                #([subs] : $e._Subs)
+            )
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::identity, etc. from <functional>
+;------------------------------------------------------------------------------
+; Stateless function objects: preview with a fixed label, no children.
+std::identity<*>{
+    preview ( "identity" )
+    children ( #array(expr: 0, size: 0) )
+}
+std::bit_and<*>{
+    preview ( "bit_and" )
+    children ( #array(expr: 0, size: 0) )
+}
+std::bit_or<*>{
+    preview ( "bit_or" )
+    children ( #array(expr: 0, size: 0) )
+}
+std::bit_xor<*>{
+    preview ( "bit_xor" )
+    children ( #array(expr: 0, size: 0) )
+}
+
+;------------------------------------------------------------------------------
+; std::unique_ptr from <memory>
+;------------------------------------------------------------------------------
+std::unique_ptr<*>{
+    preview (
+        ; A null _Myptr means the unique_ptr owns nothing.
+        #if ($e._Myptr == 0) (
+            "empty"
+        ) #else (
+            ; Otherwise preview the pointee, tagged so it is not mistaken
+            ; for a plain object.
+            #(
+                "unique_ptr ",
+                *$e._Myptr
+            )
+        )
+    )
+
+    children (
+        #if ($e._Myptr == 0) (
+            ; Empty unique_ptrs appear to have no children.
+            #array(expr: 0, size: 0)
+        ) #else (
+            #([ptr] : $e._Myptr)
+        )
+    )
+}
+
+;------------------------------------------------------------------------------
+; std::forward_list from <forward_list>
+;------------------------------------------------------------------------------
+; forward_list is a singly-linked list with no stored size; walk the nodes
+; from _Myhead via _Next. Note: no size bound is given to #list here, so the
+; walk relies on the list being null-terminated.
+std::forward_list<*>{
+    preview (
+        #(
+            "(",
+            #list(
+                head: $e._Myhead,
+                next: _Next
+            ) : $e._Myval,
+            ")"
+        )
+    )
+
+    children (
+        #list(
+            head: $e._Myhead,
+            next: _Next
+        ) : $e._Myval
+    )
+}
+; forward_list iterators: a null node pointer is the end iterator; otherwise
+; preview the pointed-to node's stored value.
+std::_Flist_iterator<*>|std::_Flist_const_iterator<*>{
+    preview (
+        #if ($e._Ptr == 0) (
+            "end"
+        ) #else (
+            $e._Ptr->_Myval
+        )
+    )
+
+    children (
+        #if ($e._Ptr == 0) (
+            ; End iterators appear to have no children.
+            #array(expr: 0, size: 0)
+        ) #else (
+            ; Conceptually a pointer to the element, so show one.
+            #([ptr] : &$e._Ptr->_Myval)
+        )
+    )
+}
+
+
+;------------------------------------------------------------------------------
+; PROPVARIANT
+;------------------------------------------------------------------------------
+; Visualizers for VT_VECTOR C arrays
+; The PROPVARIANT counted-array ("CA") structs all share the same layout:
+; a ULONG element count (cElems) and a pointer to the elements (pElems).
+; Preview as "[count](e0, e1, ...)".
+tagCAC|tagCAUB|tagCAI|tagCAUI|tagCAL|tagCAUL|tagCAFLT|tagCADBL|tagCACY|tagCADATE|tagCABSTR|tagCABSTRBLOB|tagCABOOL|tagCASCODE|tagCAPROPVARIANT|tagCAH|tagCAUH|tagCALPSTR|tagCALPWSTR|tagCAFILETIME|tagCACLIPDATA|tagCACLSID{
+    preview(
+        #(
+            "[", $e.cElems , "](",
+            #array
+            (
+                expr : ($e.pElems)[$i],
+                size : $e.cElems
+            ),
+            ")"
+        )
+    )
+    children
+    (
+        #array
+        (
+            expr : ($e.pElems)[$i],
+            size : $e.cElems
+        )
+    )
+}
+; Visualizers for SAFE ARRAY
+; SAFEARRAY visualizer. The element type is determined either from the hidden
+; VARTYPE stored in the 4 bytes immediately before the SAFEARRAY header
+; (FADF_HAVEVARTYPE), or from the FADF_BSTR/FADF_UNKNOWN/FADF_DISPATCH/
+; FADF_VARIANT feature flags. Elements live in pvData; per-dimension extents
+; and lower bounds come from rgsabound[] (cDims dimensions), which the ranked
+; #array expansions use via $r.
+tagSAFEARRAY|SAFEARRAY{
+    preview(
+        #if ($e.fFeatures & 0x0080) ; FADF_HAVEVARTYPE
+        (
+            ; Switch on the variant type field - which is stored 4 bytes
+            ; before the beginning of the SAFEARRAY type
+            #switch( ((unsigned *)&($e))[-1] )
+            #case 0x2 ; VT_I2 | VT_ARRAY
+            (
+                #(
+                    "safearray of I2 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((signed short *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x3 ; VT_I4 | VT_ARRAY
+            (
+                #(
+                    "safearray of I4 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((signed int *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x4 ; VT_R4 | VT_ARRAY
+            (
+                #(
+                    "safearray of R4 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((float *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x5 ; VT_R8 | VT_ARRAY
+            (
+                #(
+                    "safearray of R8 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((double *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x6 ; VT_CY | VT_ARRAY
+            (
+                #(
+                    "safearray of CY = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((CY *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x7 ; VT_DATE | VT_ARRAY
+            (
+                #(
+                    "safearray of DATE = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((DATE *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x8 ; VT_BSTR | VT_ARRAY
+            (
+                #(
+                    "safearray of BSTR = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((wchar_t **)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0xa ; VT_ERROR | VT_ARRAY
+            (
+                #(
+                    "safearray of ERROR = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((long *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0xb ; VT_BOOL | VT_ARRAY
+            (
+                #(
+                    "safearray of BOOL = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((short *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0xc ; VT_VARIANT | VT_ARRAY
+            (
+                #(
+                    "safearray of VARIANT = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((tagVARIANT *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x10 ; VT_I1 | VT_ARRAY
+            (
+                #(
+                    "safearray of I1 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((signed char *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x11 ; VT_UI1 | VT_ARRAY
+            (
+                #(
+                    "safearray of UI1 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((unsigned char *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x12 ; VT_UI2 | VT_ARRAY
+            (
+                #(
+                    "safearray of UI2 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((unsigned short *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x13 ; VT_UI4 | VT_ARRAY
+            (
+                #(
+                    "safearray of UI4 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((unsigned int *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x14 ; VT_I8 | VT_ARRAY
+            (
+                #(
+                    "safearray of I8 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((signed __int64 *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x15 ; VT_UI8 | VT_ARRAY
+            (
+                #(
+                    "safearray of UI8 = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((unsigned __int64 *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x16 ; VT_INT | VT_ARRAY
+            (
+                #(
+                    "safearray of INT = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((int *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x17 ; VT_UINT | VT_ARRAY
+            (
+                #(
+                    "safearray of UINT = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((unsigned *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x1e ; VT_LPSTR | VT_ARRAY
+            (
+                #(
+                    "safearray of LPSTR = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((char **)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x1f ; VT_LPWSTR | VT_ARRAY
+            (
+                #(
+                    "safearray of LPWSTR = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((wchar_t **)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x40 ; VT_FILETIME | VT_ARRAY
+            (
+                #(
+                    "safearray of FILETIME = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((FILETIME *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x47 ; VT_CF | VT_ARRAY (0x47 is VT_CF; elements are CLIPDATA)
+            (
+                #(
+                    "safearray of CLIPDATA = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((CLIPDATA *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+            #case 0x48 ; VT_CLSID | VT_ARRAY
+            (
+                #(
+                    "safearray of CLSID = [",
+                    ; output the rank array
+                    #array( expr: $e.rgsabound[$i].cElements, size: $e.cDims),
+                    "](",
+                    ; output the data elements
+                    #array(
+                        expr: ((CLSID *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    ),
+                    ")"
+                )
+            )
+        )
+        #elif ($e.fFeatures & 0x0100) ; FADF_BSTR
+        (
+            #("safearray of BSTR = ",#array(expr: $e.rgsabound[$i].cElements, size: $e.cDims) : #("[",$e,"]"), "(", #array(expr: ((wchar_t * *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound ), ")")
+        )
+        #elif ($e.fFeatures & 0x0200) ; FADF_UNKNOWN
+        (
+            #("safearray of IUnknown* = [",#array(expr: $e.rgsabound[$i].cElements, size: $e.cDims), "](", #array(expr: ((IUnknown * *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound ), ")")
+        )
+        #elif ($e.fFeatures & 0x0400) ; FADF_DISPATCH
+        (
+            #("safearray of IDispatch* = [",#array(expr: $e.rgsabound[$i].cElements, size: $e.cDims), "](", #array(expr: ((IDispatch * *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound ), ")")
+        )
+        #elif ($e.fFeatures & 0x0800) ; FADF_VARIANT
+        (
+            #("safearray of VARIANT = ",#array(expr: $e.rgsabound[$i].cElements, size: $e.cDims) : #("[",$e,"]"), "(", #array(expr: ((tagVARIANT *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound ), ")")
+        )
+    )
+    children
+    (
+        #( ;[actual members]: [$e,!],
+            #if ($e.fFeatures & 0x0080) ; FADF_HAVEVARTYPE
+            (
+                #switch( ((unsigned *)&($e))[-1] ) ; for some reason the VT field is before the SAFEARRAY struct
+                #case 2 ; VT_I2|VT_ARRAY
+                (
+                    #array(
+                        expr: ((signed short *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 3 ; VT_I4|VT_ARRAY
+                (
+                    #array(
+                        expr: ((signed int *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 4 ; VT_R4|VT_ARRAY
+                (
+                    #array(
+                        expr: ((float *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 5 ; VT_R8|VT_ARRAY
+                (
+                    #array(
+                        expr: ((double *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x10 ; VT_I1|VT_ARRAY
+                (
+                    #array(
+                        expr: ((signed char *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x11 ; VT_UI1|VT_ARRAY
+                (
+                    #array(
+                        expr: ((unsigned char *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x12 ; VT_UI2|VT_ARRAY
+                (
+                    #array(
+                        expr: ((unsigned short *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x13 ; VT_UI4|VT_ARRAY
+                (
+                    #array(
+                        expr: ((unsigned int *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x14 ; VT_I8|VT_ARRAY
+                (
+                    #array(
+                        expr: ((signed __int64 *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x15 ; VT_UI8|VT_ARRAY
+                (
+                    #array(
+                        expr: ((unsigned __int64 *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x1e ; VT_LPSTR|VT_ARRAY
+                (
+                    #array(
+                        expr: ((char * *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x1f ; VT_LPWSTR|VT_ARRAY
+                (
+                    #array(
+                        expr: ((wchar_t **)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0xc ; VT_VARIANT|VT_ARRAY
+                (
+                    #array(
+                        expr: ((tagVARIANT *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0xb ; VT_BOOL|VT_ARRAY
+                (
+                    #array(
+                        expr: ((short *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0xa ; VT_ERROR|VT_ARRAY
+                (
+                    #array(
+                        expr: ((long *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 6 ; VT_CY|VT_ARRAY
+                (
+                    #array(
+                        expr: ((CY *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 7 ; VT_DATE|VT_ARRAY
+                (
+                    #array(
+                        expr: ((DATE *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x40 ; VT_FILETIME|VT_ARRAY
+                (
+                    #array(
+                        expr: ((FILETIME *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x48 ; VT_CLSID|VT_ARRAY
+                (
+                    #array(
+                        expr: ((CLSID *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x47 ; VT_CF|VT_ARRAY
+                (
+                    #array(
+                        expr: ((CLIPDATA *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 8 ; VT_BSTR|VT_ARRAY
+                (
+                    #array(
+                        expr: ((wchar_t * *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x16 ; VT_INT|VT_ARRAY
+                (
+                    #array(
+                        expr: ((int *)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #case 0x17 ; VT_UINT|VT_ARRAY
+                (
+                    #array(
+                        expr: ((unsigned int*)$e.pvData)[$i],
+                        size: $e.rgsabound[$r].cElements,
+                        rank: $e.cDims,
+                        base: $e.rgsabound[$r].lLbound
+                    )
+                )
+                #default
+                (
+                    ; Unrecognized VARTYPE: fall back to the raw struct members.
+                    #([actual members]: [$e,!])
+                )
+                #except
+                (
+                    ; Evaluation failure: fall back to the raw struct members.
+                    #([actual members]: [$e,!])
+                )
+            )
+            #elif ($e.fFeatures & 0x0100) ; FADF_BSTR
+            (
+                #array(expr: ((wchar_t * *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound )
+            )
+            #elif ($e.fFeatures & 0x0200) ; FADF_UNKNOWN
+            (
+                #array(expr: ((IUnknown * *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound )
+            )
+            #elif ($e.fFeatures & 0x0400) ; FADF_DISPATCH
+            (
+                #array(expr: ((IDispatch * *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound )
+            )
+            #elif ($e.fFeatures & 0x0800) ; FADF_VARIANT
+            (
+                #array(expr: ((tagVARIANT *)$e.pvData)[$i], size: $e.rgsabound[$r].cElements, rank: $e.cDims, base: $e.rgsabound[$r].lLbound )
+            )
+        )
+    )
+}
+tagPROPVARIANT|tagVARIANT|PROPVARIANT|VARIANT{
+ preview(
+ #switch ($e.vt)
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Base Types ;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ #case 0 ( #("Empty") ) ; VT_EMPTY
+ #case 1 ( #("NULL") ) ; VT_NULL
+ #case 2 ( #("I2 = ", $e.iVal) ) ; VT_I2
+ #case 3 ( #("I4 = ", $e.lVal) ) ; VT_I4
+ #case 4 ( #("R4 = ", $e.fltVal) ) ; VT_R4
+ #case 5 ( #("R8 = ", $e.dblVal) ) ; VT_R8
+ #case 6 ( #("CY = ", $e.cyVal) ) ; VT_CY
+ #case 7 ( #("DATE = ", $e.date) ) ; VT_DATE
+ #case 8 ( #("BSTR = ", $e.bstrVal) ) ; VT_BSTR
+ #case 9 ( #("DISPATCH = ", $e.pdispVal) ) ; VT_DISPATCH
+ #case 10 ( #("ERROR = ", $e.scode) ) ; VT_ERROR
+ #case 0xB ( #("BOOL = ", $e.boolVal) ) ; VT_BOOL
+ #case 0xC ( #("VARIANT ") ) ; VT_VARIANT
+ #case 0xD ( #("UNKNOWN = ", $e.punkVal) ) ; VT_UNKOWN
+ #case 0xE ( #("DECIMAL = ", $e.decVal) ) ; VT_DECIMAL
+ #case 0x10 ( #("I1 = ", $e.cVal) ) ; VT_I1
+ #case 0x11 ( #("UI1 = ", $e.bVal) ) ; VT_UI1
+ #case 0x12 ( #("UI2 = ", $e.uiVal) ) ; VT_UI2
+ #case 0x13 ( #("UI4 = ", $e.ulVal) ) ; VT_UI4
+ #case 0x14 ( #("I8 = ", *(__int64*)&$e.dblVal) ) ; VT_I8
+ #case 0x15 ( #("UI8 = ", *(unsigned __int64*)&$e.dblVal) ) ; VT_UI8
+ #case 0x16 ( #("INT = ", $e.intVal) ) ; VT_INT
+ #case 0x17 ( #("UINT = ", $e.uintVal) ) ; VT_UINT
+ #case 0x18 ( #("VOID ") ) ; VT_VOID
+ #case 0x19 ( #("HRESULT ") ) ; VT_HRESULT
+ #case 0x1A ( #("PTR ") ) ; VT_PTR
+ #case 0x1B ( #("SAFEARRAY ") ) ; VT_SAFEARRAY
+ #case 0x1C ( #("CARRAY ") ) ; VT_CARRAY
+ #case 0x1D ( #("USERDEFINED ") ) ; VT_USERDEFINED
+ #case 0x1E ( #("LPSTR = ", $e.pszVal) ) ; VT_LPSTR
+ #case 0x1F ( #("LPWSTR = ", $e.pwszVal) ) ; VT_LPWSTR
+ #case 0x24 ( #("RECORD ") ) ; VT_RECORD
+ #case 0x26 ( #("UINT_PTR ") ) ; VT_UINT_PTR
+ #case 0x40 ( #("FILETIME = ", $e.filetime) ) ; VT_FILETIME
+ #case 0x42 ( #("STREAM = ", $e.pStream) ) ; VT_STREAM
+ #case 0x43 ( #("STORAGE = ", $e.pStorage) ) ; VT_STORAGE
+ #case 0x44 ( #("STREAMED_OBJECT = ", $e.pStream) ) ; VT_STREAMED_OBJECT
+ #case 0x45 ( #("STORED_OBJECT = ", $e.pStorage) ) ; VT_STORED_OBJECT
+ #case 0x46 ( #("BLOB_OBJECT = ", $e.blob ) ) ; VT_BLOB_OBJECT
+ #case 0x47 ( #("CF = ", $e.pclipdata) ) ; VT_CF
+ #case 0x48 ( #("CLSID = ", $e.puuid) ) ; VT_CLSID
+ #case 0x49 ( #("VERSIONED_STREAM = ", $e.pVersionedStream) ) ; VT_VERSIONED_STREAM
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Vector types ;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ #case 0x1002 ( #("vector of I2 = ", $e.cai) ) ; VT_I2|VT_VECTOR
+ #case 0x1003 ( #("vector of I4 = ", $e.cal) ) ; VT_I4|VT_VECTOR
+ #case 0x1004 ( #("vector of R4 = ", $e.caflt) ) ; VT_R4|VT_VECTOR
+ #case 0x1005 ( #("vector of R8 = ", $e.cadbl) ) ; VT_R8|VT_VECTOR
+ #case 0x1010 ( #("vector of I1 = ", $e.cac) ) ; VT_I1|VT_VECTOR
+ #case 0x1011 ( #("vector of UI1 = ", $e.caub) ) ; VT_UI1|VT_VECTOR
+ #case 0x1012 ( #("vector of UI2 = ", $e.caui) ) ; VT_UI2|VT_VECTOR
+ #case 0x1013 ( #("vector of UI4 = ", $e.caul) ) ; VT_UI4|VT_VECTOR
+ #case 0x1014 ( #("vector of I8 = ", $e.cah) ) ; VT_I8|VT_VECTOR
+ #case 0x1015 ( #("vector of UI8 = ", $e.cauh) ) ; VT_UI8|VT_VECTOR
+ #case 0x101E ( #("vector of LPSTR = ", $e.calpstr) ) ; VT_LPSTR|VT_VECTOR
+ #case 0x101F ( #("vector of LPWSTR = ", $e.calpwstr) ) ; VT_LPWSTR|VT_VECTOR
+ #case 0x100C ( #("vector of VARIANT ", $e.capropvar) ) ; VT_VARIANT|VT_VECTOR
+ #case 0x100B ( #("vector of BOOL = ", $e.cabool) ) ; VT_BOOL|VT_VECTOR
+ #case 0x100A ( #("vector of ERROR = ", $e.cascode) ) ; VT_ERROR|VT_VECTOR
+ #case 0x1006 ( #("vector of CY = ", $e.cacy) ) ; VT_CY|VT_VECTOR
+ #case 0x1007 ( #("vector of DATE = ", $e.cadate) ) ; VT_DATE|VT_VECTOR
+ #case 0x1040 ( #("vector of FILETIME = ", $e.cafiletime) ) ; VT_FILETIME|VT_VECTOR
+ #case 0x1048 ( #("vector of CLSID = ", $e.cauuid) ) ; VT_CLSID|VT_VECTOR
+ #case 0x1047 ( #("vector of CF = ", $e.caclipdata) ) ; VT_CF|VT_VECTOR
+ #case 0x1008 ( #("vector of BSTR = ", $e.cabstr) ) ; VT_BSTR|VT_VECTOR
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Byref Types ;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ #case 0x4016 ( #("byref of INT = ", $e.pintVal) ) ; VT_INT|VT_BYREF
+ #case 0x4017 ( #("byref of UINT = ", $e.puintVal) ) ; VT_UINT|VT_BYREF
+ #case 0x4002 ( #("byref of I2 = ", $e.piVal) ) ; VT_I2|VT_BYREF
+ #case 0x4003 ( #("byref of I4 = ", $e.plVal) ) ; VT_I4|VT_BYREF
+ #case 0x4004 ( #("byref of R4 = ", $e.pfltVal) ) ; VT_R4|VT_BYREF
+ #case 0x4005 ( #("byref of R8 = ", $e.pdblVal) ) ; VT_R8|VT_BYREF
+ #case 0x4010 ( #("byref of I1 = ", $e.pcVal) ) ; VT_I1|VT_BYREF
+ #case 0x4011 ( #("byref of UI1 = ", $e.pbVal) ) ; VT_UI1|VT_BYREF
+ #case 0x4012 ( #("byref of UI2 = ", $e.puiVal) ) ; VT_UI2|VT_BYREF
+ #case 0x4013 ( #("byref of UI4 = ", $e.pulVal) ) ; VT_UI4|VT_BYREF
+ #case 0x4014 ( #("byref of I8 = ", (__int64*)$e.pdblVal) ) ; VT_I8|VT_BYREF
+ #case 0x4015 ( #("byref of UI8 = ", (unsigned __int64*)$e.pudblVal) ) ; VT_UI8|VT_BYREF
+ #case 0x400C ( #("byref of VARIANT ", $e.pvarVal) ) ; VT_VARIANT|VT_BYREF
+ #case 0x400B ( #("byref of BOOL = ", $e.pboolVal) ) ; VT_BOOL|VT_BYREF
+ #case 0x400A ( #("byref of ERROR = ", $e.pscode) ) ; VT_ERROR|VT_BYREF
+ #case 0x4006 ( #("byref of CY = ", $e.pcyVal) ) ; VT_CY|VT_BYREF
+ #case 0x4007 ( #("byref of DATE = ", $e.pdate) ) ; VT_DATE|VT_BYREF
+ #case 0x4008 ( #("byref of BSTR = ", $e.pbstrVal) ) ; VT_BSTR|VT_BYREF
+ #case 0x400E ( #("byref of DECIMAL = ", $e.pdecVal) ) ; VT_DECIMAL|VT_BYREF
+			#case 0x400D ( #("byref of UNKNOWN = ", $e.ppunkVal) ) ; VT_UNKNOWN|VT_BYREF
+ #case 0x4009 ( #("byref of DISPATCH = ", $e.ppdispVal) ) ; VT_DISPATCH|VT_BYREF
+ #case 0x6000 ( #("byref of ARRAY = ", $e.pparray) ) ; VT_ARRAY|VT_BYREF
+ #default
+ (
+ #if ($e.vt & 0x2000) ( $e.parray)
+ #else ( #("Unknown vt type = ", $e.vt))
+ )
+ )
+ children(
+ #(
+ vt: $e.vt,
+ #switch ($e.vt)
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Base Types ;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ #case 0x2 ( #(I2 : $e.iVal) ) ; VT_I2
+ #case 0x3 ( #(I4 : $e.lVal) ) ; VT_I4
+ #case 0x4 ( #(R4 : $e.fltVal) ) ; VT_R4
+ #case 0x5 ( #(R8 : $e.dblVal) ) ; VT_R8
+ #case 0x6 ( #(CY : $e.cyVal) ) ; VT_CY
+ #case 0x7 ( #(DATE : $e.date) ) ; VT_DATE
+ #case 0x8 ( #(BSTR : $e.bstrVal) ) ; VT_BSTR
+ #case 0x9 ( #(DISPATCH : $e.pdispVal) ) ; VT_DISPATCH
+ #case 0xA ( #(ERROR : $e.scode) ) ; VT_ERROR
+ #case 0xB ( #(BOOL : $e.boolVal) ) ; VT_BOOL
+			#case 0xD ( #(UNKNOWN : $e.punkVal) ) ; VT_UNKNOWN
+ #case 0xE ( #(DECIMAL : $e.decVal) ) ; VT_DECIMAL
+ #case 0x10 ( #(I1 : $e.cVal) ) ; VT_I1
+ #case 0x11 ( #(UI1 : $e.bVal) ) ; VT_UI1
+ #case 0x12 ( #(UI2 : $e.uiVal) ) ; VT_UI2
+ #case 0x13 ( #(UI4 : $e.ulVal) ) ; VT_UI4
+ #case 0x14 ( #(I8 : *(__int64*)&$e.dblVal) ) ; VT_I8
+ #case 0x15 ( #(UI8 : *(unsigned __int64*)&$e.dblVal) ) ; VT_UI8
+ #case 0x16 ( #(INT : $e.intVal) ) ; VT_INT
+ #case 0x17 ( #(UINT : $e.uintVal) ) ; VT_UINT
+ #case 0x1E ( #(LPSTR : $e.pszVal) ) ; VT_LPSTR
+ #case 0x1F ( #(LPWSTR : $e.pwszVal) ) ; VT_LPWSTR
+ #case 0x40 ( #(FILETIME : $e.filetime) ) ; VT_FILETIME
+ #case 0x42 ( #(STREAM : $e.pStream) ) ; VT_STREAM
+ #case 0x43 ( #(STORAGE : $e.pStorage) ) ; VT_STORAGE
+ #case 0x44 ( #(STREAMED_OBJECT : $e.pStream) ) ; VT_STREAMED_OBJECT
+ #case 0x45 ( #(STORED_OBJECT : $e.pStorage) ) ; VT_STORED_OBJECT
+ #case 0x46 ( #(BLOB_OBJECT : $e.blob ) ) ; VT_BLOB_OBJECT
+ #case 0x47 ( #(CF : $e.pclipdata) ) ; VT_CF
+ #case 0x48 ( #(CLSID : $e.puuid) ) ; VT_CLSID
+ #case 0x49 ( #(VERSIONED_STREAM : $e.pVersionedStream) ) ; VT_VERSIONED_STREAM
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Vector types ;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ #case 0x1002 ( #(vector of I2 : $e.cai) ) ; VT_I2|VT_VECTOR
+ #case 0x1003 ( #(vector of I4 : $e.cal) ) ; VT_I4|VT_VECTOR
+ #case 0x1004 ( #(vector of R4 : $e.caflt) ) ; VT_R4|VT_VECTOR
+ #case 0x1005 ( #(vector of R8 : $e.cadbl) ) ; VT_R8|VT_VECTOR
+ #case 0x1010 ( #(vector of I1 : $e.cac) ) ; VT_I1|VT_VECTOR
+ #case 0x1011 ( #(vector of UI1 : $e.caub) ) ; VT_UI1|VT_VECTOR
+ #case 0x1012 ( #(vector of UI2 : $e.caui) ) ; VT_UI2|VT_VECTOR
+ #case 0x1013 ( #(vector of UI4 : $e.caul) ) ; VT_UI4|VT_VECTOR
+ #case 0x1014 ( #(vector of I8 : $e.cah) ) ; VT_I8|VT_VECTOR
+ #case 0x1015 ( #(vector of UI8 : $e.cauh) ) ; VT_UI8|VT_VECTOR
+ #case 0x101E ( #(vector of LPSTR : $e.calpstr) ) ; VT_LPSTR|VT_VECTOR
+ #case 0x101F ( #(vector of LPWSTR : $e.calpwstr) ) ; VT_LPWSTR|VT_VECTOR
+ #case 0x100C ( #(vector of VARIANT : $e.capropvar) ) ; VT_VARIANT|VT_VECTOR
+ #case 0x100B ( #(vector of BOOL : $e.cabool) ) ; VT_BOOL|VT_VECTOR
+ #case 0x100A ( #(vector of ERROR : $e.cascode) ) ; VT_ERROR|VT_VECTOR
+ #case 0x1006 ( #(vector of CY : $e.cacy) ) ; VT_CY|VT_VECTOR
+ #case 0x1007 ( #(vector of DATE : $e.cadate) ) ; VT_DATE|VT_VECTOR
+ #case 0x1040 ( #(vector of FILETIME : $e.cafiletime) ) ; VT_FILETIME|VT_VECTOR
+ #case 0x1048 ( #(vector of CLSID : $e.cauuid) ) ; VT_CLSID|VT_VECTOR
+ #case 0x1047 ( #(vector of CF : $e.caclipdata) ) ; VT_CF|VT_VECTOR
+ #case 0x1008 ( #(vector of BSTR : $e.cabstr) ) ; VT_BSTR|VT_VECTOR
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Byref Types ;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ #case 0x4016 ( #(byref of INT : $e.pintVal) ) ; VT_INT|VT_BYREF
+ #case 0x4017 ( #(byref of UINT : $e.puintVal) ) ; VT_UINT|VT_BYREF
+ #case 0x4002 ( #(byref of I2 : $e.piVal) ) ; VT_I2|VT_BYREF
+ #case 0x4003 ( #(byref of I4 : $e.plVal) ) ; VT_I4|VT_BYREF
+ #case 0x4004 ( #(byref of R4 : $e.pfltVal) ) ; VT_R4|VT_BYREF
+ #case 0x4005 ( #(byref of R8 : $e.pdblVal) ) ; VT_R8|VT_BYREF
+ #case 0x4010 ( #(byref of I1 : $e.pcVal) ) ; VT_I1|VT_BYREF
+ #case 0x4011 ( #(byref of UI1 : $e.pbVal) ) ; VT_UI1|VT_BYREF
+ #case 0x4012 ( #(byref of UI2 : $e.puiVal) ) ; VT_UI2|VT_BYREF
+ #case 0x4013 ( #(byref of UI4 : $e.pulVal) ) ; VT_UI4|VT_BYREF
+ #case 0x4014 ( #(byref of I8 : (__int64*)$e.pdblVal) ) ; VT_I8|VT_BYREF
+ #case 0x4015 ( #(byref of UI8 : (unsigned __int64*)$e.pdblVal) ) ; VT_UI8|VT_BYREF
+ #case 0x400C ( #(byref of VARIANT : $e.pvarVal) ) ; VT_VARIANT|VT_BYREF
+ #case 0x400B ( #(byref of BOOL : $e.pboolVal) ) ; VT_BOOL|VT_BYREF
+ #case 0x400A ( #(byref of ERROR : $e.pscode) ) ; VT_ERROR|VT_BYREF
+ #case 0x4006 ( #(byref of CY : $e.pcyVal) ) ; VT_CY|VT_BYREF
+ #case 0x4007 ( #(byref of DATE : $e.pdate) ) ; VT_DATE|VT_BYREF
+ #case 0x4008 ( #(byref of BSTR : $e.pbstrVal) ) ; VT_BSTR|VT_BYREF
+ #case 0x400E ( #(byref of DECIMAL : $e.pdecVal) ) ; VT_DECIMAL|VT_BYREF
+			#case 0x400D ( #(byref of UNKNOWN : $e.ppunkVal) ) ; VT_UNKNOWN|VT_BYREF
+ #case 0x4009 ( #(byref of DISPATCH : $e.ppdispVal) ) ; VT_DISPATCH|VT_BYREF
+ #case 0x6000 ( #(byref of ARRAY : $e.pparray) ) ; VT_ARRAY|VT_BYREF
+
+ ; the following are either empty or invalid vt values for a variant
+ ; #case 0 ( #(Empty :) ) ; VT_EMPTY
+ ; #case 0x1 ( #(NULL :) ) ; VT_NULL
+ ; #case 0xC ( #(VARIANT :) ) ; VT_VARIANT
+ ; #case 0x18 ( #(VOID :) ) ; VT_VOID
+ ; #case 0x19 ( #(HRESULT :) ) ; VT_HRESULT
+ ; #case 0x1A ( #(PTR :) ) ; VT_PTR
+ ; #case 0x1B ( #(SAFEARRAY :) ) ; VT_SAFEARRAY
+ ; #case 0x1C ( #(CARRAY :) ) ; VT_CARRAY
+ ; #case 0x1D ( #(USERDEFINED :) ) ; VT_USERDEFINED
+ ; #case 0x24 ( #(RECORD :) ) ; VT_RECORD
+ ; #case 0x26 ( #(UINT_PTR :) ) ; VT_UINT_PTR
+ #default
+ (
+ #if ($e.vt & 0x2000 )
+ ( #(safearray: $e.parray))
+ #else
+ (
+ #(
+ [raw members]: [$e,!] ; unformatted data members
+ )
+ )
+ )
+ #except
+ (
+ #(
+ [raw members]: [$e,!] ; unformatted data members
+ )
+ )
+ )
+ )
+}
+
+; Visualizers for data structures in namespace Concurrency
+;------------------------------------------------------------------------------
+; Concurrency::message from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::message<*>{
+ preview (
+ #(
+ $e.payload
+ )
+ )
+
+ children (
+ #(
+ #(payload: $e.payload),
+ #([msg_id]: $e._M_id)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::multi_link_registry from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::multi_link_registry<*>{
+ preview (
+ #(
+ "[",
+ $e._M_vector._M_index,
+ "](",
+ #array(
+ expr: *($e._M_vector._M_array[$i]),
+ size: $e._M_vector._M_index
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size]: $e._M_vector._M_index),
+ #array(
+ expr: *($e._M_vector._M_array[$i]),
+ size: $e._M_vector._M_index
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::_Queue from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::details::_Queue<*>{
+ preview (
+ #(
+ "[",
+ $e._M_count,
+ "](",
+ #list(
+ head: $e._M_pHead,
+ next: _M_pNext,
+ size: _M_count
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size]: $e._M_count),
+ #list(
+ head: $e._M_pHead,
+ next: _M_pNext,
+ size: _M_count
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::unbounded_buffer from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::unbounded_buffer<*>{
+ preview (
+ #(
+ $e._M_messageBuffer
+ )
+ )
+
+ children (
+ #(
+ #(unprocessed_messages: $e._M_messageProcessor._M_queuedMessages._M_queue),
+ #(messages: $e._M_messageBuffer),
+ #(message_filter: *($e._M_pFilter)),
+ #(linked_sources: $e._M_connectedSources._M_links),
+ #(linked_targets: $e._M_connectedTargets),
+ #(reserving_target: *($e._M_pReservedFor)),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::overwrite_buffer from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::overwrite_buffer<*>{
+ preview (
+ #(
+ $e._M_pMessage
+ )
+ )
+
+ children (
+ #(
+ #(value: *($e._M_pMessage)),
+ #(unprocessed_messages: $e._M_messageProcessor._M_queuedMessages._M_queue),
+ #(message_filter: *($e._M_pFilter)),
+ #(linked_sources: $e._M_connectedSources._M_links),
+ #(linked_targets: $e._M_connectedTargets),
+ #(reserving_target: *($e._M_pReservedFor)),
+ #(reserved_message: *($e._M_pReservedMessage)),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::single_assignment from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::single_assignment<*>{
+ preview (
+ #(
+ $e._M_pMessage
+ )
+ )
+
+ children (
+ #(
+ #(value: *($e._M_pMessage)),
+ #(unprocessed_messages: $e._M_messageProcessor._M_queuedMessages._M_queue),
+ #(message_filter: *($e._M_pFilter)),
+ #(linked_sources: $e._M_connectedSources._M_links),
+ #(linked_targets: $e._M_connectedTargets),
+ #(reserving_target: *($e._M_pReservedFor)),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::call from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::call<*>{
+ preview (
+ #(
+ $e._M_pFunc
+ )
+ )
+
+ children (
+ #(
+ #(call_method: $e._M_pFunc),
+ #(unprocessed_messages: $e._M_messageProcessor._M_queuedMessages._M_queue),
+ #(message_filter: *($e._M_pFilter)),
+ #(linked_sources: $e._M_connectedSources._M_links),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::transformer from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::transformer<*>{
+ preview (
+ #(
+ $e._M_pFunc
+ )
+ )
+
+ children (
+ #(
+ #(transform_method: $e._M_pFunc),
+ #(unprocessed_messages: $e._M_messageProcessor._M_queuedMessages._M_queue),
+ #(messages: $e._M_messageBuffer),
+ #(message_filter: *($e._M_pFilter)),
+ #(linked_sources: $e._M_connectedSources._M_links),
+ #(linked_target: *($e._M_connectedTargets._M_connectedLink)),
+ #(reserving_target: *($e._M_pReservedFor)),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::choice from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::choice<*>{
+ preview (
+ #(
+ "[",
+ #if ($e._M_pSingleAssignment->_M_fIsInitialized) ("initialized")
+ #else ("not_initialized"),
+ "] ",
+ $e._M_sourceTuple
+ )
+ )
+
+ children (
+ #(
+ #([input_count]: $e._M_pSingleAssignment->_M_connectedSources._M_links._M_vector._M_index),
+ #(index: $e._M_pSingleAssignment->_M_pMessage->payload),
+ #(source_tuple: $e._M_sourceTuple),
+ #(linked_sources: $e._M_pSingleAssignment->_M_connectedSources._M_links),
+ #(linked_targets: $e._M_pSingleAssignment->_M_connectedTargets),
+ #(reserving_target: *($e._M_pSingleAssignment->_M_pReservedFor)),
+ #(Scheduler: *($e._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_pScheduleGroup)),
+ #([raw _M_pSourceChoices] : $e._M_pSourceChoices)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::join<*,*>::_MessageArray from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::join<*,*>::_MessageArray{
+ preview (
+ #(
+ "[",
+ $e._M_count,
+ "](",
+ #array(
+ expr: *(((Concurrency::message<$T1>**)$e._M_messages)[$i]),
+ size: $e._M_count
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size]: $e._M_count),
+ #array(
+ expr: *(((Concurrency::message<$T1>**)$e._M_messages)[$i]),
+ size: $e._M_count
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::join<*,*>::_SavedMessageIdArray from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::join<*,*>::_SavedMessageIdArray{
+ preview (
+ #(
+ "[",
+ $e._M_count,
+ "](",
+ #array(
+ expr: ((int*)$e._M_savedIds)[$i],
+ size: $e._M_count
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size]: $e._M_count),
+ #array(
+ expr: ((int*)$e._M_savedIds)[$i],
+ size: $e._M_count
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::join from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::join<*,*>{
+ preview (
+ #(
+ "[",
+ $e._M_messageArray._M_count - $e._M_messagesRemaining,
+ "/",
+ $e._M_messageArray._M_count,
+ "](",
+ #array(
+ expr: *($e._M_connectedSources._M_links._M_vector._M_array[$i]),
+ size: $e._M_connectedSources._M_links._M_vector._M_index
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([join_type]: (Concurrency::join_type)$T2),
+ #([offer_count]: $e._M_messageArray._M_count - $e._M_messagesRemaining),
+ #(offer_IDs: $e._M_savedMessageIdArray),
+ #([input_count]: $e._M_messageArray._M_count),
+ #(input_values: $e._M_messageArray),
+ #(messages: $e._M_messageBuffer),
+ #(message_filter: *($e._M_pFilter)),
+ #(linked_sources: $e._M_connectedSources._M_links),
+ #(linked_target: $e._M_connectedTargets._M_connectedLink),
+ #(reserving_target: *($e._M_pReservedFor)),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::multitype_join from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::multitype_join<*,*>{
+ preview (
+ #(
+ "[",
+ $e._M_pJoinNode->_M_connectedSources._M_links._M_vector._M_index - $e._M_pJoinNode->_M_counter,
+ "/",
+ $e._M_pJoinNode->_M_connectedSources._M_links._M_vector._M_index,
+ "]",
+ $e._M_sourceTuple
+ )
+ )
+
+ children (
+ #(
+ #([join_type]: (Concurrency::join_type)$T2),
+ #([offer_count]: $e._M_pJoinNode->_M_connectedSources._M_links._M_vector._M_index - $e._M_pJoinNode->_M_counter),
+ #([input_count]: $e._M_pJoinNode->_M_connectedSources._M_links._M_vector._M_index),
+ #(source_tuple: $e._M_sourceTuple),
+ #(messages: $e._M_pJoinNode->_M_messageBuffer),
+ #(linked_sources: $e._M_pJoinNode->_M_connectedSources._M_links),
+ #(linked_target: $e._M_pJoinNode->_M_connectedTargets._M_connectedLink),
+ #(reserving_target: *($e._M_pJoinNode->_M_pReservedFor)),
+ #(Scheduler: *($e._M_pJoinNode->_M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_pJoinNode->_M_messageProcessor._M_pScheduleGroup)),
+ #([raw _M_pSourceJoins] : $e._M_pSourceJoins)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::timer from <agents.h>
+;------------------------------------------------------------------------------
+Concurrency::timer<*>{
+ preview (
+ #(
+ $e._M_state
+ )
+ )
+
+ children (
+ #(
+ #(state: $e._M_state),
+ #(value: $e._M_value),
+ #(repeating: $e._M_fRepeating),
+ #(interval_ms: $e._M_ms),
+ #(linked_target: *($e._M_connectedTargets._M_connectedLink)),
+ #(reserving_target: *($e._M_pReservedFor)),
+ #(Scheduler: *($e._M_messageProcessor._M_pScheduler)),
+ #(ScheduleGroup: *($e._M_messageProcessor._M_pScheduleGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::SchedulerBase from <SchedulerBase.h>
+; Concurrency::details::ThreadScheduler from <ThreadScheduler.h>
+; Concurrency::details::UMSThreadScheduler from <UMSThreadScheduler.h>
+;------------------------------------------------------------------------------
+Concurrency::details::SchedulerBase|Concurrency::details::ThreadScheduler|Concurrency::details::UMSThreadScheduler{
+ preview (
+ #(
+ "[",
+ $e.m_id,
+ "] ",
+ #if ($e.m_schedulerKind == 0) ("ThreadScheduler")
+ #else ("UmsScheduler"),
+ #if ($e.m_id == $e.s_pDefaultScheduler->m_id) (", default")
+ #else ("")
+ )
+ )
+
+ children (
+ #(
+ #(ID: $e.m_id),
+ #(SchedulerPolicy: $e.m_policy),
+ #(VirtualProcessorCount: $e.m_virtualProcessorCount),
+ #(ReferenceCount: $e.m_refCount),
+ #([isDefaultScheduler]: $e.m_id == $e.s_pDefaultScheduler->m_id)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::ScheduleGroupBase from <ScheduleGroupBase.h>
+; Concurrency::details::CacheLocalScheduleGroup from <CacheLocalScheduleGroup.h>
+; Concurrency::details::FairScheduleGroup from <FairScheduleGroup.h>
+;------------------------------------------------------------------------------
+Concurrency::details::ScheduleGroupBase|Concurrency::details::CacheLocalScheduleGroup|Concurrency::details::FairScheduleGroup{
+ preview (
+ #(
+ "[",
+ $e.m_id,
+ "]",
+ #if ($e.m_kind & 4) (" AnonymousScheduleGroup")
+ #else ("")
+ )
+ )
+
+ children (
+ #(
+ #(ID: $e.m_id),
+ #(Scheduler: *($e.m_pScheduler))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::ContextBase from <ContextBase.h>
+; Concurrency::details::InternalContextBase from <InternalContextBase.h>
+; Concurrency::details::ThreadInternalContext from <ThreadInternalContext.h>
+; Concurrency::details::UMSThreadInternalContext from <UMSThreadInternalContext.h>
+;------------------------------------------------------------------------------
+Concurrency::details::ContextBase|Concurrency::details::InternalContextBase|Concurrency::details::ThreadInternalContext|Concurrency::details::UMSThreadInternalContext{
+ preview (
+ #(
+ "[",
+ $e.m_threadId,
+ "] ",
+ #if ($e.m_blockedState == 0) ("not_concrt_blocked")
+ #elif ($e.m_blockedState == 1) ("concrt_blocked")
+ #elif ($e.m_blockedState == 2) ("ums_sync_blocked")
+ #elif ($e.m_blockedState == 4) ("ums_async_blocked")
+ #else ("")
+ )
+ )
+
+ children (
+ #(
+ #(ID: $e.m_id),
+ #(ThreadID: $e.m_threadId),
+ #(Scheduler: *($e.m_pScheduler)),
+ #(ScheduleGroup: *($e.m_pGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::ExternalContextBase from <ExternalContextBase.h>
+;------------------------------------------------------------------------------
+Concurrency::details::ExternalContextBase{
+ preview (
+ #(
+ "[",
+ $e.m_threadId,
+ "] ",
+ #if ($e.m_contextSwitchingFence == 1) ("concrt_blocked")
+ #else ("not_concrt_blocked")
+ )
+ )
+
+ children (
+ #(
+ #(ID: $e.m_id),
+ #(ThreadID: $e.m_threadId),
+ #(Scheduler: *($e.m_pScheduler)),
+ #(ScheduleGroup: *($e.m_pGroup))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::SchedulerPolicy from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::SchedulerPolicy{
+ preview (
+ #(
+ $e._M_pPolicyBag->_M_values._M_specificValues._M_schedulerKind,
+ ", Min=",
+ $e._M_pPolicyBag->_M_values._M_specificValues._M_minConcurrency,
+ ", Max=",
+ $e._M_pPolicyBag->_M_values._M_specificValues._M_maxConcurrency
+ )
+ )
+
+ children (
+ #(
+ #(SchedulerKind: $e._M_pPolicyBag->_M_values._M_specificValues._M_schedulerKind),
+ #(MinConcurrency: $e._M_pPolicyBag->_M_values._M_specificValues._M_minConcurrency),
+ #(MaxConcurrency: $e._M_pPolicyBag->_M_values._M_specificValues._M_maxConcurrency),
+ #(TargetOversubscriptionFactor: $e._M_pPolicyBag->_M_values._M_specificValues._M_targetOversubscriptionFactor),
+ #(LocalContextCacheSize: $e._M_pPolicyBag->_M_values._M_specificValues._M_localContextCacheSize),
+ #(ContextStackSize: $e._M_pPolicyBag->_M_values._M_specificValues._M_contextStackSize),
+ #(ContextPriority: $e._M_pPolicyBag->_M_values._M_specificValues._M_contextPriority),
+ #(SchedulingProtocol: $e._M_pPolicyBag->_M_values._M_specificValues._M_schedulingProtocol),
+ #(DynamicProgressFeedback: $e._M_pPolicyBag->_M_values._M_specificValues._M_dynamicProgressFeedback)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::event from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::event{
+ preview (
+ #(
+ #if ($e._M_pWaitChain == 1) ("set")
+ #else ("not_set")
+ )
+ )
+
+ children (
+ #(
+ #([is_set]: ($e._M_pWaitChain == 1)),
+ #([has_waiters]: (($e._M_pWaitChain != 0) && ($e._M_pWaitChain != 1)))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::critical_section from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::critical_section{
+ preview (
+ #(
+ #if ($e._M_pHead != 0) ("locked")
+ #else ("not_locked")
+ )
+ )
+
+ children (
+ #(
+ #([is_locked]: ($e._M_pHead != 0)),
+ #(OwningContext: *((Concurrency::Context*)($e._M_activeNode[0])))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::critical_section::scoped_lock from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::critical_section::scoped_lock{
+ preview (
+ #(
+ $e._M_critical_section
+ )
+ )
+
+ children (
+ #(
+ CriticalSection: $e._M_critical_section
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::reader_writer_lock from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::reader_writer_lock{
+ preview (
+ #(
+ #if (($e._M_lockState < 8) && ($e._M_lockState & 2)) ("held_by_writer")
+ #elif ($e._M_lockState >= 8) (
+ #(
+ "held_by_reader(s) [",
+ ($e._M_lockState / 8),
+ "]"
+ )
+ )
+ #else ("not_held")
+ )
+ )
+
+ children (
+ #(
+ #([is_reader_lock_held]: ($e._M_lockState >= 8)),
+ #([num_reader_lock_holders]: ($e._M_lockState / 8)),
+ #([is_writer_lock_held]: ($e._M_lockState < 8) && ($e._M_lockState & 2)),
+ #(OwningWriterContext: *((Concurrency::Context*)($e._M_activeWriter[0])))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::reader_writer_lock::scoped_lock from <concrt.h>
+; Concurrency::reader_writer_lock::scoped_lock_read from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::reader_writer_lock::scoped_lock|Concurrency::reader_writer_lock::scoped_lock_read{
+ preview (
+ #(
+ $e._M_reader_writer_lock
+ )
+ )
+
+ children (
+ #(
+ ReaderWriterLock: $e._M_reader_writer_lock
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::_TaskCollectionBase from <concrt.h>
+;------------------------------------------------------------------------------
+Concurrency::details::_TaskCollectionBase{
+ preview (
+ #(
+ #if ((((int)$e._M_pException & ~0x3) != 0) && (((int)$e._M_pException & ~0x3) != 0xC)) ("exception")
+ #else ("no_exception")
+ )
+ )
+
+ children (
+ #(
+ #([has_exception]: (((int)$e._M_pException & ~0x3) != 0) && (((int)$e._M_pException & ~0x3) != 0xC)),
+ #(CreatingContext: *((Concurrency::Context*)$e._M_pOwningContext))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::task_group from <ppl.h>
+; Concurrency::structured_task_group from <ppl.h>
+;------------------------------------------------------------------------------
+Concurrency::task_group|Concurrency::structured_task_group{
+ preview (
+ #(
+ #if ((((int)$e._M_task_collection._M_pException & ~0x3) != 0) && (((int)$e._M_task_collection._M_pException & ~0x3) != 0xC)) ("exception")
+ #else ("no_exception")
+ )
+ )
+
+ children (
+ #(
+ #([has_exception]: (((int)$e._M_task_collection._M_pException & ~0x3) != 0) && (((int)$e._M_task_collection._M_pException & ~0x3) != 0xC)),
+ #(CreatingContext: *((Concurrency::Context*)$e._M_task_collection._M_pOwningContext))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::task_handle from <ppl.h>
+;------------------------------------------------------------------------------
+Concurrency::task_handle<*>{
+ preview (
+ #(
+ $e._M_function
+ )
+ )
+
+ children (
+ #(
+ #(Function: $e._M_function),
+ #(RuntimeOwnsLifetime: $e._M_fRuntimeOwnsLifetime),
+ #(TaskCollection: *($e._M_pTaskCollection))
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::combinable from <ppl.h>
+;------------------------------------------------------------------------------
+Concurrency::combinable<*>{
+ preview(
+ #(
+ "(",
+ #array(
+ expr: *($e._M_buckets[$i]),
+ size: $e._M_size
+ ) : #list(
+ head: $e,
+ next: _M_chain
+ ) : $e._M_value,
+ ")"
+ )
+ )
+ children(
+ #(
+ #array(
+ expr: *($e._M_buckets[$i]),
+ size: $e._M_size
+ ) : #list(
+ head: $e,
+ next: _M_chain
+ ) : $e._M_value,
+ #(InitFunction : $e._M_fnInitialize)
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::concurrent_vector from <concurrent_vector.h>
+;------------------------------------------------------------------------------
+Concurrency::concurrent_vector<*,*>{
+ preview(
+ #(
+ "[",
+ $e._My_early_size._M_value,
+ "](",
+ #array (
+ expr: #(
+ #if (($i >> 1) == 0) ((($T1*)$e._My_segment._M_value[0]._My_array)[$i])
+ #else ((($T1*)$e._My_segment._M_value[__log2($i)]._My_array)[$i - (0x1 << __log2($i))])
+ ),
+ size: $e._My_early_size._M_value
+ ),
+ ")"
+ )
+ )
+ children(
+ #(
+ [size] : $e._My_early_size._M_value,
+ #array (
+ expr: #(
+ #if (($i >> 1) == 0) ((($T1*)$e._My_segment._M_value[0]._My_array)[$i])
+ #else ((($T1*)$e._My_segment._M_value[__log2($i)]._My_array)[$i - (0x1 << __log2($i))])
+ ),
+ size: $e._My_early_size._M_value
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::_Vector_iterator from <concurrent_vector.h>
+;------------------------------------------------------------------------------
+Concurrency::details::_Vector_iterator<Concurrency::concurrent_vector<*,*>,*>{
+ preview(
+ #(
+ #if (($e._My_index >> 1) == 0) ((($T1*)$e._My_vector->_My_segment._M_value[0]._My_array)[$e._My_index])
+ #else ((($T1*)$e._My_vector->_My_segment._M_value[__log2($e._My_index)]._My_array)[$e._My_index - (0x1 << __log2($e._My_index))])
+ )
+ )
+ children(
+ #(
+ [ptr]: #if (($e._My_index >> 1) == 0) (&((($T1*)$e._My_vector->_My_segment._M_value[0]._My_array)[$e._My_index]))
+ #else (&((($T1*)$e._My_vector->_My_segment._M_value[__log2($e._My_index)]._My_array)[$e._My_index - (0x1 << __log2($e._My_index))]))
+
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::concurrent_queue from <concurrent_queue.h>
+;------------------------------------------------------------------------------
+Concurrency::concurrent_queue<*,*>{
+ preview
+ (
+ #(
+ "[",
+ $e._My_rep->_Tail_counter._M_value - $e._My_rep->_Head_counter._M_value,
+ "](",
+ #array
+ (
+ expr : #if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 0) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 1) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 2) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 3) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 4) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 5) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 6) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 7) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 8) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 9) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 10) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 11) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 12) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 13) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 14) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 15) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 16) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 17) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 18) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 19) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])))))))))))))))))))),
+ size : #if ($e._My_rep->_Tail_counter._M_value-$e._My_rep->_Head_counter._M_value < 20*8*$e._Items_per_page) ($e._My_rep->_Tail_counter._M_value-$e._My_rep->_Head_counter._M_value)
+ #else (20*8*$e._Items_per_page)
+ ),
+ ")"
+ )
+ )
+ children
+ (
+ #(
+ #([unsafe_size]: $e._My_rep->_Tail_counter._M_value-$e._My_rep->_Head_counter._M_value),
+ #array
+ (
+ expr : #if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 0) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 1) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 2) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 3) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 4) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 5) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 6) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 7) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 8) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 9) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 10) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 11) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 12) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 13) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 14) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 15) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 16) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 17) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 18) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else (#if (($i+($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)))/(8*$e._Items_per_page)-($e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>(8*($e._Items_per_page-1)))*($i%8+$e._My_rep->_Head_counter._M_value%(8*$e._Items_per_page)>=(8*$e._Items_per_page)) == 19) ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])
+ #else ((($T1*)(($e._My_rep->_Array[(($i+$e._My_rep->_Head_counter._M_value)*3%8)]._Head_page._M_value->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next->_Next) + 1))[(($i+$e._My_rep->_Head_counter._M_value)&-8)/8&$e._Items_per_page-1])))))))))))))))))))),
+ size : #if ($e._My_rep->_Tail_counter._M_value-$e._My_rep->_Head_counter._M_value < 20*8*$e._Items_per_page) ($e._My_rep->_Tail_counter._M_value-$e._My_rep->_Head_counter._M_value)
+ #else (20*8*$e._Items_per_page)
+ )
+ )
+ )
+}
+
+;------------------------------------------------------------------------------
+; Concurrency::details::_Concurrent_queue_iterator from <concurrent_queue.h>
+;------------------------------------------------------------------------------
+Concurrency::details::_Concurrent_queue_iterator<Concurrency::concurrent_queue<*,*>,*>{
+ preview(
+ #(
+ *(($T1*)$e._My_item)
+ )
+ )
+ children(
+ #(
+ [ptr]: (($T1*)$e._My_item)
+
+ )
+ )
+}
+
+; This section lets you define your own errors for the HRESULT display.
+; You need to list the error code in unsigned decimal, followed by the message.
+; Changes will take effect the next time you redisplay the variable.
+
+[hresult]
+;1234=my custom error code
+
+[Visualizer]
+
+glm::detail::tvec2<*>{
+ preview (
+ #(#($c.x,$c.y))
+ )
+ children (
+ #([x]: $c.x,[y]: $c.y)
+ )
+}
+
+glm::detail::tvec3<*>{
+ preview (
+ #($e.x,$e.y,$e.z)
+ )
+ children (
+ #([x]: $e.x,[y]: $e.y,[z]: $e.z)
+ )
+}
+
+glm::detail::tvec4<*>{
+ preview (
+ #($c.x,$c.y,$c.z,$c.w)
+ )
+ children (
+        #([x]: $e.x,[y]: $e.y,[z]: $e.z,[w]: $e.w)
+ )
+}
diff --git a/3rdparty/glm/source/util/glm.natvis b/3rdparty/glm/source/util/glm.natvis
new file mode 100644
index 0000000..4db2418
--- /dev/null
+++ b/3rdparty/glm/source/util/glm.natvis
@@ -0,0 +1,555 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ GLM debugger visualizers for Visual Studio
+
+ Makes debugging code using GLM easier by making data more easily accessible
+ from the debugger watch windows.
+
+ For example, a variable declared like this:
+
+ glm::vec4 v = glm::vec4(1, 2, 3, 4);
+
+ Will show up like this in the default debugger windows:
+
+ Name Value
+ ..............................................................
+ v {x=1.000000 r=1.000000 s=1.000000 y=2.000000 ...}
+
+ But if you use this file, it will show up like this:
+
+ Name Value
+ ..................
+ v [1 2 3 4]
+
+ === How to Use ===
+
+ Copy this file to the project directory of each project using GLM, or just copy it to
+
+    %USERPROFILE%\Documents\Visual Studio 2019\Visualizers\  (replace '2019' when necessary)
+ or
+ %VSINSTALLDIR%\Common7\Packages\Debugger\Visualizers\ (requires admin access)
+
+ if you want to use this from every project.
+ See also https://docs.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects
+-->
+
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+
+ <Type Name="glm::vec&lt;1,*,*&gt;">
+ <DisplayString>[{x,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;2,*,*&gt;">
+ <DisplayString>[{x,g} {y,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;3,*,*&gt;">
+ <DisplayString>[{x,g} {y,g} {z,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;4,*,*&gt;">
+ <DisplayString>[{x,g} {y,g} {z,g} {w,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ <Item Name="w">w,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;1,bool,*&gt;" Priority="High">
+ <DisplayString>[{(int)x}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;2,bool,*&gt;" Priority="High">
+ <DisplayString>[{(int)x} {(int)y}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x</Item>
+ <Item Name="y">y</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;3,bool,*&gt;" Priority="High">
+    <DisplayString>[{(int)x} {(int)y} {(int)z}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x</Item>
+ <Item Name="y">y</Item>
+ <Item Name="z">z</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;4,bool,*&gt;" Priority="High">
+    <DisplayString>[{(int)x} {(int)y} {(int)z} {(int)w}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x</Item>
+ <Item Name="y">y</Item>
+ <Item Name="z">z</Item>
+ <Item Name="w">w</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;2,float,*&gt;" Priority="High">
+ <DisplayString>[{x,g} {y,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5F3759DF - ((*(int *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(float *)&amp;i</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0f,g</Item>
+ </If>
+ </CustomListItems>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;3,float,*&gt;" Priority="High">
+ <DisplayString>[{x,g} {y,g} {z,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y+z*z"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5F3759DF - ((*(int *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(float *)&amp;i</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0f,g</Item>
+ </If>
+ </CustomListItems>
+ <Synthetic Name="[rgba]">
+ <DisplayString>
+ <!-- hex RGBA color - alpha is assumed to be 255 -->
+ #{
+ (unsigned((x&lt;0?0:(x&gt;1?1:x))*255.5f) &lt;&lt; 24) |
+ (unsigned((y&lt;0?0:(y&gt;1?1:y))*255.5f) &lt;&lt; 16) |
+ (unsigned((z&lt;0?0:(z&gt;1?1:z))*255.5f) &lt;&lt; 8) | 0xFF,Xb
+ }
+ </DisplayString>
+ </Synthetic>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;4,float,*&gt;" Priority="High">
+ <DisplayString>[{x,g} {y,g} {z,g} {w,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y+z*z+w*w"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5F3759DF - ((*(int *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(float *)&amp;i</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0f,g</Item>
+ </If>
+ </CustomListItems>
+ <Synthetic Name="[rgba]">
+ <DisplayString>
+ <!-- hex RGBA color -->
+ #{
+ (unsigned((x&lt;0?0:(x&gt;1?1:x))*255.5f) &lt;&lt; 24) |
+ (unsigned((y&lt;0?0:(y&gt;1?1:y))*255.5f) &lt;&lt; 16) |
+ (unsigned((z&lt;0?0:(z&gt;1?1:z))*255.5f) &lt;&lt; 8) |
+ (unsigned((w&lt;0?0:(w&gt;1?1:w))*255.5f) &lt;&lt; 0),Xb
+ }
+ </DisplayString>
+ </Synthetic>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ <Item Name="w">w,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;2,double,*&gt;" Priority="High">
+ <DisplayString>[{x,g} {y,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5FE6EB50C7B537A9 - ((*(long long *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(double *)&amp;i</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0,g</Item>
+ </If>
+ </CustomListItems>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;3,double,*&gt;" Priority="High">
+ <DisplayString>[{x,g} {y,g} {z,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y+z*z"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5FE6EB50C7B537A9 - ((*(long long *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(double *)&amp;i</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0,g</Item>
+ </If>
+ </CustomListItems>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::vec&lt;4,double,*&gt;" Priority="High">
+ <DisplayString>[{x,g} {y,g} {z,g} {w,g}]</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y+z*z+w*w"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5FE6EB50C7B537A9 - ((*(long long *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(double *)&amp;i</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0,g</Item>
+ </If>
+ </CustomListItems>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ <Item Name="w">w,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::qua&lt;*,*&gt;">
+ <DisplayString>{w,g} + {x,g}i + {y,g}j + {z,g}k</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ <Item Name="w">w,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::qua&lt;float,*&gt;" Priority="High">
+ <DisplayString>{w,g} + {x,g}i + {y,g}j + {z,g}k</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y+z*z+w*w"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5F3759DF - ((*(int *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(float *)&amp;i</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Exec>k = k * (1.5f - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0f,g</Item>
+ </If>
+ </CustomListItems>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ <Item Name="w">w,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::qua&lt;double,*&gt;" Priority="High">
+ <DisplayString>{w,g} + {x,g}i + {y,g}j + {z,g}k</DisplayString>
+ <Expand HideRawView="1">
+ <CustomListItems MaxItemsPerView="1">
+ <!-- calculate length using fast inverse sqrt -->
+ <Variable Name="k" InitialValue="x*x+y*y+z*z+w*w"/>
+ <Variable Name="n" InitialValue="k/2"/>
+ <Variable Name="i" InitialValue="0x5FE6EB50C7B537A9 - ((*(long long *)&amp;k) &gt;&gt; 1)"/>
+ <If Condition="k != 0">
+ <Exec>k = *(double *)&amp;i</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Exec>k = k * (1.5 - (n * k * k))</Exec>
+ <Item Name="[len]">1/k,g</Item>
+ </If>
+ <If Condition="k == 0">
+ <Item Name="[len]">0.0,g</Item>
+ </If>
+ </CustomListItems>
+ <Item Name="x">x,g</Item>
+ <Item Name="y">y,g</Item>
+ <Item Name="z">z,g</Item>
+ <Item Name="w">w,g</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::mat&lt;2,2,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::mat&lt;2,3,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 3">
+ <DisplayString>[{value[0].z,g} {value[1].z,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::mat&lt;2,4,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 3">
+ <DisplayString>[{value[0].z,g} {value[1].z,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 4">
+ <DisplayString>[{value[0].w,g} {value[1].w,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+  <Type Name="glm::mat&lt;3,2,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]} {value[2]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g} {value[2].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g} {value[2].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ <Item Name="col 3">value[2]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::mat&lt;3,3,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]} {value[2]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g} {value[2].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g} {value[2].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 3">
+ <DisplayString>[{value[0].z,g} {value[1].z,g} {value[2].z,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ <Item Name="col 3">value[2]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::mat&lt;3,4,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]} {value[2]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g} {value[2].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g} {value[2].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 3">
+ <DisplayString>[{value[0].z,g} {value[1].z,g} {value[2].z,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 4">
+ <DisplayString>[{value[0].w,g} {value[1].w,g} {value[2].w,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ <Item Name="col 3">value[2]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+	<Type Name="glm::mat&lt;4,2,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]} {value[2]} {value[3]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g} {value[2].x,g} {value[3].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g} {value[2].y,g} {value[3].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ <Item Name="col 3">value[2]</Item>
+ <Item Name="col 4">value[3]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+	<Type Name="glm::mat&lt;4,3,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]} {value[2]} {value[3]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g} {value[2].x,g} {value[3].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g} {value[2].y,g} {value[3].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 3">
+ <DisplayString>[{value[0].z,g} {value[1].z,g} {value[2].z,g} {value[3].z,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ <Item Name="col 3">value[2]</Item>
+ <Item Name="col 4">value[3]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::mat&lt;4,4,*,*&gt;">
+ <DisplayString>[{value[0]} {value[1]} {value[2]} {value[3]}]</DisplayString>
+ <Expand HideRawView="1">
+ <!-- display matrix in row major order - it makes more sense -->
+ <Synthetic Name="row 1">
+ <DisplayString>[{value[0].x,g} {value[1].x,g} {value[2].x,g} {value[3].x,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 2">
+ <DisplayString>[{value[0].y,g} {value[1].y,g} {value[2].y,g} {value[3].y,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 3">
+ <DisplayString>[{value[0].z,g} {value[1].z,g} {value[2].z,g} {value[3].z,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="row 4">
+ <DisplayString>[{value[0].w,g} {value[1].w,g} {value[2].w,g} {value[3].w,g}]</DisplayString>
+ </Synthetic>
+ <Synthetic Name="columns">
+ <Expand>
+ <Item Name="col 1">value[0]</Item>
+ <Item Name="col 2">value[1]</Item>
+ <Item Name="col 3">value[2]</Item>
+ <Item Name="col 4">value[3]</Item>
+ </Expand>
+ </Synthetic>
+ </Expand>
+ </Type>
+
+ <Type Name="glm::tdualquat&lt;*&gt;">
+ <DisplayString>[r: {real}] [d: {dual}]</DisplayString>
+ <Expand HideRawView="1">
+ <Item Name="real">real</Item>
+ <Item Name="dual">dual</Item>
+ </Expand>
+ </Type>
+
+</AutoVisualizer> \ No newline at end of file
diff --git a/3rdparty/glm/source/util/usertype.dat b/3rdparty/glm/source/util/usertype.dat
new file mode 100644
index 0000000..cb44de3
--- /dev/null
+++ b/3rdparty/glm/source/util/usertype.dat
@@ -0,0 +1,407 @@
+attribute
+const
+uniform
+varying
+break
+continue
+do
+for
+while
+if
+else
+in
+out
+inout
+float
+int
+void
+bool
+true
+false
+discard
+return
+mat2
+mat3
+mat4
+mat2x2
+mat3x3
+mat4x4
+mat2x3
+mat3x2
+mat2x4
+mat4x2
+mat3x4
+mat4x3
+vec2
+vec3
+vec4
+ivec2
+ivec3
+ivec4
+uvec2
+uvec3
+uvec4
+bvec2
+bvec3
+bvec4
+sampler1D
+sampler2D
+sampler3D
+samplerCube
+sampler1DShadow
+sampler2DShadow
+struct
+
+asm
+class
+union
+enum
+typedef
+template
+this
+packed
+goto
+switch
+default
+inline
+noinline
+volatile
+public
+static
+extern
+external
+interface
+long
+short
+double
+half
+fixed
+unsigned
+input
+output
+sampler2DRect
+sampler3DRect
+sampler2DRectShadow
+sizeof
+cast
+namespace
+using
+
+layout
+location
+smooth
+flat
+noperspective
+centroid
+invariant
+lowp
+mediump
+highp
+precision
+patch
+sample
+subroutine
+
+hvec2
+hvec3
+hvec4
+fvec2
+fvec3
+fvec4
+dvec2
+dvec3
+dvec4
+
+on
+
+final
+abstract
+limited
+access
+self
+
+uchar
+schar
+uint
+sint
+
+int8
+int16
+int32
+int64
+
+sint8
+sint16
+sint32
+sint64
+
+uint8
+uint16
+uint32
+uint64
+
+float16
+float32
+float64
+
+quat
+hquat
+fquat
+dquat
+
+handle
+handle8
+handle16
+handle32
+handle64
+
+flag
+flag8
+flag16
+flag32
+flag64
+
+import
+export
+
+hmat2
+hmat3
+hmat4
+
+fmat2
+fmat3
+fmat4
+
+dmat2
+dmat3
+dmat4
+
+hmat2x3
+hmat3x2
+hmat2x4
+hmat4x2
+hmat3x4
+hmat4x3
+
+fmat2x3
+fmat3x2
+fmat2x4
+fmat4x2
+fmat3x4
+fmat4x3
+
+dmat2x3
+dmat3x2
+dmat2x4
+dmat4x2
+dmat3x4
+dmat4x3
+
+null
+pi
+epsilon
+infinite
+self
+
+byte
+word
+dword
+qword
+
+new_object
+new_array
+delete_object
+delete_array
+
+int8
+int16
+int32
+int64
+
+i8
+i16
+i32
+i64
+
+i8vec2
+i8vec3
+i8vec4
+
+i16vec2
+i16vec3
+i16vec4
+
+i32vec2
+i32vec3
+i32vec4
+
+i64vec2
+i64vec3
+i64vec4
+
+uint8
+uint16
+uint32
+uint64
+
+u8
+u16
+u32
+u64
+
+u8vec2
+u8vec3
+u8vec4
+
+u16vec2
+u16vec3
+u16vec4
+
+u32vec2
+u32vec3
+u32vec4
+
+u64vec2
+u64vec3
+u64vec4
+
+float16
+float32
+float64
+
+f16
+f32
+f64
+
+f16vec2
+f16vec3
+f16vec4
+
+f32vec2
+f32vec3
+f32vec4
+
+f64vec2
+f64vec3
+f64vec4
+
+f16mat2
+f16mat3
+f16mat4
+
+f16mat2x3
+f16mat2x4
+f16mat3x2
+f16mat3x4
+f16mat4x2
+f16mat4x3
+
+f32mat2
+f32mat3
+f32mat4
+
+f32mat2x3
+f32mat2x4
+f32mat3x2
+f32mat3x4
+f32mat4x2
+f32mat4x3
+
+f64mat2
+f64mat3
+f64mat4
+
+f64mat2x3
+f64mat2x4
+f64mat3x2
+f64mat3x4
+f64mat4x2
+f64mat4x3
+
+f16quat
+f32quat
+f64quat
+
+bool1
+bool2
+bool3
+bool4
+
+bool1x1
+bool2x2
+bool3x3
+bool4x4
+
+bool2x3
+bool2x4
+bool3x2
+bool3x4
+bool4x2
+bool4x3
+
+int1
+int2
+int3
+int4
+
+int1x1
+int2x2
+int3x3
+int4x4
+
+int2x3
+int2x4
+int3x2
+int3x4
+int4x2
+int4x3
+
+half1
+half2
+half3
+half4
+
+half2x2
+half3x3
+half4x4
+
+half2x3
+half2x4
+half3x2
+half3x4
+half4x2
+half4x3
+
+float1
+float2
+float3
+float4
+
+float1x1
+float2x2
+float3x3
+float4x4
+
+float2x3
+float2x4
+float3x2
+float3x4
+float4x2
+float4x3
+
+double1
+double2
+double3
+double4
+
+double1x1
+double2x2
+double3x3
+double4x4
+
+double2x3
+double2x4
+double3x2
+double3x4
+double4x2
+double4x3
diff --git a/3rdparty/imguicolortextedit/CMakeLists.txt b/3rdparty/imguicolortextedit/CMakeLists.txt
index f776171..8d940f7 100644
--- a/3rdparty/imguicolortextedit/CMakeLists.txt
+++ b/3rdparty/imguicolortextedit/CMakeLists.txt
@@ -1,5 +1,6 @@
add_library(ImGuiColorTextEdit
- source/TextEditor.cpp
+ TextEditor.h
+ TextEditor.cpp
)
target_include_directories(ImGuiColorTextEdit PUBLIC
${CMAKE_SOURCE_DIR}/3rdparty/imgui/source
diff --git a/3rdparty/imguicolortextedit/TextEditor.cpp b/3rdparty/imguicolortextedit/TextEditor.cpp
new file mode 100644
index 0000000..02966f0
--- /dev/null
+++ b/3rdparty/imguicolortextedit/TextEditor.cpp
@@ -0,0 +1,3160 @@
+#include <algorithm>
+#include <chrono>
+#include <string>
+#include <regex>
+#include <cmath>
+
+#include "TextEditor.h"
+
+#define IMGUI_DEFINE_MATH_OPERATORS
+#include "imgui.h" // for imGui::GetCurrentWindow()
+
+// TODO
+// - multiline comments vs single-line: latter is blocking start of a ML
+
+template<class InputIt1, class InputIt2, class BinaryPredicate>
+bool equals(InputIt1 first1, InputIt1 last1,
+ InputIt2 first2, InputIt2 last2, BinaryPredicate p)
+{
+ for (; first1 != last1 && first2 != last2; ++first1, ++first2)
+ {
+ if (!p(*first1, *first2))
+ return false;
+ }
+ return first1 == last1 && first2 == last2;
+}
+
+TextEditor::TextEditor()
+ : mLineSpacing(1.0f)
+ , mUndoIndex(0)
+ , mTabSize(4)
+ , mOverwrite(false)
+ , mReadOnly(false)
+ , mWithinRender(false)
+ , mScrollToCursor(false)
+ , mScrollToTop(false)
+ , mTextChanged(false)
+ , mColorizerEnabled(true)
+ , mTextStart(20.0f)
+ , mLeftMargin(10)
+ , mCursorPositionChanged(false)
+ , mColorRangeMin(0)
+ , mColorRangeMax(0)
+ , mSelectionMode(SelectionMode::Normal)
+ , mCheckComments(true)
+ , mLastClick(-1.0f)
+ , mHandleKeyboardInputs(true)
+ , mHandleMouseInputs(true)
+ , mIgnoreImGuiChild(false)
+ , mShowWhitespaces(true)
+ , mStartTime(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count())
+{
+ SetPalette(GetDarkPalette());
+ SetLanguageDefinition(LanguageDefinition::HLSL());
+ mLines.push_back(Line());
+}
+
+TextEditor::~TextEditor()
+{
+}
+
+void TextEditor::SetLanguageDefinition(const LanguageDefinition & aLanguageDef)
+{
+ mLanguageDefinition = aLanguageDef;
+ mRegexList.clear();
+
+ for (auto& r : mLanguageDefinition.mTokenRegexStrings)
+ mRegexList.push_back(std::make_pair(std::regex(r.first, std::regex_constants::optimize), r.second));
+
+ Colorize();
+}
+
+void TextEditor::SetPalette(const Palette & aValue)
+{
+ mPaletteBase = aValue;
+}
+
+std::string TextEditor::GetText(const Coordinates & aStart, const Coordinates & aEnd) const
+{
+ std::string result;
+
+ auto lstart = aStart.mLine;
+ auto lend = aEnd.mLine;
+ auto istart = GetCharacterIndex(aStart);
+ auto iend = GetCharacterIndex(aEnd);
+ size_t s = 0;
+
+ for (size_t i = lstart; i < lend; i++)
+ s += mLines[i].size();
+
+ result.reserve(s + s / 8);
+
+ while (istart < iend || lstart < lend)
+ {
+ if (lstart >= (int)mLines.size())
+ break;
+
+ auto& line = mLines[lstart];
+ if (istart < (int)line.size())
+ {
+ result += line[istart].mChar;
+ istart++;
+ }
+ else
+ {
+ istart = 0;
+ ++lstart;
+ result += '\n';
+ }
+ }
+
+ return result;
+}
+
+TextEditor::Coordinates TextEditor::GetActualCursorCoordinates() const
+{
+ return SanitizeCoordinates(mState.mCursorPosition);
+}
+
+TextEditor::Coordinates TextEditor::SanitizeCoordinates(const Coordinates & aValue) const
+{
+ auto line = aValue.mLine;
+ auto column = aValue.mColumn;
+ if (line >= (int)mLines.size())
+ {
+ if (mLines.empty())
+ {
+ line = 0;
+ column = 0;
+ }
+ else
+ {
+ line = (int)mLines.size() - 1;
+ column = GetLineMaxColumn(line);
+ }
+ return Coordinates(line, column);
+ }
+ else
+ {
+ column = mLines.empty() ? 0 : std::min(column, GetLineMaxColumn(line));
+ return Coordinates(line, column);
+ }
+}
+
+// https://en.wikipedia.org/wiki/UTF-8
+// We assume that the char is a standalone character (<128) or a leading byte of an UTF-8 code sequence (non-10xxxxxx code)
+static int UTF8CharLength(TextEditor::Char c)
+{
+ if ((c & 0xFE) == 0xFC)
+ return 6;
+ if ((c & 0xFC) == 0xF8)
+ return 5;
+ if ((c & 0xF8) == 0xF0)
+ return 4;
+ else if ((c & 0xF0) == 0xE0)
+ return 3;
+ else if ((c & 0xE0) == 0xC0)
+ return 2;
+ return 1;
+}
+
+// "Borrowed" from ImGui source
+static inline int ImTextCharToUtf8(char* buf, int buf_size, unsigned int c)
+{
+ if (c < 0x80)
+ {
+ buf[0] = (char)c;
+ return 1;
+ }
+ if (c < 0x800)
+ {
+ if (buf_size < 2) return 0;
+ buf[0] = (char)(0xc0 + (c >> 6));
+ buf[1] = (char)(0x80 + (c & 0x3f));
+ return 2;
+ }
+ if (c >= 0xdc00 && c < 0xe000)
+ {
+ return 0;
+ }
+ if (c >= 0xd800 && c < 0xdc00)
+ {
+ if (buf_size < 4) return 0;
+ buf[0] = (char)(0xf0 + (c >> 18));
+ buf[1] = (char)(0x80 + ((c >> 12) & 0x3f));
+ buf[2] = (char)(0x80 + ((c >> 6) & 0x3f));
+ buf[3] = (char)(0x80 + ((c) & 0x3f));
+ return 4;
+ }
+ //else if (c < 0x10000)
+ {
+ if (buf_size < 3) return 0;
+ buf[0] = (char)(0xe0 + (c >> 12));
+ buf[1] = (char)(0x80 + ((c >> 6) & 0x3f));
+ buf[2] = (char)(0x80 + ((c) & 0x3f));
+ return 3;
+ }
+}
+
+void TextEditor::Advance(Coordinates & aCoordinates) const
+{
+ if (aCoordinates.mLine < (int)mLines.size())
+ {
+ auto& line = mLines[aCoordinates.mLine];
+ auto cindex = GetCharacterIndex(aCoordinates);
+
+ if (cindex + 1 < (int)line.size())
+ {
+ auto delta = UTF8CharLength(line[cindex].mChar);
+ cindex = std::min(cindex + delta, (int)line.size() - 1);
+ }
+ else
+ {
+ ++aCoordinates.mLine;
+ cindex = 0;
+ }
+ aCoordinates.mColumn = GetCharacterColumn(aCoordinates.mLine, cindex);
+ }
+}
+
+void TextEditor::DeleteRange(const Coordinates & aStart, const Coordinates & aEnd)
+{
+ assert(aEnd >= aStart);
+ assert(!mReadOnly);
+
+ //printf("D(%d.%d)-(%d.%d)\n", aStart.mLine, aStart.mColumn, aEnd.mLine, aEnd.mColumn);
+
+ if (aEnd == aStart)
+ return;
+
+ auto start = GetCharacterIndex(aStart);
+ auto end = GetCharacterIndex(aEnd);
+
+ if (aStart.mLine == aEnd.mLine)
+ {
+ auto& line = mLines[aStart.mLine];
+ auto n = GetLineMaxColumn(aStart.mLine);
+ if (aEnd.mColumn >= n)
+ line.erase(line.begin() + start, line.end());
+ else
+ line.erase(line.begin() + start, line.begin() + end);
+ }
+ else
+ {
+ auto& firstLine = mLines[aStart.mLine];
+ auto& lastLine = mLines[aEnd.mLine];
+
+ firstLine.erase(firstLine.begin() + start, firstLine.end());
+ lastLine.erase(lastLine.begin(), lastLine.begin() + end);
+
+ if (aStart.mLine < aEnd.mLine)
+ firstLine.insert(firstLine.end(), lastLine.begin(), lastLine.end());
+
+ if (aStart.mLine < aEnd.mLine)
+ RemoveLine(aStart.mLine + 1, aEnd.mLine + 1);
+ }
+
+ mTextChanged = true;
+}
+
+int TextEditor::InsertTextAt(Coordinates& /* inout */ aWhere, const char * aValue)
+{
+ assert(!mReadOnly);
+
+ int cindex = GetCharacterIndex(aWhere);
+ int totalLines = 0;
+ while (*aValue != '\0')
+ {
+ assert(!mLines.empty());
+
+ if (*aValue == '\r')
+ {
+ // skip
+ ++aValue;
+ }
+ else if (*aValue == '\n')
+ {
+ if (cindex < (int)mLines[aWhere.mLine].size())
+ {
+ auto& newLine = InsertLine(aWhere.mLine + 1);
+ auto& line = mLines[aWhere.mLine];
+ newLine.insert(newLine.begin(), line.begin() + cindex, line.end());
+ line.erase(line.begin() + cindex, line.end());
+ }
+ else
+ {
+ InsertLine(aWhere.mLine + 1);
+ }
+ ++aWhere.mLine;
+ aWhere.mColumn = 0;
+ cindex = 0;
+ ++totalLines;
+ ++aValue;
+ }
+ else
+ {
+ auto& line = mLines[aWhere.mLine];
+ auto d = UTF8CharLength(*aValue);
+ while (d-- > 0 && *aValue != '\0')
+ line.insert(line.begin() + cindex++, Glyph(*aValue++, PaletteIndex::Default));
+ ++aWhere.mColumn;
+ }
+
+ mTextChanged = true;
+ }
+
+ return totalLines;
+}
+
+void TextEditor::AddUndo(UndoRecord& aValue)
+{
+ assert(!mReadOnly);
+ //printf("AddUndo: (@%d.%d) +\'%s' [%d.%d .. %d.%d], -\'%s', [%d.%d .. %d.%d] (@%d.%d)\n",
+ // aValue.mBefore.mCursorPosition.mLine, aValue.mBefore.mCursorPosition.mColumn,
+ // aValue.mAdded.c_str(), aValue.mAddedStart.mLine, aValue.mAddedStart.mColumn, aValue.mAddedEnd.mLine, aValue.mAddedEnd.mColumn,
+ // aValue.mRemoved.c_str(), aValue.mRemovedStart.mLine, aValue.mRemovedStart.mColumn, aValue.mRemovedEnd.mLine, aValue.mRemovedEnd.mColumn,
+ // aValue.mAfter.mCursorPosition.mLine, aValue.mAfter.mCursorPosition.mColumn
+ // );
+
+ mUndoBuffer.resize((size_t)(mUndoIndex + 1));
+ mUndoBuffer.back() = aValue;
+ ++mUndoIndex;
+}
+
+TextEditor::Coordinates TextEditor::ScreenPosToCoordinates(const ImVec2& aPosition) const
+{
+ ImVec2 origin = ImGui::GetCursorScreenPos();
+ ImVec2 local(aPosition.x - origin.x, aPosition.y - origin.y);
+
+ int lineNo = std::max(0, (int)floor(local.y / mCharAdvance.y));
+
+ int columnCoord = 0;
+
+ if (lineNo >= 0 && lineNo < (int)mLines.size())
+ {
+ auto& line = mLines.at(lineNo);
+
+ int columnIndex = 0;
+ float columnX = 0.0f;
+
+ while ((size_t)columnIndex < line.size())
+ {
+ float columnWidth = 0.0f;
+
+ if (line[columnIndex].mChar == '\t')
+ {
+ float spaceSize = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, " ").x;
+ float oldX = columnX;
+ float newColumnX = (1.0f + std::floor((1.0f + columnX) / (float(mTabSize) * spaceSize))) * (float(mTabSize) * spaceSize);
+ columnWidth = newColumnX - oldX;
+ if (mTextStart + columnX + columnWidth * 0.5f > local.x)
+ break;
+ columnX = newColumnX;
+ columnCoord = (columnCoord / mTabSize) * mTabSize + mTabSize;
+ columnIndex++;
+ }
+ else
+ {
+ char buf[7];
+ auto d = UTF8CharLength(line[columnIndex].mChar);
+ int i = 0;
+ while (i < 6 && d-- > 0)
+ buf[i++] = line[columnIndex++].mChar;
+ buf[i] = '\0';
+ columnWidth = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, buf).x;
+ if (mTextStart + columnX + columnWidth * 0.5f > local.x)
+ break;
+ columnX += columnWidth;
+ columnCoord++;
+ }
+ }
+ }
+
+ return SanitizeCoordinates(Coordinates(lineNo, columnCoord));
+}
+
+TextEditor::Coordinates TextEditor::FindWordStart(const Coordinates & aFrom) const
+{
+ Coordinates at = aFrom;
+ if (at.mLine >= (int)mLines.size())
+ return at;
+
+ auto& line = mLines[at.mLine];
+ auto cindex = GetCharacterIndex(at);
+
+ if (cindex >= (int)line.size())
+ return at;
+
+ while (cindex > 0 && isspace(line[cindex].mChar))
+ --cindex;
+
+ auto cstart = (PaletteIndex)line[cindex].mColorIndex;
+ while (cindex > 0)
+ {
+ auto c = line[cindex].mChar;
+ if ((c & 0xC0) != 0x80) // not UTF code sequence 10xxxxxx
+ {
+ if (c <= 32 && isspace(c))
+ {
+ cindex++;
+ break;
+ }
+ if (cstart != (PaletteIndex)line[size_t(cindex - 1)].mColorIndex)
+ break;
+ }
+ --cindex;
+ }
+ return Coordinates(at.mLine, GetCharacterColumn(at.mLine, cindex));
+}
+
+TextEditor::Coordinates TextEditor::FindWordEnd(const Coordinates & aFrom) const
+{
+ Coordinates at = aFrom;
+ if (at.mLine >= (int)mLines.size())
+ return at;
+
+ auto& line = mLines[at.mLine];
+ auto cindex = GetCharacterIndex(at);
+
+ if (cindex >= (int)line.size())
+ return at;
+
+ bool prevspace = (bool)isspace(line[cindex].mChar);
+ auto cstart = (PaletteIndex)line[cindex].mColorIndex;
+ while (cindex < (int)line.size())
+ {
+ auto c = line[cindex].mChar;
+ auto d = UTF8CharLength(c);
+ if (cstart != (PaletteIndex)line[cindex].mColorIndex)
+ break;
+
+ if (prevspace != !!isspace(c))
+ {
+ if (isspace(c))
+ while (cindex < (int)line.size() && isspace(line[cindex].mChar))
+ ++cindex;
+ break;
+ }
+ cindex += d;
+ }
+ return Coordinates(aFrom.mLine, GetCharacterColumn(aFrom.mLine, cindex));
+}
+
+TextEditor::Coordinates TextEditor::FindNextWord(const Coordinates & aFrom) const
+{
+ Coordinates at = aFrom;
+ if (at.mLine >= (int)mLines.size())
+ return at;
+
+ // skip to the next non-word character
+ auto cindex = GetCharacterIndex(aFrom);
+ bool isword = false;
+ bool skip = false;
+ if (cindex < (int)mLines[at.mLine].size())
+ {
+ auto& line = mLines[at.mLine];
+ isword = isalnum(line[cindex].mChar);
+ skip = isword;
+ }
+
+ while (!isword || skip)
+ {
+ if (at.mLine >= mLines.size())
+ {
+ auto l = std::max(0, (int) mLines.size() - 1);
+ return Coordinates(l, GetLineMaxColumn(l));
+ }
+
+ auto& line = mLines[at.mLine];
+ if (cindex < (int)line.size())
+ {
+ isword = isalnum(line[cindex].mChar);
+
+ if (isword && !skip)
+ return Coordinates(at.mLine, GetCharacterColumn(at.mLine, cindex));
+
+ if (!isword)
+ skip = false;
+
+ cindex++;
+ }
+ else
+ {
+ cindex = 0;
+ ++at.mLine;
+ skip = false;
+ isword = false;
+ }
+ }
+
+ return at;
+}
+
+int TextEditor::GetCharacterIndex(const Coordinates& aCoordinates) const
+{
+ if (aCoordinates.mLine >= mLines.size())
+ return -1;
+ auto& line = mLines[aCoordinates.mLine];
+ int c = 0;
+ int i = 0;
+ for (; i < line.size() && c < aCoordinates.mColumn;)
+ {
+ if (line[i].mChar == '\t')
+ c = (c / mTabSize) * mTabSize + mTabSize;
+ else
+ ++c;
+ i += UTF8CharLength(line[i].mChar);
+ }
+ return i;
+}
+
+int TextEditor::GetCharacterColumn(int aLine, int aIndex) const
+{
+ if (aLine >= mLines.size())
+ return 0;
+ auto& line = mLines[aLine];
+ int col = 0;
+ int i = 0;
+ while (i < aIndex && i < (int)line.size())
+ {
+ auto c = line[i].mChar;
+ i += UTF8CharLength(c);
+ if (c == '\t')
+ col = (col / mTabSize) * mTabSize + mTabSize;
+ else
+ col++;
+ }
+ return col;
+}
+
+int TextEditor::GetLineCharacterCount(int aLine) const
+{
+ if (aLine >= mLines.size())
+ return 0;
+ auto& line = mLines[aLine];
+ int c = 0;
+ for (unsigned i = 0; i < line.size(); c++)
+ i += UTF8CharLength(line[i].mChar);
+ return c;
+}
+
+int TextEditor::GetLineMaxColumn(int aLine) const
+{
+ if (aLine >= mLines.size())
+ return 0;
+ auto& line = mLines[aLine];
+ int col = 0;
+ for (unsigned i = 0; i < line.size(); )
+ {
+ auto c = line[i].mChar;
+ if (c == '\t')
+ col = (col / mTabSize) * mTabSize + mTabSize;
+ else
+ col++;
+ i += UTF8CharLength(c);
+ }
+ return col;
+}
+
+bool TextEditor::IsOnWordBoundary(const Coordinates & aAt) const
+{
+ if (aAt.mLine >= (int)mLines.size() || aAt.mColumn == 0)
+ return true;
+
+ auto& line = mLines[aAt.mLine];
+ auto cindex = GetCharacterIndex(aAt);
+ if (cindex >= (int)line.size())
+ return true;
+
+ if (mColorizerEnabled)
+ return line[cindex].mColorIndex != line[size_t(cindex - 1)].mColorIndex;
+
+ return isspace(line[cindex].mChar) != isspace(line[cindex - 1].mChar);
+}
+
+void TextEditor::RemoveLine(int aStart, int aEnd)
+{
+ assert(!mReadOnly);
+ assert(aEnd >= aStart);
+ assert(mLines.size() > (size_t)(aEnd - aStart));
+
+ ErrorMarkers etmp;
+ for (auto& i : mErrorMarkers)
+ {
+ ErrorMarkers::value_type e(i.first >= aStart ? i.first - 1 : i.first, i.second);
+ if (e.first >= aStart && e.first <= aEnd)
+ continue;
+ etmp.insert(e);
+ }
+ mErrorMarkers = std::move(etmp);
+
+ Breakpoints btmp;
+ for (auto i : mBreakpoints)
+ {
+ if (i >= aStart && i <= aEnd)
+ continue;
+ btmp.insert(i >= aStart ? i - 1 : i);
+ }
+ mBreakpoints = std::move(btmp);
+
+ mLines.erase(mLines.begin() + aStart, mLines.begin() + aEnd);
+ assert(!mLines.empty());
+
+ mTextChanged = true;
+}
+
+void TextEditor::RemoveLine(int aIndex)
+{
+ assert(!mReadOnly);
+ assert(mLines.size() > 1);
+
+ ErrorMarkers etmp;
+ for (auto& i : mErrorMarkers)
+ {
+ ErrorMarkers::value_type e(i.first > aIndex ? i.first - 1 : i.first, i.second);
+ if (e.first - 1 == aIndex)
+ continue;
+ etmp.insert(e);
+ }
+ mErrorMarkers = std::move(etmp);
+
+ Breakpoints btmp;
+ for (auto i : mBreakpoints)
+ {
+ if (i == aIndex)
+ continue;
+ btmp.insert(i >= aIndex ? i - 1 : i);
+ }
+ mBreakpoints = std::move(btmp);
+
+ mLines.erase(mLines.begin() + aIndex);
+ assert(!mLines.empty());
+
+ mTextChanged = true;
+}
+
+TextEditor::Line& TextEditor::InsertLine(int aIndex)
+{
+ assert(!mReadOnly);
+
+ auto& result = *mLines.insert(mLines.begin() + aIndex, Line());
+
+ ErrorMarkers etmp;
+ for (auto& i : mErrorMarkers)
+ etmp.insert(ErrorMarkers::value_type(i.first >= aIndex ? i.first + 1 : i.first, i.second));
+ mErrorMarkers = std::move(etmp);
+
+ Breakpoints btmp;
+ for (auto i : mBreakpoints)
+ btmp.insert(i >= aIndex ? i + 1 : i);
+ mBreakpoints = std::move(btmp);
+
+ return result;
+}
+
+std::string TextEditor::GetWordUnderCursor() const
+{
+ auto c = GetCursorPosition();
+ return GetWordAt(c);
+}
+
+std::string TextEditor::GetWordAt(const Coordinates & aCoords) const
+{
+ auto start = FindWordStart(aCoords);
+ auto end = FindWordEnd(aCoords);
+
+ std::string r;
+
+ auto istart = GetCharacterIndex(start);
+ auto iend = GetCharacterIndex(end);
+
+ for (auto it = istart; it < iend; ++it)
+ r.push_back(mLines[aCoords.mLine][it].mChar);
+
+ return r;
+}
+
+ImU32 TextEditor::GetGlyphColor(const Glyph & aGlyph) const
+{
+ if (!mColorizerEnabled)
+ return mPalette[(int)PaletteIndex::Default];
+ if (aGlyph.mComment)
+ return mPalette[(int)PaletteIndex::Comment];
+ if (aGlyph.mMultiLineComment)
+ return mPalette[(int)PaletteIndex::MultiLineComment];
+ auto const color = mPalette[(int)aGlyph.mColorIndex];
+ if (aGlyph.mPreprocessor)
+ {
+ const auto ppcolor = mPalette[(int)PaletteIndex::Preprocessor];
+ const int c0 = ((ppcolor & 0xff) + (color & 0xff)) / 2;
+ const int c1 = (((ppcolor >> 8) & 0xff) + ((color >> 8) & 0xff)) / 2;
+ const int c2 = (((ppcolor >> 16) & 0xff) + ((color >> 16) & 0xff)) / 2;
+ const int c3 = (((ppcolor >> 24) & 0xff) + ((color >> 24) & 0xff)) / 2;
+ return ImU32(c0 | (c1 << 8) | (c2 << 16) | (c3 << 24));
+ }
+ return color;
+}
+
+void TextEditor::HandleKeyboardInputs()
+{
+ ImGuiIO& io = ImGui::GetIO();
+ auto shift = io.KeyShift;
+ auto ctrl = io.ConfigMacOSXBehaviors ? io.KeySuper : io.KeyCtrl;
+ auto alt = io.ConfigMacOSXBehaviors ? io.KeyCtrl : io.KeyAlt;
+
+ if (ImGui::IsWindowFocused())
+ {
+ if (ImGui::IsWindowHovered())
+ ImGui::SetMouseCursor(ImGuiMouseCursor_TextInput);
+ //ImGui::CaptureKeyboardFromApp(true);
+
+ io.WantCaptureKeyboard = true;
+ io.WantTextInput = true;
+
+ if (!IsReadOnly() && ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Z)))
+ Undo();
+ else if (!IsReadOnly() && !ctrl && !shift && alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Backspace)))
+ Undo();
+ else if (!IsReadOnly() && ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Y)))
+ Redo();
+ else if (!ctrl && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_UpArrow)))
+ MoveUp(1, shift);
+ else if (!ctrl && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_DownArrow)))
+ MoveDown(1, shift);
+ else if (!alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_LeftArrow)))
+ MoveLeft(1, shift, ctrl);
+ else if (!alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_RightArrow)))
+ MoveRight(1, shift, ctrl);
+ else if (!alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_PageUp)))
+ MoveUp(GetPageSize() - 4, shift);
+ else if (!alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_PageDown)))
+ MoveDown(GetPageSize() - 4, shift);
+ else if (!alt && ctrl && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Home)))
+ MoveTop(shift);
+ else if (ctrl && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_End)))
+ MoveBottom(shift);
+ else if (!ctrl && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Home)))
+ MoveHome(shift);
+ else if (!ctrl && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_End)))
+ MoveEnd(shift);
+ else if (!IsReadOnly() && !ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Delete)))
+ Delete();
+ else if (!IsReadOnly() && !ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Backspace)))
+ Backspace();
+ else if (!ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Insert)))
+ mOverwrite ^= true;
+ else if (ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Insert)))
+ Copy();
+ else if (ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_C)))
+ Copy();
+ else if (!IsReadOnly() && !ctrl && shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Insert)))
+ Paste();
+ else if (!IsReadOnly() && ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_V)))
+ Paste();
+ else if (ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_X)))
+ Cut();
+ else if (!ctrl && shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Delete)))
+ Cut();
+ else if (ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_A)))
+ SelectAll();
+ else if (!IsReadOnly() && !ctrl && !shift && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Enter)))
+ EnterCharacter('\n', false);
+ else if (!IsReadOnly() && !ctrl && !alt && ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Tab)))
+ EnterCharacter('\t', shift);
+
+ if (!IsReadOnly() && !io.InputQueueCharacters.empty())
+ {
+ for (int i = 0; i < io.InputQueueCharacters.Size; i++)
+ {
+ auto c = io.InputQueueCharacters[i];
+ if (c != 0 && (c == '\n' || c >= 32))
+ EnterCharacter(c, shift);
+ }
+ io.InputQueueCharacters.resize(0);
+ }
+ }
+}
+
+void TextEditor::HandleMouseInputs()
+{
+ ImGuiIO& io = ImGui::GetIO();
+ auto shift = io.KeyShift;
+ auto ctrl = io.ConfigMacOSXBehaviors ? io.KeySuper : io.KeyCtrl;
+ auto alt = io.ConfigMacOSXBehaviors ? io.KeyCtrl : io.KeyAlt;
+
+ if (ImGui::IsWindowHovered())
+ {
+ if (!shift && !alt)
+ {
+ auto click = ImGui::IsMouseClicked(0);
+ auto doubleClick = ImGui::IsMouseDoubleClicked(0);
+ auto t = ImGui::GetTime();
+ auto tripleClick = click && !doubleClick && (mLastClick != -1.0f && (t - mLastClick) < io.MouseDoubleClickTime);
+
+ /*
+ Left mouse button triple click
+ */
+
+ if (tripleClick)
+ {
+ if (!ctrl)
+ {
+ mState.mCursorPosition = mInteractiveStart = mInteractiveEnd = ScreenPosToCoordinates(ImGui::GetMousePos());
+ mSelectionMode = SelectionMode::Line;
+ SetSelection(mInteractiveStart, mInteractiveEnd, mSelectionMode);
+ }
+
+ mLastClick = -1.0f;
+ }
+
+ /*
+ Left mouse button double click
+ */
+
+ else if (doubleClick)
+ {
+ if (!ctrl)
+ {
+ mState.mCursorPosition = mInteractiveStart = mInteractiveEnd = ScreenPosToCoordinates(ImGui::GetMousePos());
+ if (mSelectionMode == SelectionMode::Line)
+ mSelectionMode = SelectionMode::Normal;
+ else
+ mSelectionMode = SelectionMode::Word;
+ SetSelection(mInteractiveStart, mInteractiveEnd, mSelectionMode);
+ }
+
+ mLastClick = (float)ImGui::GetTime();
+ }
+
+ /*
+ Left mouse button click
+ */
+ else if (click)
+ {
+ mState.mCursorPosition = mInteractiveStart = mInteractiveEnd = ScreenPosToCoordinates(ImGui::GetMousePos());
+ if (ctrl)
+ mSelectionMode = SelectionMode::Word;
+ else
+ mSelectionMode = SelectionMode::Normal;
+ SetSelection(mInteractiveStart, mInteractiveEnd, mSelectionMode);
+
+ mLastClick = (float)ImGui::GetTime();
+ }
+ // Mouse left button dragging (=> update selection)
+ else if (ImGui::IsMouseDragging(0) && ImGui::IsMouseDown(0))
+ {
+ io.WantCaptureMouse = true;
+ mState.mCursorPosition = mInteractiveEnd = ScreenPosToCoordinates(ImGui::GetMousePos());
+ SetSelection(mInteractiveStart, mInteractiveEnd, mSelectionMode);
+ }
+ }
+ }
+}
+
+// Core renderer: draws (per visible line) the selection, breakpoints, error
+// markers, right-aligned line numbers, the current-line highlight, the
+// blinking cursor and the colorized glyph runs, then reserves scroll space
+// with a Dummy item sized to the longest line.
+void TextEditor::Render()
+{
+	/* Compute mCharAdvance regarding to scaled font size (Ctrl + mouse wheel)*/
+	const float fontSize = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, "#", nullptr, nullptr).x;
+	mCharAdvance = ImVec2(fontSize, ImGui::GetTextLineHeightWithSpacing() * mLineSpacing);
+
+	/* Update palette with the current alpha from style */
+	for (int i = 0; i < (int)PaletteIndex::Max; ++i)
+	{
+		auto color = ImGui::ColorConvertU32ToFloat4(mPaletteBase[i]);
+		color.w *= ImGui::GetStyle().Alpha;
+		mPalette[i] = ImGui::ColorConvertFloat4ToU32(color);
+	}
+
+	// mLineBuffer accumulates same-colored glyph runs; it must be drained
+	// by the end of every frame.
+	assert(mLineBuffer.empty());
+
+	auto contentSize = ImGui::GetWindowContentRegionMax();
+	auto drawList = ImGui::GetWindowDrawList();
+	float longest(mTextStart);
+
+	if (mScrollToTop)
+	{
+		mScrollToTop = false;
+		ImGui::SetScrollY(0.f);
+	}
+
+	ImVec2 cursorScreenPos = ImGui::GetCursorScreenPos();
+	auto scrollX = ImGui::GetScrollX();
+	auto scrollY = ImGui::GetScrollY();
+
+	// Visible line range, derived from the vertical scroll position.
+	auto lineNo = (int)floor(scrollY / mCharAdvance.y);
+	auto globalLineMax = (int)mLines.size();
+	auto lineMax = std::max(0, std::min((int)mLines.size() - 1, lineNo + (int)floor((scrollY + contentSize.y) / mCharAdvance.y)));
+
+	// Deduce mTextStart by evaluating mLines size (global lineMax) plus two spaces as text width
+	char buf[16];
+	snprintf(buf, 16, " %d ", globalLineMax);
+	mTextStart = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, buf, nullptr, nullptr).x + mLeftMargin;
+
+	if (!mLines.empty())
+	{
+		float spaceSize = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, " ", nullptr, nullptr).x;
+
+		while (lineNo <= lineMax)
+		{
+			ImVec2 lineStartScreenPos = ImVec2(cursorScreenPos.x, cursorScreenPos.y + lineNo * mCharAdvance.y);
+			ImVec2 textScreenPos = ImVec2(lineStartScreenPos.x + mTextStart, lineStartScreenPos.y);
+
+			auto& line = mLines[lineNo];
+			longest = std::max(mTextStart + TextDistanceToLineStart(Coordinates(lineNo, GetLineMaxColumn(lineNo))), longest);
+			auto columnNo = 0;
+			Coordinates lineStartCoord(lineNo, 0);
+			Coordinates lineEndCoord(lineNo, GetLineMaxColumn(lineNo));
+
+			// Draw selection for the current line
+			float sstart = -1.0f;
+			float ssend = -1.0f;
+
+			assert(mState.mSelectionStart <= mState.mSelectionEnd);
+			if (mState.mSelectionStart <= lineEndCoord)
+				sstart = mState.mSelectionStart > lineStartCoord ? TextDistanceToLineStart(mState.mSelectionStart) : 0.0f;
+			if (mState.mSelectionEnd > lineStartCoord)
+				ssend = TextDistanceToLineStart(mState.mSelectionEnd < lineEndCoord ? mState.mSelectionEnd : lineEndCoord);
+
+			// Extend the highlight past the end of line to show the selected newline.
+			if (mState.mSelectionEnd.mLine > lineNo)
+				ssend += mCharAdvance.x;
+
+			if (sstart != -1 && ssend != -1 && sstart < ssend)
+			{
+				ImVec2 vstart(lineStartScreenPos.x + mTextStart + sstart, lineStartScreenPos.y);
+				ImVec2 vend(lineStartScreenPos.x + mTextStart + ssend, lineStartScreenPos.y + mCharAdvance.y);
+				drawList->AddRectFilled(vstart, vend, mPalette[(int)PaletteIndex::Selection]);
+			}
+
+			// Draw breakpoints
+			auto start = ImVec2(lineStartScreenPos.x + scrollX, lineStartScreenPos.y);
+
+			// Breakpoints and error markers are keyed by 1-based line numbers.
+			if (mBreakpoints.count(lineNo + 1) != 0)
+			{
+				auto end = ImVec2(lineStartScreenPos.x + contentSize.x + 2.0f * scrollX, lineStartScreenPos.y + mCharAdvance.y);
+				drawList->AddRectFilled(start, end, mPalette[(int)PaletteIndex::Breakpoint]);
+			}
+
+			// Draw error markers
+			auto errorIt = mErrorMarkers.find(lineNo + 1);
+			if (errorIt != mErrorMarkers.end())
+			{
+				auto end = ImVec2(lineStartScreenPos.x + contentSize.x + 2.0f * scrollX, lineStartScreenPos.y + mCharAdvance.y);
+				drawList->AddRectFilled(start, end, mPalette[(int)PaletteIndex::ErrorMarker]);
+
+				// Hovering an error line shows the marker's message as a tooltip.
+				if (ImGui::IsMouseHoveringRect(lineStartScreenPos, end))
+				{
+					ImGui::BeginTooltip();
+					ImGui::PushStyleColor(ImGuiCol_Text, ImVec4(1.0f, 0.2f, 0.2f, 1.0f));
+					ImGui::Text("Error at line %d:", errorIt->first);
+					ImGui::PopStyleColor();
+					ImGui::Separator();
+					ImGui::PushStyleColor(ImGuiCol_Text, ImVec4(1.0f, 1.0f, 0.2f, 1.0f));
+					ImGui::Text("%s", errorIt->second.c_str());
+					ImGui::PopStyleColor();
+					ImGui::EndTooltip();
+				}
+			}
+
+			// Draw line number (right aligned)
+			snprintf(buf, 16, "%d  ", lineNo + 1);
+
+			auto lineNoWidth = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, buf, nullptr, nullptr).x;
+			drawList->AddText(ImVec2(lineStartScreenPos.x + mTextStart - lineNoWidth, lineStartScreenPos.y), mPalette[(int)PaletteIndex::LineNumber], buf);
+
+			if (mState.mCursorPosition.mLine == lineNo)
+			{
+				auto focused = ImGui::IsWindowFocused();
+
+				// Highlight the current line (where the cursor is)
+				if (!HasSelection())
+				{
+					auto end = ImVec2(start.x + contentSize.x + scrollX, start.y + mCharAdvance.y);
+					drawList->AddRectFilled(start, end, mPalette[(int)(focused ? PaletteIndex::CurrentLineFill : PaletteIndex::CurrentLineFillInactive)]);
+					drawList->AddRect(start, end, mPalette[(int)PaletteIndex::CurrentLineEdge], 1.0f);
+				}
+
+				// Render the cursor
+				if (focused)
+				{
+					// Blink cycle: visible while 400ms < elapsed <= 800ms,
+					// then the timer restarts.
+					auto timeEnd = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
+					auto elapsed = timeEnd - mStartTime;
+					if (elapsed > 400)
+					{
+						float width = 1.0f;
+						auto cindex = GetCharacterIndex(mState.mCursorPosition);
+						float cx = TextDistanceToLineStart(mState.mCursorPosition);
+
+						// In overwrite mode the cursor covers the glyph it would replace.
+						if (mOverwrite && cindex < (int)line.size())
+						{
+							auto c = line[cindex].mChar;
+							if (c == '\t')
+							{
+								auto x = (1.0f + std::floor((1.0f + cx) / (float(mTabSize) * spaceSize))) * (float(mTabSize) * spaceSize);
+								width = x - cx;
+							}
+							else
+							{
+								char buf2[2];
+								buf2[0] = line[cindex].mChar;
+								buf2[1] = '\0';
+								width = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, buf2).x;
+							}
+						}
+						ImVec2 cstart(textScreenPos.x + cx, lineStartScreenPos.y);
+						ImVec2 cend(textScreenPos.x + cx + width, lineStartScreenPos.y + mCharAdvance.y);
+						drawList->AddRectFilled(cstart, cend, mPalette[(int)PaletteIndex::Cursor]);
+						if (elapsed > 800)
+							mStartTime = timeEnd;
+					}
+				}
+			}
+
+			// Render colorized text
+			auto prevColor = line.empty() ? mPalette[(int)PaletteIndex::Default] : GetGlyphColor(line[0]);
+			ImVec2 bufferOffset;
+
+			for (int i = 0; i < line.size();)
+			{
+				auto& glyph = line[i];
+				auto color = GetGlyphColor(glyph);
+
+				// Flush the pending run when the color changes or whitespace begins.
+				if ((color != prevColor || glyph.mChar == '\t' || glyph.mChar == ' ') && !mLineBuffer.empty())
+				{
+					const ImVec2 newOffset(textScreenPos.x + bufferOffset.x, textScreenPos.y + bufferOffset.y);
+					drawList->AddText(newOffset, prevColor, mLineBuffer.c_str());
+					auto textSize = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, mLineBuffer.c_str(), nullptr, nullptr);
+					bufferOffset.x += textSize.x;
+					mLineBuffer.clear();
+				}
+				prevColor = color;
+
+				if (glyph.mChar == '\t')
+				{
+					// Advance to the next tab stop; optionally draw a tab arrow.
+					auto oldX = bufferOffset.x;
+					bufferOffset.x = (1.0f + std::floor((1.0f + bufferOffset.x) / (float(mTabSize) * spaceSize))) * (float(mTabSize) * spaceSize);
+					++i;
+
+					if (mShowWhitespaces)
+					{
+						const auto s = ImGui::GetFontSize();
+						const auto x1 = textScreenPos.x + oldX + 1.0f;
+						const auto x2 = textScreenPos.x + bufferOffset.x - 1.0f;
+						const auto y = textScreenPos.y + bufferOffset.y + s * 0.5f;
+						const ImVec2 p1(x1, y);
+						const ImVec2 p2(x2, y);
+						const ImVec2 p3(x2 - s * 0.2f, y - s * 0.2f);
+						const ImVec2 p4(x2 - s * 0.2f, y + s * 0.2f);
+						drawList->AddLine(p1, p2, 0x90909090);
+						drawList->AddLine(p2, p3, 0x90909090);
+						drawList->AddLine(p2, p4, 0x90909090);
+					}
+				}
+				else if (glyph.mChar == ' ')
+				{
+					// Optionally draw a centered dot for the space.
+					if (mShowWhitespaces)
+					{
+						const auto s = ImGui::GetFontSize();
+						const auto x = textScreenPos.x + bufferOffset.x + spaceSize * 0.5f;
+						const auto y = textScreenPos.y + bufferOffset.y + s * 0.5f;
+						drawList->AddCircleFilled(ImVec2(x, y), 1.5f, 0x80808080, 4);
+					}
+					bufferOffset.x += spaceSize;
+					i++;
+				}
+				else
+				{
+					// Copy the whole UTF-8 sequence into the pending run.
+					auto l = UTF8CharLength(glyph.mChar);
+					while (l-- > 0)
+						mLineBuffer.push_back(line[i++].mChar);
+				}
+				++columnNo;
+			}
+
+			// Flush whatever run remains at the end of the line.
+			if (!mLineBuffer.empty())
+			{
+				const ImVec2 newOffset(textScreenPos.x + bufferOffset.x, textScreenPos.y + bufferOffset.y);
+				drawList->AddText(newOffset, prevColor, mLineBuffer.c_str());
+				mLineBuffer.clear();
+			}
+
+			++lineNo;
+		}
+
+		// Draw a tooltip on known identifiers/preprocessor symbols
+		if (ImGui::IsMousePosValid())
+		{
+			auto id = GetWordAt(ScreenPosToCoordinates(ImGui::GetMousePos()));
+			if (!id.empty())
+			{
+				auto it = mLanguageDefinition.mIdentifiers.find(id);
+				if (it != mLanguageDefinition.mIdentifiers.end())
+				{
+					ImGui::BeginTooltip();
+					ImGui::TextUnformatted(it->second.mDeclaration.c_str());
+					ImGui::EndTooltip();
+				}
+				else
+				{
+					auto pi = mLanguageDefinition.mPreprocIdentifiers.find(id);
+					if (pi != mLanguageDefinition.mPreprocIdentifiers.end())
+					{
+						ImGui::BeginTooltip();
+						ImGui::TextUnformatted(pi->second.mDeclaration.c_str());
+						ImGui::EndTooltip();
+					}
+				}
+			}
+		}
+	}
+
+
+	// Reserve the full content extent so ImGui produces correct scrollbars.
+	ImGui::Dummy(ImVec2((longest + 2), mLines.size() * mCharAdvance.y));
+
+	if (mScrollToCursor)
+	{
+		EnsureCursorVisible();
+		ImGui::SetWindowFocus();
+		mScrollToCursor = false;
+	}
+}
+
+// Public per-frame entry point: sets up the child window and styling,
+// dispatches keyboard/mouse input (if enabled), colorizes pending lines and
+// draws the editor. Push/Pop calls are strictly paired; keep the order.
+void TextEditor::Render(const char* aTitle, const ImVec2& aSize, bool aBorder)
+{
+	mWithinRender = true;
+	mTextChanged = false;
+	mCursorPositionChanged = false;
+
+	ImGui::PushStyleColor(ImGuiCol_ChildBg, ImGui::ColorConvertU32ToFloat4(mPalette[(int)PaletteIndex::Background]));
+	ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(0.0f, 0.0f));
+	if (!mIgnoreImGuiChild)
+		ImGui::BeginChild(aTitle, aSize, aBorder, ImGuiWindowFlags_HorizontalScrollbar | ImGuiWindowFlags_AlwaysHorizontalScrollbar | ImGuiWindowFlags_NoMove);
+
+	if (mHandleKeyboardInputs)
+	{
+		HandleKeyboardInputs();
+		ImGui::PushAllowKeyboardFocus(true);
+	}
+
+	if (mHandleMouseInputs)
+		HandleMouseInputs();
+
+	ColorizeInternal();
+	Render();
+
+	if (mHandleKeyboardInputs)
+		ImGui::PopAllowKeyboardFocus();
+
+	if (!mIgnoreImGuiChild)
+		ImGui::EndChild();
+
+	ImGui::PopStyleVar();
+	ImGui::PopStyleColor();
+
+	mWithinRender = false;
+}
+
+// Replace the whole buffer with aText. '\r' is dropped, '\n' starts a new
+// line, anything else becomes a Default-colored glyph. Resets undo history
+// and scrolls back to the top.
+void TextEditor::SetText(const std::string & aText)
+{
+	mLines.clear();
+	mLines.emplace_back(Line());
+	for (auto chr : aText)
+	{
+		switch (chr)
+		{
+		case '\r':
+			// Carriage returns are ignored entirely.
+			break;
+		case '\n':
+			mLines.emplace_back(Line());
+			break;
+		default:
+			mLines.back().emplace_back(Glyph(chr, PaletteIndex::Default));
+			break;
+		}
+	}
+
+	mTextChanged = true;
+	mScrollToTop = true;
+
+	mUndoBuffer.clear();
+	mUndoIndex = 0;
+
+	Colorize();
+}
+
+// Replace the buffer with the given lines. An empty input still yields one
+// empty line so the cursor always has a valid target. Resets undo history.
+void TextEditor::SetTextLines(const std::vector<std::string> & aLines)
+{
+	mLines.clear();
+
+	if (aLines.empty())
+	{
+		mLines.emplace_back(Line());
+	}
+	else
+	{
+		mLines.resize(aLines.size());
+
+		for (size_t i = 0; i < aLines.size(); ++i)
+		{
+			const std::string & src = aLines[i];
+
+			mLines[i].reserve(src.size());
+			for (auto ch : src)
+				mLines[i].emplace_back(Glyph(ch, PaletteIndex::Default));
+		}
+	}
+
+	mTextChanged = true;
+	mScrollToTop = true;
+
+	mUndoBuffer.clear();
+	mUndoIndex = 0;
+
+	Colorize();
+}
+
+// Insert one character at the cursor, recording a full UndoRecord.
+// Special cases: Tab with a multi-line selection performs block
+// (de-)indentation; '\n' splits the line with optional auto-indent;
+// overwrite mode removes the glyph under the cursor first.
+void TextEditor::EnterCharacter(ImWchar aChar, bool aShift)
+{
+	assert(!mReadOnly);
+
+	UndoRecord u;
+
+	u.mBefore = mState;
+
+	if (HasSelection())
+	{
+		// Block indent / unindent: Tab over a multi-line selection.
+		if (aChar == '\t' && mState.mSelectionStart.mLine != mState.mSelectionEnd.mLine)
+		{
+
+			auto start = mState.mSelectionStart;
+			auto end = mState.mSelectionEnd;
+			auto originalEnd = end;
+
+			if (start > end)
+				std::swap(start, end);
+			// Widen the range to full lines (a selection ending at column 0
+			// does not include that last line).
+			start.mColumn = 0;
+			//			end.mColumn = end.mLine < mLines.size() ? mLines[end.mLine].size() : 0;
+			if (end.mColumn == 0 && end.mLine > 0)
+				--end.mLine;
+			if (end.mLine >= (int)mLines.size())
+				end.mLine = mLines.empty() ? 0 : (int)mLines.size() - 1;
+			end.mColumn = GetLineMaxColumn(end.mLine);
+
+			//if (end.mColumn >= GetLineMaxColumn(end.mLine))
+			//	end.mColumn = GetLineMaxColumn(end.mLine) - 1;
+
+			u.mRemovedStart = start;
+			u.mRemovedEnd = end;
+			u.mRemoved = GetText(start, end);
+
+			bool modified = false;
+
+			for (int i = start.mLine; i <= end.mLine; i++)
+			{
+				auto& line = mLines[i];
+				if (aShift)
+				{
+					// Shift+Tab: strip one leading tab, or up to mTabSize
+					// leading spaces, from each line.
+					if (!line.empty())
+					{
+						if (line.front().mChar == '\t')
+						{
+							line.erase(line.begin());
+							modified = true;
+						}
+						else
+						{
+							for (int j = 0; j < mTabSize && !line.empty() && line.front().mChar == ' '; j++)
+							{
+								line.erase(line.begin());
+								modified = true;
+							}
+						}
+					}
+				}
+				else
+				{
+					line.insert(line.begin(), Glyph('\t', TextEditor::PaletteIndex::Background));
+					modified = true;
+				}
+			}
+
+			if (modified)
+			{
+				// Recompute the selection/undo range after the edit.
+				start = Coordinates(start.mLine, GetCharacterColumn(start.mLine, 0));
+				Coordinates rangeEnd;
+				if (originalEnd.mColumn != 0)
+				{
+					end = Coordinates(end.mLine, GetLineMaxColumn(end.mLine));
+					rangeEnd = end;
+					u.mAdded = GetText(start, end);
+				}
+				else
+				{
+					end = Coordinates(originalEnd.mLine, 0);
+					rangeEnd = Coordinates(end.mLine - 1, GetLineMaxColumn(end.mLine - 1));
+					u.mAdded = GetText(start, rangeEnd);
+				}
+
+				u.mAddedStart = start;
+				u.mAddedEnd = rangeEnd;
+				u.mAfter = mState;
+
+				mState.mSelectionStart = start;
+				mState.mSelectionEnd = end;
+				AddUndo(u);
+
+				mTextChanged = true;
+
+				EnsureCursorVisible();
+			}
+
+			return;
+		} // c == '\t'
+		else
+		{
+			// Typing over a selection first deletes it (recorded for undo).
+			u.mRemoved = GetSelectedText();
+			u.mRemovedStart = mState.mSelectionStart;
+			u.mRemovedEnd = mState.mSelectionEnd;
+			DeleteSelection();
+		}
+	} // HasSelection
+
+	auto coord = GetActualCursorCoordinates();
+	u.mAddedStart = coord;
+
+	assert(!mLines.empty());
+
+	if (aChar == '\n')
+	{
+		// Split the current line; optionally copy its leading whitespace
+		// into the new line (auto-indentation).
+		InsertLine(coord.mLine + 1);
+		auto& line = mLines[coord.mLine];
+		auto& newLine = mLines[coord.mLine + 1];
+
+		if (mLanguageDefinition.mAutoIndentation)
+			for (size_t it = 0; it < line.size() && isascii(line[it].mChar) && isblank(line[it].mChar); ++it)
+				newLine.push_back(line[it]);
+
+		const size_t whitespaceSize = newLine.size();
+		auto cindex = GetCharacterIndex(coord);
+		newLine.insert(newLine.end(), line.begin() + cindex, line.end());
+		line.erase(line.begin() + cindex, line.begin() + line.size());
+		SetCursorPosition(Coordinates(coord.mLine + 1, GetCharacterColumn(coord.mLine + 1, (int)whitespaceSize)));
+		u.mAdded = (char)aChar;
+	}
+	else
+	{
+		// Encode the character as UTF-8 and splice its bytes into the line.
+		char buf[7];
+		int e = ImTextCharToUtf8(buf, 7, aChar);
+		if (e > 0)
+		{
+			buf[e] = '\0';
+			auto& line = mLines[coord.mLine];
+			auto cindex = GetCharacterIndex(coord);
+
+			// Overwrite mode: remove the full UTF-8 sequence under the cursor.
+			if (mOverwrite && cindex < (int)line.size())
+			{
+				auto d = UTF8CharLength(line[cindex].mChar);
+
+				u.mRemovedStart = mState.mCursorPosition;
+				u.mRemovedEnd = Coordinates(coord.mLine, GetCharacterColumn(coord.mLine, cindex + d));
+
+				while (d-- > 0 && cindex < (int)line.size())
+				{
+					u.mRemoved += line[cindex].mChar;
+					line.erase(line.begin() + cindex);
+				}
+			}
+
+			for (auto p = buf; *p != '\0'; p++, ++cindex)
+				line.insert(line.begin() + cindex, Glyph(*p, PaletteIndex::Default));
+			u.mAdded = buf;
+
+			SetCursorPosition(Coordinates(coord.mLine, GetCharacterColumn(coord.mLine, cindex)));
+		}
+		else
+			return;
+	}
+
+	mTextChanged = true;
+
+	u.mAddedEnd = GetActualCursorCoordinates();
+	u.mAfter = mState;
+
+	AddUndo(u);
+
+	// Recolorize a small window around the edit.
+	Colorize(coord.mLine - 1, 3);
+	EnsureCursorVisible();
+}
+
+// Toggle read-only mode; editing entry points assert !mReadOnly.
+void TextEditor::SetReadOnly(bool aValue)
+{
+	mReadOnly = aValue;
+}
+
+// Enable/disable syntax colorization.
+void TextEditor::SetColorizerEnable(bool aValue)
+{
+	mColorizerEnabled = aValue;
+}
+
+// Move the cursor; on an actual change, flag it and keep it on screen.
+void TextEditor::SetCursorPosition(const Coordinates & aPosition)
+{
+	if (mState.mCursorPosition == aPosition)
+		return;
+
+	mState.mCursorPosition = aPosition;
+	mCursorPositionChanged = true;
+	EnsureCursorVisible();
+}
+
+// Set the selection start (sanitized); the range is kept ordered.
+void TextEditor::SetSelectionStart(const Coordinates & aPosition)
+{
+	mState.mSelectionStart = SanitizeCoordinates(aPosition);
+	if (mState.mSelectionEnd < mState.mSelectionStart)
+		std::swap(mState.mSelectionStart, mState.mSelectionEnd);
+}
+
+// Set the selection end (sanitized); the range is kept ordered.
+void TextEditor::SetSelectionEnd(const Coordinates & aPosition)
+{
+	mState.mSelectionEnd = SanitizeCoordinates(aPosition);
+	if (mState.mSelectionEnd < mState.mSelectionStart)
+		std::swap(mState.mSelectionStart, mState.mSelectionEnd);
+}
+
+// Set the selection range (sanitized and ordered), then widen it according
+// to the requested mode: Word snaps to word boundaries, Line spans whole
+// lines. Flags mCursorPositionChanged on any effective change.
+// FIX: removed an unused 'lineSize' local in the Line case that was computed
+// but never read (dead code; triggered -Wunused-variable).
+void TextEditor::SetSelection(const Coordinates & aStart, const Coordinates & aEnd, SelectionMode aMode)
+{
+	auto oldSelStart = mState.mSelectionStart;
+	auto oldSelEnd = mState.mSelectionEnd;
+
+	mState.mSelectionStart = SanitizeCoordinates(aStart);
+	mState.mSelectionEnd = SanitizeCoordinates(aEnd);
+	if (mState.mSelectionStart > mState.mSelectionEnd)
+		std::swap(mState.mSelectionStart, mState.mSelectionEnd);
+
+	switch (aMode)
+	{
+	case TextEditor::SelectionMode::Normal:
+		break;
+	case TextEditor::SelectionMode::Word:
+	{
+		mState.mSelectionStart = FindWordStart(mState.mSelectionStart);
+		if (!IsOnWordBoundary(mState.mSelectionEnd))
+			mState.mSelectionEnd = FindWordEnd(FindWordStart(mState.mSelectionEnd));
+		break;
+	}
+	case TextEditor::SelectionMode::Line:
+	{
+		const auto lineNo = mState.mSelectionEnd.mLine;
+		mState.mSelectionStart = Coordinates(mState.mSelectionStart.mLine, 0);
+		mState.mSelectionEnd = Coordinates(lineNo, GetLineMaxColumn(lineNo));
+		break;
+	}
+	default:
+		break;
+	}
+
+	if (mState.mSelectionStart != oldSelStart ||
+		mState.mSelectionEnd != oldSelEnd)
+		mCursorPositionChanged = true;
+}
+
+// Clamp the tab width into the supported [0, 32] range.
+void TextEditor::SetTabSize(int aValue)
+{
+	mTabSize = std::min(32, std::max(0, aValue));
+}
+
+// Convenience overload; forwards to the C-string version.
+void TextEditor::InsertText(const std::string & aValue)
+{
+	InsertText(aValue.c_str());
+}
+
+// Insert raw text at the cursor, collapse the selection onto the end of the
+// insertion and recolorize the affected line range.
+void TextEditor::InsertText(const char * aValue)
+{
+	if (aValue == nullptr)
+		return;
+
+	auto cursor = GetActualCursorCoordinates();
+	auto firstLine = std::min(cursor, mState.mSelectionStart);
+	int addedLines = cursor.mLine - firstLine.mLine;
+
+	// InsertTextAt advances 'cursor' to just past the inserted text.
+	addedLines += InsertTextAt(cursor, aValue);
+
+	SetSelection(cursor, cursor);
+	SetCursorPosition(cursor);
+	Colorize(firstLine.mLine - 1, addedLines + 2);
+}
+
+// Delete the selected range (no-op when the selection is empty), then
+// collapse the selection onto its start and recolorize that line.
+void TextEditor::DeleteSelection()
+{
+	assert(mState.mSelectionEnd >= mState.mSelectionStart);
+
+	if (mState.mSelectionStart == mState.mSelectionEnd)
+		return;
+
+	DeleteRange(mState.mSelectionStart, mState.mSelectionEnd);
+
+	SetSelection(mState.mSelectionStart, mState.mSelectionStart);
+	SetCursorPosition(mState.mSelectionStart);
+	Colorize(mState.mSelectionStart.mLine, 1);
+}
+
+// Move the cursor up by aAmount lines; with aSelect, grow the interactive
+// selection from whichever anchor the cursor just left.
+void TextEditor::MoveUp(int aAmount, bool aSelect)
+{
+	auto prev = mState.mCursorPosition;
+	mState.mCursorPosition.mLine = std::max(0, mState.mCursorPosition.mLine - aAmount);
+	if (prev == mState.mCursorPosition)
+		return;
+
+	if (aSelect)
+	{
+		if (prev == mInteractiveStart)
+			mInteractiveStart = mState.mCursorPosition;
+		else if (prev == mInteractiveEnd)
+			mInteractiveEnd = mState.mCursorPosition;
+		else
+		{
+			mInteractiveStart = mState.mCursorPosition;
+			mInteractiveEnd = prev;
+		}
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd);
+
+	EnsureCursorVisible();
+}
+
+// Move the cursor down by aAmount lines (clamped to the last line); with
+// aSelect, grow the interactive selection from the anchor the cursor left.
+void TextEditor::MoveDown(int aAmount, bool aSelect)
+{
+	assert(mState.mCursorPosition.mColumn >= 0);
+	auto prev = mState.mCursorPosition;
+	mState.mCursorPosition.mLine = std::max(0, std::min((int)mLines.size() - 1, mState.mCursorPosition.mLine + aAmount));
+
+	if (prev == mState.mCursorPosition)
+		return;
+
+	if (aSelect)
+	{
+		if (prev == mInteractiveEnd)
+			mInteractiveEnd = mState.mCursorPosition;
+		else if (prev == mInteractiveStart)
+			mInteractiveStart = mState.mCursorPosition;
+		else
+		{
+			mInteractiveStart = prev;
+			mInteractiveEnd = mState.mCursorPosition;
+		}
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd);
+
+	EnsureCursorVisible();
+}
+
+// True for UTF-8 continuation bytes (of the form 10xxxxxx).
+static bool IsUTFSequence(char c)
+{
+	return ((unsigned char)c & 0xC0) == 0x80;
+}
+
+// Move the cursor left by aAmount characters (or words with aWordMode),
+// wrapping to the end of the previous line at column 0 and skipping over
+// UTF-8 continuation bytes so the cursor never lands mid-sequence.
+void TextEditor::MoveLeft(int aAmount, bool aSelect, bool aWordMode)
+{
+	if (mLines.empty())
+		return;
+
+	auto oldPos = mState.mCursorPosition;
+	mState.mCursorPosition = GetActualCursorCoordinates();
+	auto line = mState.mCursorPosition.mLine;
+	auto cindex = GetCharacterIndex(mState.mCursorPosition);
+
+	while (aAmount-- > 0)
+	{
+		if (cindex == 0)
+		{
+			// At line start: wrap to the end of the previous line.
+			if (line > 0)
+			{
+				--line;
+				if ((int)mLines.size() > line)
+					cindex = (int)mLines[line].size();
+				else
+					cindex = 0;
+			}
+		}
+		else
+		{
+			--cindex;
+			if (cindex > 0)
+			{
+				// Step back over UTF-8 continuation bytes to the lead byte.
+				if ((int)mLines.size() > line)
+				{
+					while (cindex > 0 && IsUTFSequence(mLines[line][cindex].mChar))
+						--cindex;
+				}
+			}
+		}
+
+		mState.mCursorPosition = Coordinates(line, GetCharacterColumn(line, cindex));
+		if (aWordMode)
+		{
+			// Word mode: snap to the start of the word we landed in.
+			mState.mCursorPosition = FindWordStart(mState.mCursorPosition);
+			cindex = GetCharacterIndex(mState.mCursorPosition);
+		}
+	}
+
+	mState.mCursorPosition = Coordinates(line, GetCharacterColumn(line, cindex));
+
+	assert(mState.mCursorPosition.mColumn >= 0);
+	if (aSelect)
+	{
+		// Grow the interactive selection from the anchor the cursor left.
+		if (oldPos == mInteractiveStart)
+			mInteractiveStart = mState.mCursorPosition;
+		else if (oldPos == mInteractiveEnd)
+			mInteractiveEnd = mState.mCursorPosition;
+		else
+		{
+			mInteractiveStart = mState.mCursorPosition;
+			mInteractiveEnd = oldPos;
+		}
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd, aSelect && aWordMode ? SelectionMode::Word : SelectionMode::Normal);
+
+	EnsureCursorVisible();
+}
+
+// Move the cursor right by aAmount characters (or words with aWordMode),
+// advancing by whole UTF-8 sequences and wrapping to column 0 of the next
+// line at end-of-line.
+void TextEditor::MoveRight(int aAmount, bool aSelect, bool aWordMode)
+{
+	auto oldPos = mState.mCursorPosition;
+
+	if (mLines.empty() || oldPos.mLine >= mLines.size())
+		return;
+
+	auto cindex = GetCharacterIndex(mState.mCursorPosition);
+	while (aAmount-- > 0)
+	{
+		auto lindex = mState.mCursorPosition.mLine;
+		auto& line = mLines[lindex];
+
+		if (cindex >= line.size())
+		{
+			// End of line: wrap to the start of the next line, if any.
+			if (mState.mCursorPosition.mLine < mLines.size() - 1)
+			{
+				mState.mCursorPosition.mLine = std::max(0, std::min((int)mLines.size() - 1, mState.mCursorPosition.mLine + 1));
+				mState.mCursorPosition.mColumn = 0;
+			}
+			else
+				return;
+		}
+		else
+		{
+			// Advance by the full UTF-8 sequence starting at cindex.
+			cindex += UTF8CharLength(line[cindex].mChar);
+			mState.mCursorPosition = Coordinates(lindex, GetCharacterColumn(lindex, cindex));
+			if (aWordMode)
+				mState.mCursorPosition = FindNextWord(mState.mCursorPosition);
+		}
+	}
+
+	if (aSelect)
+	{
+		// Grow the interactive selection from the anchor the cursor left.
+		if (oldPos == mInteractiveEnd)
+			mInteractiveEnd = SanitizeCoordinates(mState.mCursorPosition);
+		else if (oldPos == mInteractiveStart)
+			mInteractiveStart = mState.mCursorPosition;
+		else
+		{
+			mInteractiveStart = oldPos;
+			mInteractiveEnd = mState.mCursorPosition;
+		}
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd, aSelect && aWordMode ? SelectionMode::Word : SelectionMode::Normal);
+
+	EnsureCursorVisible();
+}
+
+// Jump to the very beginning of the buffer, optionally extending the selection.
+void TextEditor::MoveTop(bool aSelect)
+{
+	auto prev = mState.mCursorPosition;
+	SetCursorPosition(Coordinates(0, 0));
+
+	if (mState.mCursorPosition == prev)
+		return;
+
+	if (aSelect)
+	{
+		mInteractiveEnd = prev;
+		mInteractiveStart = mState.mCursorPosition;
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd);
+}
+
+// Jump to the start of the last line, optionally extending the selection.
+// FIX: dropped the redundant 'TextEditor::TextEditor::' qualification — it is
+// accepted via the injected-class-name but is a typo that some compilers and
+// linters reject or warn about.
+void TextEditor::MoveBottom(bool aSelect)
+{
+	auto oldPos = GetCursorPosition();
+	auto newPos = Coordinates((int)mLines.size() - 1, 0);
+	SetCursorPosition(newPos);
+	if (aSelect)
+	{
+		mInteractiveStart = oldPos;
+		mInteractiveEnd = newPos;
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = newPos;
+	SetSelection(mInteractiveStart, mInteractiveEnd);
+}
+
+// Move the cursor to column 0 of the current line, optionally extending
+// the selection from the anchor the cursor just left.
+void TextEditor::MoveHome(bool aSelect)
+{
+	auto prev = mState.mCursorPosition;
+	SetCursorPosition(Coordinates(mState.mCursorPosition.mLine, 0));
+
+	if (mState.mCursorPosition == prev)
+		return;
+
+	if (aSelect)
+	{
+		if (prev == mInteractiveStart)
+			mInteractiveStart = mState.mCursorPosition;
+		else if (prev == mInteractiveEnd)
+			mInteractiveEnd = mState.mCursorPosition;
+		else
+		{
+			mInteractiveStart = mState.mCursorPosition;
+			mInteractiveEnd = prev;
+		}
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd);
+}
+
+// Move the cursor to the last column of the current line, optionally
+// extending the selection from the anchor the cursor just left.
+void TextEditor::MoveEnd(bool aSelect)
+{
+	auto prev = mState.mCursorPosition;
+	SetCursorPosition(Coordinates(mState.mCursorPosition.mLine, GetLineMaxColumn(prev.mLine)));
+
+	if (mState.mCursorPosition == prev)
+		return;
+
+	if (aSelect)
+	{
+		if (prev == mInteractiveEnd)
+			mInteractiveEnd = mState.mCursorPosition;
+		else if (prev == mInteractiveStart)
+			mInteractiveStart = mState.mCursorPosition;
+		else
+		{
+			mInteractiveStart = prev;
+			mInteractiveEnd = mState.mCursorPosition;
+		}
+	}
+	else
+		mInteractiveStart = mInteractiveEnd = mState.mCursorPosition;
+	SetSelection(mInteractiveStart, mInteractiveEnd);
+}
+
+// Forward-delete: removes the selection if any; otherwise deletes the UTF-8
+// sequence under the cursor, or joins with the next line when at end-of-line.
+// Records a full UndoRecord.
+void TextEditor::Delete()
+{
+	assert(!mReadOnly);
+
+	if (mLines.empty())
+		return;
+
+	UndoRecord u;
+	u.mBefore = mState;
+
+	if (HasSelection())
+	{
+		u.mRemoved = GetSelectedText();
+		u.mRemovedStart = mState.mSelectionStart;
+		u.mRemovedEnd = mState.mSelectionEnd;
+
+		DeleteSelection();
+	}
+	else
+	{
+		auto pos = GetActualCursorCoordinates();
+		SetCursorPosition(pos);
+		auto& line = mLines[pos.mLine];
+
+		if (pos.mColumn == GetLineMaxColumn(pos.mLine))
+		{
+			// At end of line: join with the next line (no-op on the last line).
+			if (pos.mLine == (int)mLines.size() - 1)
+				return;
+
+			u.mRemoved = '\n';
+			u.mRemovedStart = u.mRemovedEnd = GetActualCursorCoordinates();
+			Advance(u.mRemovedEnd);
+
+			auto& nextLine = mLines[pos.mLine + 1];
+			line.insert(line.end(), nextLine.begin(), nextLine.end());
+			RemoveLine(pos.mLine + 1);
+		}
+		else
+		{
+			// Mid-line: erase the full UTF-8 sequence at the cursor.
+			auto cindex = GetCharacterIndex(pos);
+			u.mRemovedStart = u.mRemovedEnd = GetActualCursorCoordinates();
+			u.mRemovedEnd.mColumn++;
+			u.mRemoved = GetText(u.mRemovedStart, u.mRemovedEnd);
+
+			auto d = UTF8CharLength(line[cindex].mChar);
+			while (d-- > 0 && cindex < (int)line.size())
+				line.erase(line.begin() + cindex);
+		}
+
+		mTextChanged = true;
+
+		Colorize(pos.mLine, 1);
+	}
+
+	u.mAfter = mState;
+	AddUndo(u);
+}
+
+// Backward-delete: removes the selection if any; otherwise deletes the UTF-8
+// sequence left of the cursor, or joins with the previous line at column 0.
+// Records a full UndoRecord.
+void TextEditor::Backspace()
+{
+	assert(!mReadOnly);
+
+	if (mLines.empty())
+		return;
+
+	UndoRecord u;
+	u.mBefore = mState;
+
+	if (HasSelection())
+	{
+		u.mRemoved = GetSelectedText();
+		u.mRemovedStart = mState.mSelectionStart;
+		u.mRemovedEnd = mState.mSelectionEnd;
+
+		DeleteSelection();
+	}
+	else
+	{
+		auto pos = GetActualCursorCoordinates();
+		SetCursorPosition(pos);
+
+		if (mState.mCursorPosition.mColumn == 0)
+		{
+			// At column 0: join this line onto the previous one.
+			if (mState.mCursorPosition.mLine == 0)
+				return;
+
+			u.mRemoved = '\n';
+			u.mRemovedStart = u.mRemovedEnd = Coordinates(pos.mLine - 1, GetLineMaxColumn(pos.mLine - 1));
+			Advance(u.mRemovedEnd);
+
+			auto& line = mLines[mState.mCursorPosition.mLine];
+			auto& prevLine = mLines[mState.mCursorPosition.mLine - 1];
+			auto prevSize = GetLineMaxColumn(mState.mCursorPosition.mLine - 1);
+			prevLine.insert(prevLine.end(), line.begin(), line.end());
+
+			// Re-key error markers around the removed line.
+			// NOTE(review): this only shifts a marker whose line is exactly
+			// one below the cursor line; markers further down keep their old
+			// (now stale) line numbers — looks suspicious, verify intent.
+			ErrorMarkers etmp;
+			for (auto& i : mErrorMarkers)
+				etmp.insert(ErrorMarkers::value_type(i.first - 1 == mState.mCursorPosition.mLine ? i.first - 1 : i.first, i.second));
+			mErrorMarkers = std::move(etmp);
+
+			RemoveLine(mState.mCursorPosition.mLine);
+			--mState.mCursorPosition.mLine;
+			mState.mCursorPosition.mColumn = prevSize;
+		}
+		else
+		{
+			// Mid-line: step back to the lead byte of the UTF-8 sequence
+			// left of the cursor and erase the whole sequence.
+			auto& line = mLines[mState.mCursorPosition.mLine];
+			auto cindex = GetCharacterIndex(pos) - 1;
+			auto cend = cindex + 1;
+			while (cindex > 0 && IsUTFSequence(line[cindex].mChar))
+				--cindex;
+
+			//if (cindex > 0 && UTF8CharLength(line[cindex].mChar) > 1)
+			//	--cindex;
+
+			u.mRemovedStart = u.mRemovedEnd = GetActualCursorCoordinates();
+			--u.mRemovedStart.mColumn;
+			--mState.mCursorPosition.mColumn;
+
+			while (cindex < line.size() && cend-- > cindex)
+			{
+				u.mRemoved += line[cindex].mChar;
+				line.erase(line.begin() + cindex);
+			}
+		}
+
+		mTextChanged = true;
+
+		EnsureCursorVisible();
+		Colorize(mState.mCursorPosition.mLine, 1);
+	}
+
+	u.mAfter = mState;
+	AddUndo(u);
+}
+
+// Select the word surrounding the current cursor position.
+void TextEditor::SelectWordUnderCursor()
+{
+	const auto cursor = GetCursorPosition();
+	SetSelection(FindWordStart(cursor), FindWordEnd(cursor));
+}
+
+// Select the whole buffer: origin up to one line past the last.
+void TextEditor::SelectAll()
+{
+	const Coordinates begin(0, 0);
+	const Coordinates end((int)mLines.size(), 0);
+	SetSelection(begin, end);
+}
+
+// A selection exists only when the range is non-empty.
+bool TextEditor::HasSelection() const
+{
+	return mState.mSelectionStart < mState.mSelectionEnd;
+}
+
+// Copy the selection to the clipboard; with no selection, copy the current
+// line's text (without its trailing newline).
+void TextEditor::Copy()
+{
+	if (HasSelection())
+	{
+		ImGui::SetClipboardText(GetSelectedText().c_str());
+		return;
+	}
+
+	if (mLines.empty())
+		return;
+
+	std::string str;
+	auto& line = mLines[GetActualCursorCoordinates().mLine];
+	for (auto& g : line)
+		str.push_back(g.mChar);
+	ImGui::SetClipboardText(str.c_str());
+}
+
+// Cut the selection to the clipboard (recorded for undo); read-only buffers
+// degrade to a plain copy, and no selection means no-op.
+void TextEditor::Cut()
+{
+	if (IsReadOnly())
+	{
+		Copy();
+		return;
+	}
+
+	if (!HasSelection())
+		return;
+
+	UndoRecord u;
+	u.mBefore = mState;
+	u.mRemoved = GetSelectedText();
+	u.mRemovedStart = mState.mSelectionStart;
+	u.mRemovedEnd = mState.mSelectionEnd;
+
+	Copy();
+	DeleteSelection();
+
+	u.mAfter = mState;
+	AddUndo(u);
+}
+
+// Paste clipboard text at the cursor, replacing any selection; the whole
+// operation (removed selection + inserted text) is one UndoRecord.
+void TextEditor::Paste()
+{
+	if (IsReadOnly())
+		return;
+
+	auto clipText = ImGui::GetClipboardText();
+	if (clipText == nullptr || strlen(clipText) == 0)
+		return;
+
+	UndoRecord u;
+	u.mBefore = mState;
+
+	if (HasSelection())
+	{
+		u.mRemoved = GetSelectedText();
+		u.mRemovedStart = mState.mSelectionStart;
+		u.mRemovedEnd = mState.mSelectionEnd;
+		DeleteSelection();
+	}
+
+	u.mAdded = clipText;
+	u.mAddedStart = GetActualCursorCoordinates();
+
+	InsertText(clipText);
+
+	u.mAddedEnd = GetActualCursorCoordinates();
+	u.mAfter = mState;
+	AddUndo(u);
+}
+
+// Undo is possible when editable and at least one record precedes the index.
+bool TextEditor::CanUndo() const
+{
+	return mUndoIndex > 0 && !mReadOnly;
+}
+
+// Redo is possible when editable and records remain beyond the index.
+bool TextEditor::CanRedo() const
+{
+	return mUndoIndex < (int)mUndoBuffer.size() && !mReadOnly;
+}
+
+// Walk the undo buffer backwards, applying at most aSteps records.
+void TextEditor::Undo(int aSteps)
+{
+	for (; CanUndo() && aSteps > 0; --aSteps)
+		mUndoBuffer[--mUndoIndex].Undo(this);
+}
+
+// Walk the undo buffer forwards, re-applying at most aSteps records.
+void TextEditor::Redo(int aSteps)
+{
+	for (; CanRedo() && aSteps > 0; --aSteps)
+		mUndoBuffer[mUndoIndex++].Redo(this);
+}
+
+// Built-in dark color theme; entries are ABGR-packed and indexed by
+// PaletteIndex, in declaration order.
+const TextEditor::Palette & TextEditor::GetDarkPalette()
+{
+	const static Palette p = { {
+			0xff7f7f7f,	// Default
+			0xffd69c56,	// Keyword
+			0xff00ff00,	// Number
+			0xff7070e0,	// String
+			0xff70a0e0, // Char literal
+			0xffffffff, // Punctuation
+			0xff408080,	// Preprocessor
+			0xffaaaaaa, // Identifier
+			0xff9bc64d, // Known identifier
+			0xffc040a0, // Preproc identifier
+			0xff206020, // Comment (single line)
+			0xff406020, // Comment (multi line)
+			0xff101010, // Background
+			0xffe0e0e0, // Cursor
+			0x80a06020, // Selection
+			0x800020ff, // ErrorMarker
+			0x40f08000, // Breakpoint
+			0xff707000, // Line number
+			0x40000000, // Current line fill
+			0x40808080, // Current line fill (inactive)
+			0x40a0a0a0, // Current line edge
+		} };
+	return p;
+}
+
+// Built-in light color theme; entries are ABGR-packed and indexed by
+// PaletteIndex, in declaration order.
+const TextEditor::Palette & TextEditor::GetLightPalette()
+{
+	const static Palette p = { {
+			0xff7f7f7f,	// None
+			0xffff0c06,	// Keyword
+			0xff008000,	// Number
+			0xff2020a0,	// String
+			0xff304070, // Char literal
+			0xff000000, // Punctuation
+			0xff406060,	// Preprocessor
+			0xff404040, // Identifier
+			0xff606010, // Known identifier
+			0xffc040a0, // Preproc identifier
+			0xff205020, // Comment (single line)
+			0xff405020, // Comment (multi line)
+			0xffffffff, // Background
+			0xff000000, // Cursor
+			0x80600000, // Selection
+			0xa00010ff, // ErrorMarker
+			0x80f08000, // Breakpoint
+			0xff505000, // Line number
+			0x40000000, // Current line fill
+			0x40808080, // Current line fill (inactive)
+			0x40000000, // Current line edge
+		} };
+	return p;
+}
+
+// Built-in retro-blue color theme; entries are ABGR-packed and indexed by
+// PaletteIndex, in declaration order.
+const TextEditor::Palette & TextEditor::GetRetroBluePalette()
+{
+	const static Palette p = { {
+			0xff00ffff,	// None
+			0xffffff00,	// Keyword
+			0xff00ff00,	// Number
+			0xff808000,	// String
+			0xff808000, // Char literal
+			0xffffffff, // Punctuation
+			0xff008000,	// Preprocessor
+			0xff00ffff, // Identifier
+			0xffffffff, // Known identifier
+			0xffff00ff, // Preproc identifier
+			0xff808080, // Comment (single line)
+			0xff404040, // Comment (multi line)
+			0xff800000, // Background
+			0xff0080ff, // Cursor
+			0x80ffff00, // Selection
+			0xa00000ff, // ErrorMarker
+			0x80ff8000, // Breakpoint
+			0xff808000, // Line number
+			0x40000000, // Current line fill
+			0x40808080, // Current line fill (inactive)
+			0x40000000, // Current line edge
+		} };
+	return p;
+}
+
+
+// Whole-buffer text: from the origin to one line past the last.
+std::string TextEditor::GetText() const
+{
+	const Coordinates endCoord((int)mLines.size(), 0);
+	return GetText(Coordinates(), endCoord);
+}
+
+// Flatten each glyph row into a plain std::string, one entry per line.
+std::vector<std::string> TextEditor::GetTextLines() const
+{
+	std::vector<std::string> result;
+	result.reserve(mLines.size());
+
+	for (auto & line : mLines)
+	{
+		std::string text(line.size(), '\0');
+
+		for (size_t i = 0; i < line.size(); ++i)
+			text[i] = line[i].mChar;
+
+		result.emplace_back(std::move(text));
+	}
+
+	return result;
+}
+
+// Text between the current selection bounds (empty string if no selection).
+std::string TextEditor::GetSelectedText() const
+{
+	return GetText(mState.mSelectionStart, mState.mSelectionEnd);
+}
+
+// Full text of the line the cursor is currently on.
+std::string TextEditor::GetCurrentLineText()const
+{
+	const auto curLine = mState.mCursorPosition.mLine;
+	return GetText(
+		Coordinates(curLine, 0),
+		Coordinates(curLine, GetLineMaxColumn(curLine)));
+}
+
+// Intentionally empty: input handling lives in HandleKeyboardInputs /
+// HandleMouseInputs; this stub is kept for API compatibility.
+void TextEditor::ProcessInputs()
+{
+}
+
+// Expand the pending colorization range [mColorRangeMin, mColorRangeMax);
+// aLines == -1 means "to the end of the file". ColorizeInternal consumes
+// the range incrementally; clamp order matters (min before max).
+void TextEditor::Colorize(int aFromLine, int aLines)
+{
+	int toLine = aLines == -1 ? (int)mLines.size() : std::min((int)mLines.size(), aFromLine + aLines);
+	mColorRangeMin = std::min(mColorRangeMin, aFromLine);
+	mColorRangeMax = std::max(mColorRangeMax, toLine);
+	mColorRangeMin = std::max(0, mColorRangeMin);
+	mColorRangeMax = std::max(mColorRangeMin, mColorRangeMax);
+	mCheckComments = true;
+}
+
// Re-tokenizes lines [aFromLine, aToLine) and assigns a palette color
// index to every glyph. The language's custom tokenizer is preferred;
// the compiled regex list is the fallback.
void TextEditor::ColorizeRange(int aFromLine, int aToLine)
{
	if (mLines.empty() || aFromLine >= aToLine)
		return;

	std::string buffer;
	std::cmatch results;
	std::string id;

	int endLine = std::max(0, std::min((int)mLines.size(), aToLine));
	for (int i = aFromLine; i < endLine; ++i)
	{
		auto& line = mLines[i];

		if (line.empty())
			continue;

		// copy the glyph characters into a flat char buffer and reset
		// every glyph's color to Default before re-tokenizing
		buffer.resize(line.size());
		for (size_t j = 0; j < line.size(); ++j)
		{
			auto& col = line[j];
			buffer[j] = col.mChar;
			col.mColorIndex = PaletteIndex::Default;
		}

		const char * bufferBegin = &buffer.front();
		const char * bufferEnd = bufferBegin + buffer.size();

		auto last = bufferEnd;

		for (auto first = bufferBegin; first != last; )
		{
			const char * token_begin = nullptr;
			const char * token_end = nullptr;
			PaletteIndex token_color = PaletteIndex::Default;

			bool hasTokenizeResult = false;

			// prefer the hand-written tokenizer when one is installed
			if (mLanguageDefinition.mTokenize != nullptr)
			{
				if (mLanguageDefinition.mTokenize(first, last, token_begin, token_end, token_color))
					hasTokenizeResult = true;
			}

			if (hasTokenizeResult == false)
			{
				// todo : remove
				//printf("using regex for %.*s\n", first + 10 < last ? 10 : int(last - first), first);

				// fall back to anchored regex matching at the current position
				for (auto& p : mRegexList)
				{
					if (std::regex_search(first, last, results, p.first, std::regex_constants::match_continuous))
					{
						hasTokenizeResult = true;

						auto& v = *results.begin();
						token_begin = v.first;
						token_end = v.second;
						token_color = p.second;
						break;
					}
				}
			}

			if (hasTokenizeResult == false)
			{
				// nothing matched at this position; skip one char and retry
				first++;
			}
			else
			{
				const size_t token_length = token_end - token_begin;

				if (token_color == PaletteIndex::Identifier)
				{
					id.assign(token_begin, token_end);

					// todo : allmost all language definitions use lower case to specify keywords, so shouldn't this use ::tolower ?
					if (!mLanguageDefinition.mCaseSensitive)
						std::transform(id.begin(), id.end(), id.begin(), ::toupper);

					// classify the identifier against the language word lists;
					// glyphs inside a preprocessor directive only match
					// preprocessor identifiers
					if (!line[first - bufferBegin].mPreprocessor)
					{
						if (mLanguageDefinition.mKeywords.count(id) != 0)
							token_color = PaletteIndex::Keyword;
						else if (mLanguageDefinition.mIdentifiers.count(id) != 0)
							token_color = PaletteIndex::KnownIdentifier;
						else if (mLanguageDefinition.mPreprocIdentifiers.count(id) != 0)
							token_color = PaletteIndex::PreprocIdentifier;
					}
					else
					{
						if (mLanguageDefinition.mPreprocIdentifiers.count(id) != 0)
							token_color = PaletteIndex::PreprocIdentifier;
					}
				}

				// paint every glyph covered by the token
				for (size_t j = 0; j < token_length; ++j)
					line[(token_begin - bufferBegin) + j].mColorIndex = token_color;

				first = token_end;
			}
		}
	}
}
+
+void TextEditor::ColorizeInternal()
+{
+ if (mLines.empty() || !mColorizerEnabled)
+ return;
+
+ if (mCheckComments)
+ {
+ auto endLine = mLines.size();
+ auto endIndex = 0;
+ auto commentStartLine = endLine;
+ auto commentStartIndex = endIndex;
+ auto withinString = false;
+ auto withinSingleLineComment = false;
+ auto withinPreproc = false;
+ auto firstChar = true; // there is no other non-whitespace characters in the line before
+ auto concatenate = false; // '\' on the very end of the line
+ auto currentLine = 0;
+ auto currentIndex = 0;
+ while (currentLine < endLine || currentIndex < endIndex)
+ {
+ auto& line = mLines[currentLine];
+
+ if (currentIndex == 0 && !concatenate)
+ {
+ withinSingleLineComment = false;
+ withinPreproc = false;
+ firstChar = true;
+ }
+
+ concatenate = false;
+
+ if (!line.empty())
+ {
+ auto& g = line[currentIndex];
+ auto c = g.mChar;
+
+ if (c != mLanguageDefinition.mPreprocChar && !isspace(c))
+ firstChar = false;
+
+ if (currentIndex == (int)line.size() - 1 && line[line.size() - 1].mChar == '\\')
+ concatenate = true;
+
+ bool inComment = (commentStartLine < currentLine || (commentStartLine == currentLine && commentStartIndex <= currentIndex));
+
+ if (withinString)
+ {
+ line[currentIndex].mMultiLineComment = inComment;
+
+ if (c == '\"')
+ {
+ if (currentIndex + 1 < (int)line.size() && line[currentIndex + 1].mChar == '\"')
+ {
+ currentIndex += 1;
+ if (currentIndex < (int)line.size())
+ line[currentIndex].mMultiLineComment = inComment;
+ }
+ else
+ withinString = false;
+ }
+ else if (c == '\\')
+ {
+ currentIndex += 1;
+ if (currentIndex < (int)line.size())
+ line[currentIndex].mMultiLineComment = inComment;
+ }
+ }
+ else
+ {
+ if (firstChar && c == mLanguageDefinition.mPreprocChar)
+ withinPreproc = true;
+
+ if (c == '\"')
+ {
+ withinString = true;
+ line[currentIndex].mMultiLineComment = inComment;
+ }
+ else
+ {
+ auto pred = [](const char& a, const Glyph& b) { return a == b.mChar; };
+ auto from = line.begin() + currentIndex;
+ auto& startStr = mLanguageDefinition.mCommentStart;
+ auto& singleStartStr = mLanguageDefinition.mSingleLineComment;
+
+ if (singleStartStr.size() > 0 &&
+ currentIndex + singleStartStr.size() <= line.size() &&
+ equals(singleStartStr.begin(), singleStartStr.end(), from, from + singleStartStr.size(), pred))
+ {
+ withinSingleLineComment = true;
+ }
+ else if (!withinSingleLineComment && currentIndex + startStr.size() <= line.size() &&
+ equals(startStr.begin(), startStr.end(), from, from + startStr.size(), pred))
+ {
+ commentStartLine = currentLine;
+ commentStartIndex = currentIndex;
+ }
+
+ inComment = inComment = (commentStartLine < currentLine || (commentStartLine == currentLine && commentStartIndex <= currentIndex));
+
+ line[currentIndex].mMultiLineComment = inComment;
+ line[currentIndex].mComment = withinSingleLineComment;
+
+ auto& endStr = mLanguageDefinition.mCommentEnd;
+ if (currentIndex + 1 >= (int)endStr.size() &&
+ equals(endStr.begin(), endStr.end(), from + 1 - endStr.size(), from + 1, pred))
+ {
+ commentStartIndex = endIndex;
+ commentStartLine = endLine;
+ }
+ }
+ }
+ line[currentIndex].mPreprocessor = withinPreproc;
+ currentIndex += UTF8CharLength(c);
+ if (currentIndex >= (int)line.size())
+ {
+ currentIndex = 0;
+ ++currentLine;
+ }
+ }
+ else
+ {
+ currentIndex = 0;
+ ++currentLine;
+ }
+ }
+ mCheckComments = false;
+ }
+
+ if (mColorRangeMin < mColorRangeMax)
+ {
+ const int increment = (mLanguageDefinition.mTokenize == nullptr) ? 10 : 10000;
+ const int to = std::min(mColorRangeMin + increment, mColorRangeMax);
+ ColorizeRange(mColorRangeMin, to);
+ mColorRangeMin = to;
+
+ if (mColorRangeMax == mColorRangeMin)
+ {
+ mColorRangeMin = std::numeric_limits<int>::max();
+ mColorRangeMax = 0;
+ }
+ return;
+ }
+}
+
// Returns the horizontal pixel distance from the start of the line to
// the given coordinate, handling tab stops and multi-byte UTF-8 glyphs
// (measured with the current ImGui font).
float TextEditor::TextDistanceToLineStart(const Coordinates& aFrom) const
{
	auto& line = mLines[aFrom.mLine];
	float distance = 0.0f;
	// width of a single space in the current font; basis for tab stops
	float spaceSize = ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, " ", nullptr, nullptr).x;
	int colIndex = GetCharacterIndex(aFrom);
	// NOTE(review): `it` (size_t) is compared against `colIndex` (int);
	// mixed signedness assumes GetCharacterIndex never returns negative — confirm.
	for (size_t it = 0u; it < line.size() && it < colIndex; )
	{
		if (line[it].mChar == '\t')
		{
			// snap to the next tab stop (stops every mTabSize space-widths)
			distance = (1.0f + std::floor((1.0f + distance) / (float(mTabSize) * spaceSize))) * (float(mTabSize) * spaceSize);
			++it;
		}
		else
		{
			// gather the full UTF-8 sequence into a small NUL-terminated
			// buffer (max 6 bytes + terminator) and measure it as one unit
			auto d = UTF8CharLength(line[it].mChar);
			char tempCString[7];
			int i = 0;
			for (; i < 6 && d-- > 0 && it < (int)line.size(); i++, it++)
				tempCString[i] = line[it].mChar;

			tempCString[i] = '\0';
			distance += ImGui::GetFont()->CalcTextSizeA(ImGui::GetFontSize(), FLT_MAX, -1.0f, tempCString, nullptr, nullptr).x;
		}
	}

	return distance;
}
+
// Scrolls the window so the cursor stays in view with a small margin.
// Outside of a render pass it only sets a flag; the next Render() call
// is expected to invoke this again with mWithinRender set.
void TextEditor::EnsureCursorVisible()
{
	if (!mWithinRender)
	{
		mScrollToCursor = true;
		return;
	}

	float scrollX = ImGui::GetScrollX();
	float scrollY = ImGui::GetScrollY();

	auto height = ImGui::GetWindowHeight();
	auto width = ImGui::GetWindowWidth();

	// visible line/column window implied by the current scroll offsets
	auto top = 1 + (int)ceil(scrollY / mCharAdvance.y);
	auto bottom = (int)ceil((scrollY + height) / mCharAdvance.y);

	auto left = (int)ceil(scrollX / mCharAdvance.x);
	auto right = (int)ceil((scrollX + width) / mCharAdvance.x);

	auto pos = GetActualCursorCoordinates();
	auto len = TextDistanceToLineStart(pos);

	// NOTE(review): vertical checks compare line numbers, but the
	// horizontal checks compare a pixel distance (len + mTextStart)
	// against column counts (left/right) — looks inconsistent; confirm
	// against upstream before changing.
	if (pos.mLine < top)
		ImGui::SetScrollY(std::max(0.0f, (pos.mLine - 1) * mCharAdvance.y));
	if (pos.mLine > bottom - 4)
		ImGui::SetScrollY(std::max(0.0f, (pos.mLine + 4) * mCharAdvance.y - height));
	if (len + mTextStart < left + 4)
		ImGui::SetScrollX(std::max(0.0f, len + mTextStart - 4));
	if (len + mTextStart > right - 4)
		ImGui::SetScrollX(std::max(0.0f, len + mTextStart + 4 - width));
}
+
+int TextEditor::GetPageSize() const
+{
+ auto height = ImGui::GetWindowHeight() - 20.0f;
+ return (int)floor(height / mCharAdvance.y);
+}
+
// Captures one undoable edit: the text that was added and/or removed,
// the coordinate ranges each spans, and the full editor state before
// and after the edit.
TextEditor::UndoRecord::UndoRecord(
	const std::string& aAdded,
	const TextEditor::Coordinates aAddedStart,
	const TextEditor::Coordinates aAddedEnd,
	const std::string& aRemoved,
	const TextEditor::Coordinates aRemovedStart,
	const TextEditor::Coordinates aRemovedEnd,
	TextEditor::EditorState& aBefore,
	TextEditor::EditorState& aAfter)
	: mAdded(aAdded)
	, mAddedStart(aAddedStart)
	, mAddedEnd(aAddedEnd)
	, mRemoved(aRemoved)
	, mRemovedStart(aRemovedStart)
	, mRemovedEnd(aRemovedEnd)
	, mBefore(aBefore)
	, mAfter(aAfter)
{
	// ranges must be well-formed (start never after end)
	assert(mAddedStart <= mAddedEnd);
	assert(mRemovedStart <= mRemovedEnd);
}
+
// Reverts this record: deletes the text the edit added, re-inserts the
// text it removed, then restores the pre-edit editor state.
void TextEditor::UndoRecord::Undo(TextEditor * aEditor)
{
	if (!mAdded.empty())
	{
		aEditor->DeleteRange(mAddedStart, mAddedEnd);
		// re-colorize the affected lines plus one line of context each side
		aEditor->Colorize(mAddedStart.mLine - 1, mAddedEnd.mLine - mAddedStart.mLine + 2);
	}

	if (!mRemoved.empty())
	{
		auto start = mRemovedStart;
		aEditor->InsertTextAt(start, mRemoved.c_str());
		aEditor->Colorize(mRemovedStart.mLine - 1, mRemovedEnd.mLine - mRemovedStart.mLine + 2);
	}

	aEditor->mState = mBefore;
	aEditor->EnsureCursorVisible();

}
+
// Re-applies this record: deletes the text the edit removed, re-inserts
// the text it added, then restores the post-edit editor state.
// NOTE(review): colorize spans here use "+ 1" while Undo uses "+ 2" —
// confirm the asymmetry is intentional.
void TextEditor::UndoRecord::Redo(TextEditor * aEditor)
{
	if (!mRemoved.empty())
	{
		aEditor->DeleteRange(mRemovedStart, mRemovedEnd);
		aEditor->Colorize(mRemovedStart.mLine - 1, mRemovedEnd.mLine - mRemovedStart.mLine + 1);
	}

	if (!mAdded.empty())
	{
		auto start = mAddedStart;
		aEditor->InsertTextAt(start, mAdded.c_str());
		aEditor->Colorize(mAddedStart.mLine - 1, mAddedEnd.mLine - mAddedStart.mLine + 1);
	}

	aEditor->mState = mAfter;
	aEditor->EnsureCursorVisible();
}
+
// Scans a double-quoted C string literal starting at in_begin.
// On success sets [out_begin, out_end) to the literal including both
// quotes and returns true; returns false when in_begin is not a quote
// or the literal is unterminated within [in_begin, in_end).
static bool TokenizeCStyleString(const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end)
{
	const char * p = in_begin;

	if (*p == '"')
	{
		p++;

		while (p < in_end)
		{
			// handle end of string
			if (*p == '"')
			{
				out_begin = in_begin;
				out_end = p + 1;
				return true;
			}

			// skip the character following a backslash, so escaped quotes
			// (\") AND escaped backslashes (\\) never terminate the scan
			// early. (The previous code only skipped \" and therefore
			// failed on strings ending in an escaped backslash, e.g. "a\\".)
			if (*p == '\\' && p + 1 < in_end)
				p++;

			p++;
		}
	}

	return false;
}
+
// Scans a single-quoted character literal: an opening quote, an optional
// backslash escape, one payload character and a closing quote (e.g. 'a',
// '\n'). Sets [out_begin, out_end) and returns true on a full match.
static bool TokenizeCStyleCharacterLiteral(const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end)
{
	const char * cursor = in_begin;

	if (*cursor != '\'')
		return false;

	++cursor;

	// step over the escape introducer, if any
	if (cursor < in_end && *cursor == '\\')
		++cursor;

	// step over the payload character
	if (cursor < in_end)
		++cursor;

	// valid only when a closing quote follows
	if (cursor < in_end && *cursor == '\'')
	{
		out_begin = in_begin;
		out_end = cursor + 1;
		return true;
	}

	return false;
}
+
// Scans a C identifier: a letter or underscore followed by letters,
// digits or underscores (ASCII only, locale-independent on purpose).
static bool TokenizeCStyleIdentifier(const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end)
{
	auto isIdentStart = [](char c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_'; };
	auto isIdentBody = [&isIdentStart](char c) { return isIdentStart(c) || (c >= '0' && c <= '9'); };

	if (!isIdentStart(*in_begin))
		return false;

	const char * cursor = in_begin + 1;
	while (cursor < in_end && isIdentBody(*cursor))
		++cursor;

	out_begin = in_begin;
	out_end = cursor;
	return true;
}
+
// Scans a C numeric literal: optionally signed decimal, float (with
// fraction/exponent and 'f' suffix), hex (0x...), binary (0b...), and
// integer size suffixes (u/U/l/L). Sets [out_begin, out_end) and
// returns true when at least one digit was consumed.
static bool TokenizeCStyleNumber(const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end)
{
	const char * p = in_begin;

	const bool startsWithNumber = *p >= '0' && *p <= '9';

	// a literal may start with a sign, but then digits must follow
	if (*p != '+' && *p != '-' && !startsWithNumber)
		return false;

	p++;

	bool hasNumber = startsWithNumber;

	// integral part
	while (p < in_end && (*p >= '0' && *p <= '9'))
	{
		hasNumber = true;

		p++;
	}

	// a lone sign with no digits is not a number
	if (hasNumber == false)
		return false;

	bool isFloat = false;
	bool isHex = false;
	bool isBinary = false;

	if (p < in_end)
	{
		if (*p == '.')
		{
			isFloat = true;

			p++;

			// fractional digits
			while (p < in_end && (*p >= '0' && *p <= '9'))
				p++;
		}
		else if (*p == 'x' || *p == 'X')
		{
			// hex formatted integer of the type 0xef80

			isHex = true;

			p++;

			while (p < in_end && ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f') || (*p >= 'A' && *p <= 'F')))
				p++;
		}
		else if (*p == 'b' || *p == 'B')
		{
			// binary formatted integer of the type 0b01011101

			isBinary = true;

			p++;

			while (p < in_end && (*p >= '0' && *p <= '1'))
				p++;
		}
	}

	if (isHex == false && isBinary == false)
	{
		// floating point exponent (e.g. 1e10, 2.5E-3)
		if (p < in_end && (*p == 'e' || *p == 'E'))
		{
			isFloat = true;

			p++;

			if (p < in_end && (*p == '+' || *p == '-'))
				p++;

			bool hasDigits = false;

			while (p < in_end && (*p >= '0' && *p <= '9'))
			{
				hasDigits = true;

				p++;
			}

			// an exponent marker with no digits invalidates the literal
			if (hasDigits == false)
				return false;
		}

		// single precision floating point type
		if (p < in_end && *p == 'f')
			p++;
	}

	if (isFloat == false)
	{
		// integer size type
		while (p < in_end && (*p == 'u' || *p == 'U' || *p == 'l' || *p == 'L'))
			p++;
	}

	out_begin = in_begin;
	out_end = p;
	return true;
}
+
// Matches a single punctuation/operator character and reports it as a
// one-character token.
static bool TokenizeCStylePunctuation(const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end)
{
	(void)in_end;

	// every single-character operator / separator treated as punctuation
	static const char punctuation[] = "[]{}!%^&*()-+=~|<>?:/;,.";

	for (const char * q = punctuation; *q != '\0'; ++q)
	{
		if (*in_begin == *q)
		{
			out_begin = in_begin;
			out_end = in_begin + 1;
			return true;
		}
	}

	return false;
}
+
// Lazily builds (once) and returns the cached C++ language definition:
// keyword/identifier word lists plus a hand-written tokenizer.
const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::CPlusPlus()
{
	static bool inited = false;
	static LanguageDefinition langDef;
	if (!inited)
	{
		static const char* const cppKeywords[] = {
			"alignas", "alignof", "and", "and_eq", "asm", "atomic_cancel", "atomic_commit", "atomic_noexcept", "auto", "bitand", "bitor", "bool", "break", "case", "catch", "char", "char16_t", "char32_t", "class",
			"compl", "concept", "const", "constexpr", "const_cast", "continue", "decltype", "default", "delete", "do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern", "false", "float",
			"for", "friend", "goto", "if", "import", "inline", "int", "long", "module", "mutable", "namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq", "private", "protected", "public",
			"register", "reinterpret_cast", "requires", "return", "short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct", "switch", "synchronized", "template", "this", "thread_local",
			"throw", "true", "try", "typedef", "typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile", "wchar_t", "while", "xor", "xor_eq"
		};
		for (auto& k : cppKeywords)
			langDef.mKeywords.insert(k);

		static const char* const identifiers[] = {
			"abort", "abs", "acos", "asin", "atan", "atexit", "atof", "atoi", "atol", "ceil", "clock", "cosh", "ctime", "div", "exit", "fabs", "floor", "fmod", "getchar", "getenv", "isalnum", "isalpha", "isdigit", "isgraph",
			"ispunct", "isspace", "isupper", "kbhit", "log10", "log2", "log", "memcmp", "modf", "pow", "printf", "sprintf", "snprintf", "putchar", "putenv", "puts", "rand", "remove", "rename", "sinh", "sqrt", "srand", "strcat", "strcmp", "strerror", "time", "tolower", "toupper",
			"std", "string", "vector", "map", "unordered_map", "set", "unordered_set", "min", "max"
		};
		for (auto& k : identifiers)
		{
			Identifier id;
			id.mDeclaration = "Built-in function";
			langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
		}

		// hand-written tokenizer: tried in priority order, far faster
		// than the regex fallback used by the other language definitions
		langDef.mTokenize = [](const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end, PaletteIndex & paletteIndex) -> bool
		{
			paletteIndex = PaletteIndex::Max;

			// skip leading blanks; an all-blank range yields a Default token
			while (in_begin < in_end && isascii(*in_begin) && isblank(*in_begin))
				in_begin++;

			if (in_begin == in_end)
			{
				out_begin = in_end;
				out_end = in_end;
				paletteIndex = PaletteIndex::Default;
			}
			else if (TokenizeCStyleString(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::String;
			else if (TokenizeCStyleCharacterLiteral(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::CharLiteral;
			else if (TokenizeCStyleIdentifier(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::Identifier;
			else if (TokenizeCStyleNumber(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::Number;
			else if (TokenizeCStylePunctuation(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::Punctuation;

			return paletteIndex != PaletteIndex::Max;
		};

		langDef.mCommentStart = "/*";
		langDef.mCommentEnd = "*/";
		langDef.mSingleLineComment = "//";

		langDef.mCaseSensitive = true;
		langDef.mAutoIndentation = true;

		langDef.mName = "C++";

		inited = true;
	}
	return langDef;
}
+
// Lazily builds (once) and returns the cached HLSL language definition.
// Unlike C/C++, HLSL has no custom tokenizer here and relies on the
// regex token list.
const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::HLSL()
{
	static bool inited = false;
	static LanguageDefinition langDef;
	if (!inited)
	{
		static const char* const keywords[] = {
			"AppendStructuredBuffer", "asm", "asm_fragment", "BlendState", "bool", "break", "Buffer", "ByteAddressBuffer", "case", "cbuffer", "centroid", "class", "column_major", "compile", "compile_fragment",
			"CompileShader", "const", "continue", "ComputeShader", "ConsumeStructuredBuffer", "default", "DepthStencilState", "DepthStencilView", "discard", "do", "double", "DomainShader", "dword", "else",
			"export", "extern", "false", "float", "for", "fxgroup", "GeometryShader", "groupshared", "half", "Hullshader", "if", "in", "inline", "inout", "InputPatch", "int", "interface", "line", "lineadj",
			"linear", "LineStream", "matrix", "min16float", "min10float", "min16int", "min12int", "min16uint", "namespace", "nointerpolation", "noperspective", "NULL", "out", "OutputPatch", "packoffset",
			"pass", "pixelfragment", "PixelShader", "point", "PointStream", "precise", "RasterizerState", "RenderTargetView", "return", "register", "row_major", "RWBuffer", "RWByteAddressBuffer", "RWStructuredBuffer",
			"RWTexture1D", "RWTexture1DArray", "RWTexture2D", "RWTexture2DArray", "RWTexture3D", "sample", "sampler", "SamplerState", "SamplerComparisonState", "shared", "snorm", "stateblock", "stateblock_state",
			"static", "string", "struct", "switch", "StructuredBuffer", "tbuffer", "technique", "technique10", "technique11", "texture", "Texture1D", "Texture1DArray", "Texture2D", "Texture2DArray", "Texture2DMS",
			"Texture2DMSArray", "Texture3D", "TextureCube", "TextureCubeArray", "true", "typedef", "triangle", "triangleadj", "TriangleStream", "uint", "uniform", "unorm", "unsigned", "vector", "vertexfragment",
			"VertexShader", "void", "volatile", "while",
			"bool1","bool2","bool3","bool4","double1","double2","double3","double4", "float1", "float2", "float3", "float4", "int1", "int2", "int3", "int4", "in", "out", "inout",
			"uint1", "uint2", "uint3", "uint4", "dword1", "dword2", "dword3", "dword4", "half1", "half2", "half3", "half4",
			"float1x1","float2x1","float3x1","float4x1","float1x2","float2x2","float3x2","float4x2",
			"float1x3","float2x3","float3x3","float4x3","float1x4","float2x4","float3x4","float4x4",
			"half1x1","half2x1","half3x1","half4x1","half1x2","half2x2","half3x2","half4x2",
			"half1x3","half2x3","half3x3","half4x3","half1x4","half2x4","half3x4","half4x4",
		};
		for (auto& k : keywords)
			langDef.mKeywords.insert(k);

		static const char* const identifiers[] = {
			"abort", "abs", "acos", "all", "AllMemoryBarrier", "AllMemoryBarrierWithGroupSync", "any", "asdouble", "asfloat", "asin", "asint", "asint", "asuint",
			"asuint", "atan", "atan2", "ceil", "CheckAccessFullyMapped", "clamp", "clip", "cos", "cosh", "countbits", "cross", "D3DCOLORtoUBYTE4", "ddx",
			"ddx_coarse", "ddx_fine", "ddy", "ddy_coarse", "ddy_fine", "degrees", "determinant", "DeviceMemoryBarrier", "DeviceMemoryBarrierWithGroupSync",
			"distance", "dot", "dst", "errorf", "EvaluateAttributeAtCentroid", "EvaluateAttributeAtSample", "EvaluateAttributeSnapped", "exp", "exp2",
			"f16tof32", "f32tof16", "faceforward", "firstbithigh", "firstbitlow", "floor", "fma", "fmod", "frac", "frexp", "fwidth", "GetRenderTargetSampleCount",
			"GetRenderTargetSamplePosition", "GroupMemoryBarrier", "GroupMemoryBarrierWithGroupSync", "InterlockedAdd", "InterlockedAnd", "InterlockedCompareExchange",
			"InterlockedCompareStore", "InterlockedExchange", "InterlockedMax", "InterlockedMin", "InterlockedOr", "InterlockedXor", "isfinite", "isinf", "isnan",
			"ldexp", "length", "lerp", "lit", "log", "log10", "log2", "mad", "max", "min", "modf", "msad4", "mul", "noise", "normalize", "pow", "printf",
			"Process2DQuadTessFactorsAvg", "Process2DQuadTessFactorsMax", "Process2DQuadTessFactorsMin", "ProcessIsolineTessFactors", "ProcessQuadTessFactorsAvg",
			"ProcessQuadTessFactorsMax", "ProcessQuadTessFactorsMin", "ProcessTriTessFactorsAvg", "ProcessTriTessFactorsMax", "ProcessTriTessFactorsMin",
			"radians", "rcp", "reflect", "refract", "reversebits", "round", "rsqrt", "saturate", "sign", "sin", "sincos", "sinh", "smoothstep", "sqrt", "step",
			"tan", "tanh", "tex1D", "tex1D", "tex1Dbias", "tex1Dgrad", "tex1Dlod", "tex1Dproj", "tex2D", "tex2D", "tex2Dbias", "tex2Dgrad", "tex2Dlod", "tex2Dproj",
			"tex3D", "tex3D", "tex3Dbias", "tex3Dgrad", "tex3Dlod", "tex3Dproj", "texCUBE", "texCUBE", "texCUBEbias", "texCUBEgrad", "texCUBElod", "texCUBEproj", "transpose", "trunc"
		};
		for (auto& k : identifiers)
		{
			Identifier id;
			id.mDeclaration = "Built-in function";
			langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
		}

		// regexes are tried in order; first match at the current position wins
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[ \\t]*#[ \\t]*[a-zA-Z_]+", PaletteIndex::Preprocessor));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("L?\\\"(\\\\.|[^\\\"])*\\\"", PaletteIndex::String));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("\\'\\\\?[^\\']\\'", PaletteIndex::CharLiteral));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?([0-9]+([.][0-9]*)?|[.][0-9]+)([eE][+-]?[0-9]+)?[fF]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?[0-9]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[0-7]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[xX][0-9a-fA-F]+[uU]?[lL]?[lL]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[a-zA-Z_][a-zA-Z0-9_]*", PaletteIndex::Identifier));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[\\[\\]\\{\\}\\!\\%\\^\\&\\*\\(\\)\\-\\+\\=\\~\\|\\<\\>\\?\\/\\;\\,\\.]", PaletteIndex::Punctuation));

		langDef.mCommentStart = "/*";
		langDef.mCommentEnd = "*/";
		langDef.mSingleLineComment = "//";

		langDef.mCaseSensitive = true;
		langDef.mAutoIndentation = true;

		langDef.mName = "HLSL";

		inited = true;
	}
	return langDef;
}
+
// Lazily builds (once) and returns the cached GLSL language definition.
// NOTE(review): the keyword/identifier lists below are identical to the
// C definition rather than actual GLSL keywords (vec4, uniform, ...) —
// confirm whether this is intentional or a copy-paste placeholder.
const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::GLSL()
{
	static bool inited = false;
	static LanguageDefinition langDef;
	if (!inited)
	{
		static const char* const keywords[] = {
			"auto", "break", "case", "char", "const", "continue", "default", "do", "double", "else", "enum", "extern", "float", "for", "goto", "if", "inline", "int", "long", "register", "restrict", "return", "short",
			"signed", "sizeof", "static", "struct", "switch", "typedef", "union", "unsigned", "void", "volatile", "while", "_Alignas", "_Alignof", "_Atomic", "_Bool", "_Complex", "_Generic", "_Imaginary",
			"_Noreturn", "_Static_assert", "_Thread_local"
		};
		for (auto& k : keywords)
			langDef.mKeywords.insert(k);

		static const char* const identifiers[] = {
			"abort", "abs", "acos", "asin", "atan", "atexit", "atof", "atoi", "atol", "ceil", "clock", "cosh", "ctime", "div", "exit", "fabs", "floor", "fmod", "getchar", "getenv", "isalnum", "isalpha", "isdigit", "isgraph",
			"ispunct", "isspace", "isupper", "kbhit", "log10", "log2", "log", "memcmp", "modf", "pow", "putchar", "putenv", "puts", "rand", "remove", "rename", "sinh", "sqrt", "srand", "strcat", "strcmp", "strerror", "time", "tolower", "toupper"
		};
		for (auto& k : identifiers)
		{
			Identifier id;
			id.mDeclaration = "Built-in function";
			langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
		}

		// regexes are tried in order; first match at the current position wins
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[ \\t]*#[ \\t]*[a-zA-Z_]+", PaletteIndex::Preprocessor));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("L?\\\"(\\\\.|[^\\\"])*\\\"", PaletteIndex::String));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("\\'\\\\?[^\\']\\'", PaletteIndex::CharLiteral));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?([0-9]+([.][0-9]*)?|[.][0-9]+)([eE][+-]?[0-9]+)?[fF]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?[0-9]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[0-7]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[xX][0-9a-fA-F]+[uU]?[lL]?[lL]?", PaletteIndex::Number));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[a-zA-Z_][a-zA-Z0-9_]*", PaletteIndex::Identifier));
		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[\\[\\]\\{\\}\\!\\%\\^\\&\\*\\(\\)\\-\\+\\=\\~\\|\\<\\>\\?\\/\\;\\,\\.]", PaletteIndex::Punctuation));

		langDef.mCommentStart = "/*";
		langDef.mCommentEnd = "*/";
		langDef.mSingleLineComment = "//";

		langDef.mCaseSensitive = true;
		langDef.mAutoIndentation = true;

		langDef.mName = "GLSL";

		inited = true;
	}
	return langDef;
}
+
// Lazily builds (once) and returns the cached C language definition:
// C11 keyword list, common libc identifiers and the hand-written
// tokenizer shared with the C++ definition.
const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::C()
{
	static bool inited = false;
	static LanguageDefinition langDef;
	if (!inited)
	{
		static const char* const keywords[] = {
			"auto", "break", "case", "char", "const", "continue", "default", "do", "double", "else", "enum", "extern", "float", "for", "goto", "if", "inline", "int", "long", "register", "restrict", "return", "short",
			"signed", "sizeof", "static", "struct", "switch", "typedef", "union", "unsigned", "void", "volatile", "while", "_Alignas", "_Alignof", "_Atomic", "_Bool", "_Complex", "_Generic", "_Imaginary",
			"_Noreturn", "_Static_assert", "_Thread_local"
		};
		for (auto& k : keywords)
			langDef.mKeywords.insert(k);

		static const char* const identifiers[] = {
			"abort", "abs", "acos", "asin", "atan", "atexit", "atof", "atoi", "atol", "ceil", "clock", "cosh", "ctime", "div", "exit", "fabs", "floor", "fmod", "getchar", "getenv", "isalnum", "isalpha", "isdigit", "isgraph",
			"ispunct", "isspace", "isupper", "kbhit", "log10", "log2", "log", "memcmp", "modf", "pow", "putchar", "putenv", "puts", "rand", "remove", "rename", "sinh", "sqrt", "srand", "strcat", "strcmp", "strerror", "time", "tolower", "toupper"
		};
		for (auto& k : identifiers)
		{
			Identifier id;
			id.mDeclaration = "Built-in function";
			langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
		}

		// hand-written tokenizer: tried in priority order, far faster
		// than the regex fallback used by the other language definitions
		langDef.mTokenize = [](const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end, PaletteIndex & paletteIndex) -> bool
		{
			paletteIndex = PaletteIndex::Max;

			// skip leading blanks; an all-blank range yields a Default token
			while (in_begin < in_end && isascii(*in_begin) && isblank(*in_begin))
				in_begin++;

			if (in_begin == in_end)
			{
				out_begin = in_end;
				out_end = in_end;
				paletteIndex = PaletteIndex::Default;
			}
			else if (TokenizeCStyleString(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::String;
			else if (TokenizeCStyleCharacterLiteral(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::CharLiteral;
			else if (TokenizeCStyleIdentifier(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::Identifier;
			else if (TokenizeCStyleNumber(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::Number;
			else if (TokenizeCStylePunctuation(in_begin, in_end, out_begin, out_end))
				paletteIndex = PaletteIndex::Punctuation;

			return paletteIndex != PaletteIndex::Max;
		};

		langDef.mCommentStart = "/*";
		langDef.mCommentEnd = "*/";
		langDef.mSingleLineComment = "//";

		langDef.mCaseSensitive = true;
		langDef.mAutoIndentation = true;

		langDef.mName = "C";

		inited = true;
	}
	return langDef;
}
+
+const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::SQL()
+{
+ static bool inited = false;
+ static LanguageDefinition langDef;
+ if (!inited)
+ {
+ static const char* const keywords[] = {
+ "ADD", "EXCEPT", "PERCENT", "ALL", "EXEC", "PLAN", "ALTER", "EXECUTE", "PRECISION", "AND", "EXISTS", "PRIMARY", "ANY", "EXIT", "PRINT", "AS", "FETCH", "PROC", "ASC", "FILE", "PROCEDURE",
+ "AUTHORIZATION", "FILLFACTOR", "PUBLIC", "BACKUP", "FOR", "RAISERROR", "BEGIN", "FOREIGN", "READ", "BETWEEN", "FREETEXT", "READTEXT", "BREAK", "FREETEXTTABLE", "RECONFIGURE",
+ "BROWSE", "FROM", "REFERENCES", "BULK", "FULL", "REPLICATION", "BY", "FUNCTION", "RESTORE", "CASCADE", "GOTO", "RESTRICT", "CASE", "GRANT", "RETURN", "CHECK", "GROUP", "REVOKE",
+ "CHECKPOINT", "HAVING", "RIGHT", "CLOSE", "HOLDLOCK", "ROLLBACK", "CLUSTERED", "IDENTITY", "ROWCOUNT", "COALESCE", "IDENTITY_INSERT", "ROWGUIDCOL", "COLLATE", "IDENTITYCOL", "RULE",
+ "COLUMN", "IF", "SAVE", "COMMIT", "IN", "SCHEMA", "COMPUTE", "INDEX", "SELECT", "CONSTRAINT", "INNER", "SESSION_USER", "CONTAINS", "INSERT", "SET", "CONTAINSTABLE", "INTERSECT", "SETUSER",
+ "CONTINUE", "INTO", "SHUTDOWN", "CONVERT", "IS", "SOME", "CREATE", "JOIN", "STATISTICS", "CROSS", "KEY", "SYSTEM_USER", "CURRENT", "KILL", "TABLE", "CURRENT_DATE", "LEFT", "TEXTSIZE",
+ "CURRENT_TIME", "LIKE", "THEN", "CURRENT_TIMESTAMP", "LINENO", "TO", "CURRENT_USER", "LOAD", "TOP", "CURSOR", "NATIONAL", "TRAN", "DATABASE", "NOCHECK", "TRANSACTION",
+ "DBCC", "NONCLUSTERED", "TRIGGER", "DEALLOCATE", "NOT", "TRUNCATE", "DECLARE", "NULL", "TSEQUAL", "DEFAULT", "NULLIF", "UNION", "DELETE", "OF", "UNIQUE", "DENY", "OFF", "UPDATE",
+ "DESC", "OFFSETS", "UPDATETEXT", "DISK", "ON", "USE", "DISTINCT", "OPEN", "USER", "DISTRIBUTED", "OPENDATASOURCE", "VALUES", "DOUBLE", "OPENQUERY", "VARYING","DROP", "OPENROWSET", "VIEW",
+ "DUMMY", "OPENXML", "WAITFOR", "DUMP", "OPTION", "WHEN", "ELSE", "OR", "WHERE", "END", "ORDER", "WHILE", "ERRLVL", "OUTER", "WITH", "ESCAPE", "OVER", "WRITETEXT"
+ };
+
+ for (auto& k : keywords)
+ langDef.mKeywords.insert(k);
+
+ static const char* const identifiers[] = {
+ "ABS", "ACOS", "ADD_MONTHS", "ASCII", "ASCIISTR", "ASIN", "ATAN", "ATAN2", "AVG", "BFILENAME", "BIN_TO_NUM", "BITAND", "CARDINALITY", "CASE", "CAST", "CEIL",
+ "CHARTOROWID", "CHR", "COALESCE", "COMPOSE", "CONCAT", "CONVERT", "CORR", "COS", "COSH", "COUNT", "COVAR_POP", "COVAR_SAMP", "CUME_DIST", "CURRENT_DATE",
+ "CURRENT_TIMESTAMP", "DBTIMEZONE", "DECODE", "DECOMPOSE", "DENSE_RANK", "DUMP", "EMPTY_BLOB", "EMPTY_CLOB", "EXP", "EXTRACT", "FIRST_VALUE", "FLOOR", "FROM_TZ", "GREATEST",
+ "GROUP_ID", "HEXTORAW", "INITCAP", "INSTR", "INSTR2", "INSTR4", "INSTRB", "INSTRC", "LAG", "LAST_DAY", "LAST_VALUE", "LEAD", "LEAST", "LENGTH", "LENGTH2", "LENGTH4",
+ "LENGTHB", "LENGTHC", "LISTAGG", "LN", "LNNVL", "LOCALTIMESTAMP", "LOG", "LOWER", "LPAD", "LTRIM", "MAX", "MEDIAN", "MIN", "MOD", "MONTHS_BETWEEN", "NANVL", "NCHR",
+ "NEW_TIME", "NEXT_DAY", "NTH_VALUE", "NULLIF", "NUMTODSINTERVAL", "NUMTOYMINTERVAL", "NVL", "NVL2", "POWER", "RANK", "RAWTOHEX", "REGEXP_COUNT", "REGEXP_INSTR",
+ "REGEXP_REPLACE", "REGEXP_SUBSTR", "REMAINDER", "REPLACE", "ROUND", "ROWNUM", "RPAD", "RTRIM", "SESSIONTIMEZONE", "SIGN", "SIN", "SINH",
+ "SOUNDEX", "SQRT", "STDDEV", "SUBSTR", "SUM", "SYS_CONTEXT", "SYSDATE", "SYSTIMESTAMP", "TAN", "TANH", "TO_CHAR", "TO_CLOB", "TO_DATE", "TO_DSINTERVAL", "TO_LOB",
+ "TO_MULTI_BYTE", "TO_NCLOB", "TO_NUMBER", "TO_SINGLE_BYTE", "TO_TIMESTAMP", "TO_TIMESTAMP_TZ", "TO_YMINTERVAL", "TRANSLATE", "TRIM", "TRUNC", "TZ_OFFSET", "UID", "UPPER",
+ "USER", "USERENV", "VAR_POP", "VAR_SAMP", "VARIANCE", "VSIZE "
+ };
+ for (auto& k : identifiers)
+ {
+ Identifier id;
+ id.mDeclaration = "Built-in function";
+ langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
+ }
+
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("L?\\\"(\\\\.|[^\\\"])*\\\"", PaletteIndex::String));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("\\\'[^\\\']*\\\'", PaletteIndex::String));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?([0-9]+([.][0-9]*)?|[.][0-9]+)([eE][+-]?[0-9]+)?[fF]?", PaletteIndex::Number));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?[0-9]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[0-7]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[xX][0-9a-fA-F]+[uU]?[lL]?[lL]?", PaletteIndex::Number));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[a-zA-Z_][a-zA-Z0-9_]*", PaletteIndex::Identifier));
+ langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[\\[\\]\\{\\}\\!\\%\\^\\&\\*\\(\\)\\-\\+\\=\\~\\|\\<\\>\\?\\/\\;\\,\\.]", PaletteIndex::Punctuation));
+
+ langDef.mCommentStart = "/*";
+ langDef.mCommentEnd = "*/";
+ langDef.mSingleLineComment = "//";
+
+ langDef.mCaseSensitive = false;
+ langDef.mAutoIndentation = false;
+
+ langDef.mName = "SQL";
+
+ inited = true;
+ }
+ return langDef;
+}
+
+const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::AngelScript()
+{
+	static bool inited = false;
+	static LanguageDefinition langDef;
+	if (!inited)
+	{
+		// FIX(review): "this " had a trailing space, so the tokenizer could never match it as a keyword.
+		static const char* const keywords[] = {
+			"and", "abstract", "auto", "bool", "break", "case", "cast", "class", "const", "continue", "default", "do", "double", "else", "enum", "false", "final", "float", "for",
+			"from", "funcdef", "function", "get", "if", "import", "in", "inout", "int", "interface", "int8", "int16", "int32", "int64", "is", "mixin", "namespace", "not",
+			"null", "or", "out", "override", "private", "protected", "return", "set", "shared", "super", "switch", "this", "true", "typedef", "uint", "uint8", "uint16", "uint32",
+			"uint64", "void", "while", "xor"
+		};
+
+		for (auto& k : keywords)
+			langDef.mKeywords.insert(k);
+
+		// FIX(review): "tab" was a typo for "tan" (AngelScript's scriptmath add-on registers tan, not tab).
+		static const char* const identifiers[] = {
+			"cos", "sin", "tan", "acos", "asin", "atan", "atan2", "cosh", "sinh", "tanh", "log", "log10", "pow", "sqrt", "abs", "ceil", "floor", "fraction", "closeTo", "fpFromIEEE", "fpToIEEE",
+			"complex", "opEquals", "opAddAssign", "opSubAssign", "opMulAssign", "opDivAssign", "opAdd", "opSub", "opMul", "opDiv"
+		};
+		for (auto& k : identifiers)
+		{
+			Identifier id;
+			id.mDeclaration = "Built-in function";
+			langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
+		}
+
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("L?\\\"(\\\\.|[^\\\"])*\\\"", PaletteIndex::String));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("\\'\\\\?[^\\']\\'", PaletteIndex::String));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?([0-9]+([.][0-9]*)?|[.][0-9]+)([eE][+-]?[0-9]+)?[fF]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?[0-9]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[0-7]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[xX][0-9a-fA-F]+[uU]?[lL]?[lL]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[a-zA-Z_][a-zA-Z0-9_]*", PaletteIndex::Identifier));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[\\[\\]\\{\\}\\!\\%\\^\\&\\*\\(\\)\\-\\+\\=\\~\\|\\<\\>\\?\\/\\;\\,\\.]", PaletteIndex::Punctuation));
+
+		langDef.mCommentStart = "/*";
+		langDef.mCommentEnd = "*/";
+		langDef.mSingleLineComment = "//";
+
+		langDef.mCaseSensitive = true;
+		langDef.mAutoIndentation = true;
+
+		langDef.mName = "AngelScript";
+
+		inited = true;
+	}
+	return langDef;
+}
+
+const TextEditor::LanguageDefinition& TextEditor::LanguageDefinition::Lua()
+{
+	static bool inited = false;
+	static LanguageDefinition langDef;
+	if (!inited)
+	{
+		// FIX(review): removed two stray empty-string entries ("") that were inserted into
+		// mKeywords; the empty string is not a Lua keyword and matches no token.
+		static const char* const keywords[] = {
+			"and", "break", "do", "else", "elseif", "end", "false", "for", "function", "if", "in", "local", "nil", "not", "or", "repeat", "return", "then", "true", "until", "while"
+		};
+
+		for (auto& k : keywords)
+			langDef.mKeywords.insert(k);
+
+		static const char* const identifiers[] = {
+			"assert", "collectgarbage", "dofile", "error", "getmetatable", "ipairs", "loadfile", "load", "loadstring", "next", "pairs", "pcall", "print", "rawequal", "rawlen", "rawget", "rawset",
+			"select", "setmetatable", "tonumber", "tostring", "type", "xpcall", "_G", "_VERSION","arshift", "band", "bnot", "bor", "bxor", "btest", "extract", "lrotate", "lshift", "replace",
+			"rrotate", "rshift", "create", "resume", "running", "status", "wrap", "yield", "isyieldable", "debug","getuservalue", "gethook", "getinfo", "getlocal", "getregistry", "getmetatable",
+			"getupvalue", "upvaluejoin", "upvalueid", "setuservalue", "sethook", "setlocal", "setmetatable", "setupvalue", "traceback", "close", "flush", "input", "lines", "open", "output", "popen",
+			"read", "tmpfile", "type", "write", "close", "flush", "lines", "read", "seek", "setvbuf", "write", "__gc", "__tostring", "abs", "acos", "asin", "atan", "ceil", "cos", "deg", "exp", "tointeger",
+			"floor", "fmod", "ult", "log", "max", "min", "modf", "rad", "random", "randomseed", "sin", "sqrt", "string", "tan", "type", "atan2", "cosh", "sinh", "tanh",
+			"pow", "frexp", "ldexp", "log10", "pi", "huge", "maxinteger", "mininteger", "loadlib", "searchpath", "seeall", "preload", "cpath", "path", "searchers", "loaded", "module", "require", "clock",
+			"date", "difftime", "execute", "exit", "getenv", "remove", "rename", "setlocale", "time", "tmpname", "byte", "char", "dump", "find", "format", "gmatch", "gsub", "len", "lower", "match", "rep",
+			"reverse", "sub", "upper", "pack", "packsize", "unpack", "concat", "maxn", "insert", "pack", "unpack", "remove", "move", "sort", "offset", "codepoint", "char", "len", "codes", "charpattern",
+			"coroutine", "table", "io", "os", "string", "utf8", "bit32", "math", "debug", "package"
+		};
+		for (auto& k : identifiers)
+		{
+			Identifier id;
+			id.mDeclaration = "Built-in function";
+			langDef.mIdentifiers.insert(std::make_pair(std::string(k), id));
+		}
+
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("L?\\\"(\\\\.|[^\\\"])*\\\"", PaletteIndex::String));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("\\\'[^\\\']*\\\'", PaletteIndex::String));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("0[xX][0-9a-fA-F]+[uU]?[lL]?[lL]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?([0-9]+([.][0-9]*)?|[.][0-9]+)([eE][+-]?[0-9]+)?[fF]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[+-]?[0-9]+[Uu]?[lL]?[lL]?", PaletteIndex::Number));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[a-zA-Z_][a-zA-Z0-9_]*", PaletteIndex::Identifier));
+		langDef.mTokenRegexStrings.push_back(std::make_pair<std::string, PaletteIndex>("[\\[\\]\\{\\}\\!\\%\\^\\&\\*\\(\\)\\-\\+\\=\\~\\|\\<\\>\\?\\/\\;\\,\\.]", PaletteIndex::Punctuation));
+
+		langDef.mCommentStart = "--[[";
+		langDef.mCommentEnd = "]]";
+		langDef.mSingleLineComment = "--";
+
+		langDef.mCaseSensitive = true;
+		langDef.mAutoIndentation = false;
+
+		langDef.mName = "Lua";
+
+		inited = true;
+	}
+	return langDef;
+}
diff --git a/3rdparty/imguicolortextedit/TextEditor.h b/3rdparty/imguicolortextedit/TextEditor.h
new file mode 100644
index 0000000..bd52e13
--- /dev/null
+++ b/3rdparty/imguicolortextedit/TextEditor.h
@@ -0,0 +1,389 @@
+#pragma once
+
+#include <string>
+#include <vector>
+#include <array>
+#include <memory>
+#include <unordered_set>
+#include <unordered_map>
+#include <map>
+#include <regex>
+#include "imgui.h"
+
+class TextEditor
+{
+public:
+ enum class PaletteIndex
+ {
+ Default,
+ Keyword,
+ Number,
+ String,
+ CharLiteral,
+ Punctuation,
+ Preprocessor,
+ Identifier,
+ KnownIdentifier,
+ PreprocIdentifier,
+ Comment,
+ MultiLineComment,
+ Background,
+ Cursor,
+ Selection,
+ ErrorMarker,
+ Breakpoint,
+ LineNumber,
+ CurrentLineFill,
+ CurrentLineFillInactive,
+ CurrentLineEdge,
+ Max
+ };
+
+ enum class SelectionMode
+ {
+ Normal,
+ Word,
+ Line
+ };
+
+ struct Breakpoint
+ {
+ int mLine;
+ bool mEnabled;
+ std::string mCondition;
+
+ Breakpoint()
+ : mLine(-1)
+ , mEnabled(false)
+ {}
+ };
+
+ // Represents a character coordinate from the user's point of view,
+ // i. e. consider an uniform grid (assuming fixed-width font) on the
+ // screen as it is rendered, and each cell has its own coordinate, starting from 0.
+ // Tabs are counted as [1..mTabSize] count empty spaces, depending on
+ // how many space is necessary to reach the next tab stop.
+ // For example, coordinate (1, 5) represents the character 'B' in a line "\tABC", when mTabSize = 4,
+ // because it is rendered as " ABC" on the screen.
+ struct Coordinates
+ {
+ int mLine, mColumn;
+ Coordinates() : mLine(0), mColumn(0) {}
+ Coordinates(int aLine, int aColumn) : mLine(aLine), mColumn(aColumn)
+ {
+ assert(aLine >= 0);
+ assert(aColumn >= 0);
+ }
+ static Coordinates Invalid() { static Coordinates invalid(-1, -1); return invalid; }
+
+ bool operator ==(const Coordinates& o) const
+ {
+ return
+ mLine == o.mLine &&
+ mColumn == o.mColumn;
+ }
+
+ bool operator !=(const Coordinates& o) const
+ {
+ return
+ mLine != o.mLine ||
+ mColumn != o.mColumn;
+ }
+
+ bool operator <(const Coordinates& o) const
+ {
+ if (mLine != o.mLine)
+ return mLine < o.mLine;
+ return mColumn < o.mColumn;
+ }
+
+ bool operator >(const Coordinates& o) const
+ {
+ if (mLine != o.mLine)
+ return mLine > o.mLine;
+ return mColumn > o.mColumn;
+ }
+
+ bool operator <=(const Coordinates& o) const
+ {
+ if (mLine != o.mLine)
+ return mLine < o.mLine;
+ return mColumn <= o.mColumn;
+ }
+
+ bool operator >=(const Coordinates& o) const
+ {
+ if (mLine != o.mLine)
+ return mLine > o.mLine;
+ return mColumn >= o.mColumn;
+ }
+ };
+
+ struct Identifier
+ {
+ Coordinates mLocation;
+ std::string mDeclaration;
+ };
+
+ typedef std::string String;
+ typedef std::unordered_map<std::string, Identifier> Identifiers;
+ typedef std::unordered_set<std::string> Keywords;
+ typedef std::map<int, std::string> ErrorMarkers;
+ typedef std::unordered_set<int> Breakpoints;
+ typedef std::array<ImU32, (unsigned)PaletteIndex::Max> Palette;
+ typedef uint8_t Char;
+
+ struct Glyph
+ {
+ Char mChar;
+ PaletteIndex mColorIndex = PaletteIndex::Default;
+ bool mComment : 1;
+ bool mMultiLineComment : 1;
+ bool mPreprocessor : 1;
+
+ Glyph(Char aChar, PaletteIndex aColorIndex) : mChar(aChar), mColorIndex(aColorIndex),
+ mComment(false), mMultiLineComment(false), mPreprocessor(false) {}
+ };
+
+ typedef std::vector<Glyph> Line;
+ typedef std::vector<Line> Lines;
+
+ struct LanguageDefinition
+ {
+ typedef std::pair<std::string, PaletteIndex> TokenRegexString;
+ typedef std::vector<TokenRegexString> TokenRegexStrings;
+ typedef bool(*TokenizeCallback)(const char * in_begin, const char * in_end, const char *& out_begin, const char *& out_end, PaletteIndex & paletteIndex);
+
+ std::string mName;
+ Keywords mKeywords;
+ Identifiers mIdentifiers;
+ Identifiers mPreprocIdentifiers;
+ std::string mCommentStart, mCommentEnd, mSingleLineComment;
+ char mPreprocChar;
+ bool mAutoIndentation;
+
+ TokenizeCallback mTokenize;
+
+ TokenRegexStrings mTokenRegexStrings;
+
+ bool mCaseSensitive;
+
+ LanguageDefinition()
+ : mPreprocChar('#'), mAutoIndentation(true), mTokenize(nullptr), mCaseSensitive(true)
+ {
+ }
+
+ static const LanguageDefinition& CPlusPlus();
+ static const LanguageDefinition& HLSL();
+ static const LanguageDefinition& GLSL();
+ static const LanguageDefinition& C();
+ static const LanguageDefinition& SQL();
+ static const LanguageDefinition& AngelScript();
+ static const LanguageDefinition& Lua();
+ };
+
+ TextEditor();
+ ~TextEditor();
+
+ void SetLanguageDefinition(const LanguageDefinition& aLanguageDef);
+ const LanguageDefinition& GetLanguageDefinition() const { return mLanguageDefinition; }
+
+ const Palette& GetPalette() const { return mPaletteBase; }
+ void SetPalette(const Palette& aValue);
+
+ void SetErrorMarkers(const ErrorMarkers& aMarkers) { mErrorMarkers = aMarkers; }
+ void SetBreakpoints(const Breakpoints& aMarkers) { mBreakpoints = aMarkers; }
+
+ void Render(const char* aTitle, const ImVec2& aSize = ImVec2(), bool aBorder = false);
+ void SetText(const std::string& aText);
+ std::string GetText() const;
+
+ void SetTextLines(const std::vector<std::string>& aLines);
+ std::vector<std::string> GetTextLines() const;
+
+ std::string GetSelectedText() const;
+ std::string GetCurrentLineText()const;
+
+ int GetTotalLines() const { return (int)mLines.size(); }
+ bool IsOverwrite() const { return mOverwrite; }
+
+ void SetReadOnly(bool aValue);
+ bool IsReadOnly() const { return mReadOnly; }
+ bool IsTextChanged() const { return mTextChanged; }
+ bool IsCursorPositionChanged() const { return mCursorPositionChanged; }
+
+ bool IsColorizerEnabled() const { return mColorizerEnabled; }
+ void SetColorizerEnable(bool aValue);
+
+ Coordinates GetCursorPosition() const { return GetActualCursorCoordinates(); }
+ void SetCursorPosition(const Coordinates& aPosition);
+
+	inline void SetHandleMouseInputs    (bool aValue){ mHandleMouseInputs    = aValue;}
+	// FIX(review): was returning mHandleKeyboardInputs — copy-paste bug from the keyboard accessor below.
+	inline bool IsHandleMouseInputsEnabled() const { return mHandleMouseInputs; }
+
+	inline void SetHandleKeyboardInputs (bool aValue){ mHandleKeyboardInputs = aValue;}
+	inline bool IsHandleKeyboardInputsEnabled() const { return mHandleKeyboardInputs; }
+
+ inline void SetImGuiChildIgnored (bool aValue){ mIgnoreImGuiChild = aValue;}
+ inline bool IsImGuiChildIgnored() const { return mIgnoreImGuiChild; }
+
+ inline void SetShowWhitespaces(bool aValue) { mShowWhitespaces = aValue; }
+ inline bool IsShowingWhitespaces() const { return mShowWhitespaces; }
+
+ void SetTabSize(int aValue);
+ inline int GetTabSize() const { return mTabSize; }
+
+ void InsertText(const std::string& aValue);
+ void InsertText(const char* aValue);
+
+ void MoveUp(int aAmount = 1, bool aSelect = false);
+ void MoveDown(int aAmount = 1, bool aSelect = false);
+ void MoveLeft(int aAmount = 1, bool aSelect = false, bool aWordMode = false);
+ void MoveRight(int aAmount = 1, bool aSelect = false, bool aWordMode = false);
+ void MoveTop(bool aSelect = false);
+ void MoveBottom(bool aSelect = false);
+ void MoveHome(bool aSelect = false);
+ void MoveEnd(bool aSelect = false);
+
+ void SetSelectionStart(const Coordinates& aPosition);
+ void SetSelectionEnd(const Coordinates& aPosition);
+ void SetSelection(const Coordinates& aStart, const Coordinates& aEnd, SelectionMode aMode = SelectionMode::Normal);
+ void SelectWordUnderCursor();
+ void SelectAll();
+ bool HasSelection() const;
+
+ void Copy();
+ void Cut();
+ void Paste();
+ void Delete();
+
+ bool CanUndo() const;
+ bool CanRedo() const;
+ void Undo(int aSteps = 1);
+ void Redo(int aSteps = 1);
+
+ static const Palette& GetDarkPalette();
+ static const Palette& GetLightPalette();
+ static const Palette& GetRetroBluePalette();
+
+private:
+ typedef std::vector<std::pair<std::regex, PaletteIndex>> RegexList;
+
+ struct EditorState
+ {
+ Coordinates mSelectionStart;
+ Coordinates mSelectionEnd;
+ Coordinates mCursorPosition;
+ };
+
+ class UndoRecord
+ {
+ public:
+ UndoRecord() {}
+ ~UndoRecord() {}
+
+ UndoRecord(
+ const std::string& aAdded,
+ const TextEditor::Coordinates aAddedStart,
+ const TextEditor::Coordinates aAddedEnd,
+
+ const std::string& aRemoved,
+ const TextEditor::Coordinates aRemovedStart,
+ const TextEditor::Coordinates aRemovedEnd,
+
+ TextEditor::EditorState& aBefore,
+ TextEditor::EditorState& aAfter);
+
+ void Undo(TextEditor* aEditor);
+ void Redo(TextEditor* aEditor);
+
+ std::string mAdded;
+ Coordinates mAddedStart;
+ Coordinates mAddedEnd;
+
+ std::string mRemoved;
+ Coordinates mRemovedStart;
+ Coordinates mRemovedEnd;
+
+ EditorState mBefore;
+ EditorState mAfter;
+ };
+
+ typedef std::vector<UndoRecord> UndoBuffer;
+
+ void ProcessInputs();
+ void Colorize(int aFromLine = 0, int aCount = -1);
+ void ColorizeRange(int aFromLine = 0, int aToLine = 0);
+ void ColorizeInternal();
+ float TextDistanceToLineStart(const Coordinates& aFrom) const;
+ void EnsureCursorVisible();
+ int GetPageSize() const;
+ std::string GetText(const Coordinates& aStart, const Coordinates& aEnd) const;
+ Coordinates GetActualCursorCoordinates() const;
+ Coordinates SanitizeCoordinates(const Coordinates& aValue) const;
+ void Advance(Coordinates& aCoordinates) const;
+ void DeleteRange(const Coordinates& aStart, const Coordinates& aEnd);
+ int InsertTextAt(Coordinates& aWhere, const char* aValue);
+ void AddUndo(UndoRecord& aValue);
+ Coordinates ScreenPosToCoordinates(const ImVec2& aPosition) const;
+ Coordinates FindWordStart(const Coordinates& aFrom) const;
+ Coordinates FindWordEnd(const Coordinates& aFrom) const;
+ Coordinates FindNextWord(const Coordinates& aFrom) const;
+ int GetCharacterIndex(const Coordinates& aCoordinates) const;
+ int GetCharacterColumn(int aLine, int aIndex) const;
+ int GetLineCharacterCount(int aLine) const;
+ int GetLineMaxColumn(int aLine) const;
+ bool IsOnWordBoundary(const Coordinates& aAt) const;
+ void RemoveLine(int aStart, int aEnd);
+ void RemoveLine(int aIndex);
+ Line& InsertLine(int aIndex);
+ void EnterCharacter(ImWchar aChar, bool aShift);
+ void Backspace();
+ void DeleteSelection();
+ std::string GetWordUnderCursor() const;
+ std::string GetWordAt(const Coordinates& aCoords) const;
+ ImU32 GetGlyphColor(const Glyph& aGlyph) const;
+
+ void HandleKeyboardInputs();
+ void HandleMouseInputs();
+ void Render();
+
+ float mLineSpacing;
+ Lines mLines;
+ EditorState mState;
+ UndoBuffer mUndoBuffer;
+ int mUndoIndex;
+
+ int mTabSize;
+ bool mOverwrite;
+ bool mReadOnly;
+ bool mWithinRender;
+ bool mScrollToCursor;
+ bool mScrollToTop;
+ bool mTextChanged;
+ bool mColorizerEnabled;
+ float mTextStart; // position (in pixels) where a code line starts relative to the left of the TextEditor.
+ int mLeftMargin;
+ bool mCursorPositionChanged;
+ int mColorRangeMin, mColorRangeMax;
+ SelectionMode mSelectionMode;
+ bool mHandleKeyboardInputs;
+ bool mHandleMouseInputs;
+ bool mIgnoreImGuiChild;
+ bool mShowWhitespaces;
+
+ Palette mPaletteBase;
+ Palette mPalette;
+ LanguageDefinition mLanguageDefinition;
+ RegexList mRegexList;
+
+ bool mCheckComments;
+ Breakpoints mBreakpoints;
+ ErrorMarkers mErrorMarkers;
+ ImVec2 mCharAdvance;
+ Coordinates mInteractiveStart, mInteractiveEnd;
+ std::string mLineBuffer;
+ uint64_t mStartTime;
+
+ float mLastClick;
+};
diff --git a/3rdparty/tracy/tracy/Tracy.hpp b/3rdparty/tracy/tracy/Tracy.hpp
new file mode 100644
index 0000000..6d22c7d
--- /dev/null
+++ b/3rdparty/tracy/tracy/Tracy.hpp
@@ -0,0 +1,267 @@
+#ifndef __TRACY_HPP__
+#define __TRACY_HPP__
+
+#include "common/TracyColor.hpp"
+#include "common/TracySystem.hpp"
+
+#ifndef TRACY_ENABLE
+
+#define ZoneNamed(x,y)
+#define ZoneNamedN(x,y,z)
+#define ZoneNamedC(x,y,z)
+#define ZoneNamedNC(x,y,z,w)
+
+#define ZoneTransient(x,y)
+#define ZoneTransientN(x,y,z)
+
+#define ZoneScoped
+#define ZoneScopedN(x)
+#define ZoneScopedC(x)
+#define ZoneScopedNC(x,y)
+
+#define ZoneText(x,y)
+#define ZoneTextV(x,y,z)
+#define ZoneName(x,y)
+#define ZoneNameV(x,y,z)
+#define ZoneColor(x)
+#define ZoneColorV(x,y)
+#define ZoneValue(x)
+#define ZoneValueV(x,y)
+#define ZoneIsActive false
+#define ZoneIsActiveV(x) false
+
+#define FrameMark
+#define FrameMarkNamed(x)
+#define FrameMarkStart(x)
+#define FrameMarkEnd(x)
+
+#define FrameImage(x,y,z,w,a)
+
+#define TracyLockable( type, varname ) type varname;
+#define TracyLockableN( type, varname, desc ) type varname;
+#define TracySharedLockable( type, varname ) type varname;
+#define TracySharedLockableN( type, varname, desc ) type varname;
+#define LockableBase( type ) type
+#define SharedLockableBase( type ) type
+#define LockMark(x) (void)x;
+#define LockableName(x,y,z);
+
+#define TracyPlot(x,y)
+#define TracyPlotConfig(x,y)
+
+#define TracyMessage(x,y)
+#define TracyMessageL(x)
+#define TracyMessageC(x,y,z)
+#define TracyMessageLC(x,y)
+#define TracyAppInfo(x,y)
+
+#define TracyAlloc(x,y)
+#define TracyFree(x)
+#define TracySecureAlloc(x,y)
+#define TracySecureFree(x)
+
+#define TracyAllocN(x,y,z)
+#define TracyFreeN(x,y)
+#define TracySecureAllocN(x,y,z)
+#define TracySecureFreeN(x,y)
+
+#define ZoneNamedS(x,y,z)
+#define ZoneNamedNS(x,y,z,w)
+#define ZoneNamedCS(x,y,z,w)
+#define ZoneNamedNCS(x,y,z,w,a)
+
+#define ZoneTransientS(x,y,z)
+#define ZoneTransientNS(x,y,z,w)
+
+#define ZoneScopedS(x)
+#define ZoneScopedNS(x,y)
+#define ZoneScopedCS(x,y)
+#define ZoneScopedNCS(x,y,z)
+
+#define TracyAllocS(x,y,z)
+#define TracyFreeS(x,y)
+#define TracySecureAllocS(x,y,z)
+#define TracySecureFreeS(x,y)
+
+#define TracyAllocNS(x,y,z,w)
+#define TracyFreeNS(x,y,z)
+#define TracySecureAllocNS(x,y,z,w)
+#define TracySecureFreeNS(x,y,z)
+
+#define TracyMessageS(x,y,z)
+#define TracyMessageLS(x,y)
+#define TracyMessageCS(x,y,z,w)
+#define TracyMessageLCS(x,y,z)
+
+#define TracyParameterRegister(x)
+#define TracyParameterSetup(x,y,z,w)
+#define TracyIsConnected false
+
+#define TracyFiberEnter(x)
+#define TracyFiberLeave
+
+#else
+
+#include <string.h>
+
+#include "client/TracyLock.hpp"
+#include "client/TracyProfiler.hpp"
+#include "client/TracyScoped.hpp"
+
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define ZoneNamed( varname, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define ZoneNamedN( varname, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define ZoneNamedC( varname, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define ZoneNamedNC( varname, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+
+# define ZoneTransient( varname, active ) tracy::ScopedZone varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), nullptr, 0, TRACY_CALLSTACK, active );
+# define ZoneTransientN( varname, name, active ) tracy::ScopedZone varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), TRACY_CALLSTACK, active );
+#else
+# define ZoneNamed( varname, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), active );
+# define ZoneNamedN( varname, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), active );
+# define ZoneNamedC( varname, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), active );
+# define ZoneNamedNC( varname, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), active );
+
+# define ZoneTransient( varname, active ) tracy::ScopedZone varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), nullptr, 0, active );
+# define ZoneTransientN( varname, name, active ) tracy::ScopedZone varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), active );
+#endif
+
+#define ZoneScoped ZoneNamed( ___tracy_scoped_zone, true )
+#define ZoneScopedN( name ) ZoneNamedN( ___tracy_scoped_zone, name, true )
+#define ZoneScopedC( color ) ZoneNamedC( ___tracy_scoped_zone, color, true )
+#define ZoneScopedNC( name, color ) ZoneNamedNC( ___tracy_scoped_zone, name, color, true )
+
+#define ZoneText( txt, size ) ___tracy_scoped_zone.Text( txt, size );
+#define ZoneTextV( varname, txt, size ) varname.Text( txt, size );
+#define ZoneName( txt, size ) ___tracy_scoped_zone.Name( txt, size );
+#define ZoneNameV( varname, txt, size ) varname.Name( txt, size );
+#define ZoneColor( color ) ___tracy_scoped_zone.Color( color );
+#define ZoneColorV( varname, color ) varname.Color( color );
+#define ZoneValue( value ) ___tracy_scoped_zone.Value( value );
+#define ZoneValueV( varname, value ) varname.Value( value );
+#define ZoneIsActive ___tracy_scoped_zone.IsActive()
+#define ZoneIsActiveV( varname ) varname.IsActive()
+
+#define FrameMark tracy::Profiler::SendFrameMark( nullptr );
+#define FrameMarkNamed( name ) tracy::Profiler::SendFrameMark( name );
+#define FrameMarkStart( name ) tracy::Profiler::SendFrameMark( name, tracy::QueueType::FrameMarkMsgStart );
+#define FrameMarkEnd( name ) tracy::Profiler::SendFrameMark( name, tracy::QueueType::FrameMarkMsgEnd );
+
+#define FrameImage( image, width, height, offset, flip ) tracy::Profiler::SendFrameImage( image, width, height, offset, flip );
+
+#define TracyLockable( type, varname ) tracy::Lockable<type> varname { [] () -> const tracy::SourceLocationData* { static constexpr tracy::SourceLocationData srcloc { nullptr, #type " " #varname, __FILE__, __LINE__, 0 }; return &srcloc; }() };
+#define TracyLockableN( type, varname, desc ) tracy::Lockable<type> varname { [] () -> const tracy::SourceLocationData* { static constexpr tracy::SourceLocationData srcloc { nullptr, desc, __FILE__, __LINE__, 0 }; return &srcloc; }() };
+#define TracySharedLockable( type, varname ) tracy::SharedLockable<type> varname { [] () -> const tracy::SourceLocationData* { static constexpr tracy::SourceLocationData srcloc { nullptr, #type " " #varname, __FILE__, __LINE__, 0 }; return &srcloc; }() };
+#define TracySharedLockableN( type, varname, desc ) tracy::SharedLockable<type> varname { [] () -> const tracy::SourceLocationData* { static constexpr tracy::SourceLocationData srcloc { nullptr, desc, __FILE__, __LINE__, 0 }; return &srcloc; }() };
+#define LockableBase( type ) tracy::Lockable<type>
+#define SharedLockableBase( type ) tracy::SharedLockable<type>
+#define LockMark( varname ) static constexpr tracy::SourceLocationData __tracy_lock_location_##varname { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; varname.Mark( &__tracy_lock_location_##varname );
+#define LockableName( varname, txt, size ) varname.CustomName( txt, size );
+
+#define TracyPlot( name, val ) tracy::Profiler::PlotData( name, val );
+#define TracyPlotConfig( name, type ) tracy::Profiler::ConfigurePlot( name, type );
+
+#define TracyAppInfo( txt, size ) tracy::Profiler::MessageAppInfo( txt, size );
+
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyMessage( txt, size ) tracy::Profiler::Message( txt, size, TRACY_CALLSTACK );
+# define TracyMessageL( txt ) tracy::Profiler::Message( txt, TRACY_CALLSTACK );
+# define TracyMessageC( txt, size, color ) tracy::Profiler::MessageColor( txt, size, color, TRACY_CALLSTACK );
+# define TracyMessageLC( txt, color ) tracy::Profiler::MessageColor( txt, color, TRACY_CALLSTACK );
+
+# define TracyAlloc( ptr, size ) tracy::Profiler::MemAllocCallstack( ptr, size, TRACY_CALLSTACK, false );
+# define TracyFree( ptr ) tracy::Profiler::MemFreeCallstack( ptr, TRACY_CALLSTACK, false );
+# define TracySecureAlloc( ptr, size ) tracy::Profiler::MemAllocCallstack( ptr, size, TRACY_CALLSTACK, true );
+# define TracySecureFree( ptr ) tracy::Profiler::MemFreeCallstack( ptr, TRACY_CALLSTACK, true );
+
+# define TracyAllocN( ptr, size, name ) tracy::Profiler::MemAllocCallstackNamed( ptr, size, TRACY_CALLSTACK, false, name );
+# define TracyFreeN( ptr, name ) tracy::Profiler::MemFreeCallstackNamed( ptr, TRACY_CALLSTACK, false, name );
+# define TracySecureAllocN( ptr, size, name ) tracy::Profiler::MemAllocCallstackNamed( ptr, size, TRACY_CALLSTACK, true, name );
+# define TracySecureFreeN( ptr, name ) tracy::Profiler::MemFreeCallstackNamed( ptr, TRACY_CALLSTACK, true, name );
+#else
+# define TracyMessage( txt, size ) tracy::Profiler::Message( txt, size, 0 );
+# define TracyMessageL( txt ) tracy::Profiler::Message( txt, 0 );
+# define TracyMessageC( txt, size, color ) tracy::Profiler::MessageColor( txt, size, color, 0 );
+# define TracyMessageLC( txt, color ) tracy::Profiler::MessageColor( txt, color, 0 );
+
+# define TracyAlloc( ptr, size ) tracy::Profiler::MemAlloc( ptr, size, false );
+# define TracyFree( ptr ) tracy::Profiler::MemFree( ptr, false );
+# define TracySecureAlloc( ptr, size ) tracy::Profiler::MemAlloc( ptr, size, true );
+# define TracySecureFree( ptr ) tracy::Profiler::MemFree( ptr, true );
+
+# define TracyAllocN( ptr, size, name ) tracy::Profiler::MemAllocNamed( ptr, size, false, name );
+# define TracyFreeN( ptr, name ) tracy::Profiler::MemFreeNamed( ptr, false, name );
+# define TracySecureAllocN( ptr, size, name ) tracy::Profiler::MemAllocNamed( ptr, size, true, name );
+# define TracySecureFreeN( ptr, name ) tracy::Profiler::MemFreeNamed( ptr, true, name );
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+# define ZoneNamedS( varname, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+# define ZoneNamedNS( varname, name, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+# define ZoneNamedCS( varname, color, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+# define ZoneNamedNCS( varname, name, color, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::ScopedZone varname( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+
+# define ZoneTransientS( varname, depth, active ) tracy::ScopedZone varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), nullptr, 0, depth, active );
+# define ZoneTransientNS( varname, name, depth, active ) tracy::ScopedZone varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), depth, active );
+
+# define ZoneScopedS( depth ) ZoneNamedS( ___tracy_scoped_zone, depth, true )
+# define ZoneScopedNS( name, depth ) ZoneNamedNS( ___tracy_scoped_zone, name, depth, true )
+# define ZoneScopedCS( color, depth ) ZoneNamedCS( ___tracy_scoped_zone, color, depth, true )
+# define ZoneScopedNCS( name, color, depth ) ZoneNamedNCS( ___tracy_scoped_zone, name, color, depth, true )
+
+# define TracyAllocS( ptr, size, depth ) tracy::Profiler::MemAllocCallstack( ptr, size, depth, false );
+# define TracyFreeS( ptr, depth ) tracy::Profiler::MemFreeCallstack( ptr, depth, false );
+# define TracySecureAllocS( ptr, size, depth ) tracy::Profiler::MemAllocCallstack( ptr, size, depth, true );
+# define TracySecureFreeS( ptr, depth ) tracy::Profiler::MemFreeCallstack( ptr, depth, true );
+
+# define TracyAllocNS( ptr, size, depth, name ) tracy::Profiler::MemAllocCallstackNamed( ptr, size, depth, false, name );
+# define TracyFreeNS( ptr, depth, name ) tracy::Profiler::MemFreeCallstackNamed( ptr, depth, false, name );
+# define TracySecureAllocNS( ptr, size, depth, name ) tracy::Profiler::MemAllocCallstackNamed( ptr, size, depth, true, name );
+# define TracySecureFreeNS( ptr, depth, name ) tracy::Profiler::MemFreeCallstackNamed( ptr, depth, true, name );
+
+# define TracyMessageS( txt, size, depth ) tracy::Profiler::Message( txt, size, depth );
+# define TracyMessageLS( txt, depth ) tracy::Profiler::Message( txt, depth );
+# define TracyMessageCS( txt, size, color, depth ) tracy::Profiler::MessageColor( txt, size, color, depth );
+# define TracyMessageLCS( txt, color, depth ) tracy::Profiler::MessageColor( txt, color, depth );
+#else
+# define ZoneNamedS( varname, depth, active ) ZoneNamed( varname, active )
+# define ZoneNamedNS( varname, name, depth, active ) ZoneNamedN( varname, name, active )
+# define ZoneNamedCS( varname, color, depth, active ) ZoneNamedC( varname, color, active )
+# define ZoneNamedNCS( varname, name, color, depth, active ) ZoneNamedNC( varname, name, color, active )
+
+# define ZoneTransientS( varname, depth, active ) ZoneTransient( varname, active )
+# define ZoneTransientNS( varname, name, depth, active ) ZoneTransientN( varname, name, active )
+
+# define ZoneScopedS( depth ) ZoneScoped
+# define ZoneScopedNS( name, depth ) ZoneScopedN( name )
+# define ZoneScopedCS( color, depth ) ZoneScopedC( color )
+# define ZoneScopedNCS( name, color, depth ) ZoneScopedNC( name, color )
+
+# define TracyAllocS( ptr, size, depth ) TracyAlloc( ptr, size )
+# define TracyFreeS( ptr, depth ) TracyFree( ptr )
+# define TracySecureAllocS( ptr, size, depth ) TracySecureAlloc( ptr, size )
+# define TracySecureFreeS( ptr, depth ) TracySecureFree( ptr )
+
+# define TracyAllocNS( ptr, size, depth, name ) TracyAlloc( ptr, size, name )
+# define TracyFreeNS( ptr, depth, name ) TracyFree( ptr, name )
+# define TracySecureAllocNS( ptr, size, depth, name ) TracySecureAlloc( ptr, size, name )
+# define TracySecureFreeNS( ptr, depth, name ) TracySecureFree( ptr, name )
+
+# define TracyMessageS( txt, size, depth ) TracyMessage( txt, size )
+# define TracyMessageLS( txt, depth ) TracyMessageL( txt )
+# define TracyMessageCS( txt, size, color, depth ) TracyMessageC( txt, size, color )
+# define TracyMessageLCS( txt, color, depth ) TracyMessageLC( txt, color )
+#endif
+
+#define TracyParameterRegister( cb ) tracy::Profiler::ParameterRegister( cb );
+#define TracyParameterSetup( idx, name, isBool, val ) tracy::Profiler::ParameterSetup( idx, name, isBool, val );
+#define TracyIsConnected tracy::GetProfiler().IsConnected()
+
+#ifdef TRACY_FIBERS
+# define TracyFiberEnter( fiber ) tracy::Profiler::EnterFiber( fiber );
+# define TracyFiberLeave tracy::Profiler::LeaveFiber();
+#endif
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyC.h b/3rdparty/tracy/tracy/TracyC.h
new file mode 100644
index 0000000..2461606
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyC.h
@@ -0,0 +1,320 @@
+#ifndef __TRACYC_HPP__
+#define __TRACYC_HPP__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "client/TracyCallstack.h"
+#include "common/TracyApi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+TRACY_API void ___tracy_set_thread_name( const char* name );
+
+#define TracyCSetThreadName( name ) ___tracy_set_thread_name( name );
+
+
+#ifndef TRACY_ENABLE
+
+typedef const void* TracyCZoneCtx;
+
+#define TracyCZone(c,x)
+#define TracyCZoneN(c,x,y)
+#define TracyCZoneC(c,x,y)
+#define TracyCZoneNC(c,x,y,z)
+#define TracyCZoneEnd(c)
+#define TracyCZoneText(c,x,y)
+#define TracyCZoneName(c,x,y)
+#define TracyCZoneColor(c,x)
+#define TracyCZoneValue(c,x)
+
+#define TracyCAlloc(x,y)
+#define TracyCFree(x)
+#define TracyCSecureAlloc(x,y)
+#define TracyCSecureFree(x)
+
+#define TracyCAllocN(x,y,z)
+#define TracyCFreeN(x,y)
+#define TracyCSecureAllocN(x,y,z)
+#define TracyCSecureFreeN(x,y)
+
+#define TracyCFrameMark
+#define TracyCFrameMarkNamed(x)
+#define TracyCFrameMarkStart(x)
+#define TracyCFrameMarkEnd(x)
+#define TracyCFrameImage(x,y,z,w,a)
+
+#define TracyCPlot(x,y)
+#define TracyCMessage(x,y)
+#define TracyCMessageL(x)
+#define TracyCMessageC(x,y,z)
+#define TracyCMessageLC(x,y)
+#define TracyCAppInfo(x,y)
+
+#define TracyCZoneS(x,y,z)
+#define TracyCZoneNS(x,y,z,w)
+#define TracyCZoneCS(x,y,z,w)
+#define TracyCZoneNCS(x,y,z,w,a)
+
+#define TracyCAllocS(x,y,z)
+#define TracyCFreeS(x,y)
+#define TracyCSecureAllocS(x,y,z)
+#define TracyCSecureFreeS(x,y)
+
+#define TracyCAllocNS(x,y,z,w)
+#define TracyCFreeNS(x,y,z)
+#define TracyCSecureAllocNS(x,y,z,w)
+#define TracyCSecureFreeNS(x,y,z)
+
+#define TracyCMessageS(x,y,z)
+#define TracyCMessageLS(x,y)
+#define TracyCMessageCS(x,y,z,w)
+#define TracyCMessageLCS(x,y,z)
+
+#define TracyCIsConnected 0
+
+#ifdef TRACY_FIBERS
+# define TracyCFiberEnter(fiber)
+# define TracyCFiberLeave
+#endif
+
+#else
+
+#ifndef TracyConcat
+# define TracyConcat(x,y) TracyConcatIndirect(x,y)
+#endif
+#ifndef TracyConcatIndirect
+# define TracyConcatIndirect(x,y) x##y
+#endif
+
+struct ___tracy_source_location_data
+{
+ const char* name;
+ const char* function;
+ const char* file;
+ uint32_t line;
+ uint32_t color;
+};
+
+struct ___tracy_c_zone_context
+{
+ uint32_t id;
+ int active;
+};
+
+struct ___tracy_gpu_time_data
+{
+ int64_t gpuTime;
+ uint16_t queryId;
+ uint8_t context;
+};
+
+struct ___tracy_gpu_zone_begin_data {
+ uint64_t srcloc;
+ uint16_t queryId;
+ uint8_t context;
+};
+
+struct ___tracy_gpu_zone_end_data {
+ uint16_t queryId;
+ uint8_t context;
+};
+
+struct ___tracy_gpu_new_context_data {
+ int64_t gpuTime;
+ float period;
+ uint8_t context;
+ uint8_t flags;
+ uint8_t type;
+};
+
+struct ___tracy_gpu_context_name_data {
+ uint8_t context;
+ const char* name;
+ uint16_t len;
+};
+
+// Some containers don't support storing const types.
+// This struct, as visible to user, is immutable, so treat it as if const was declared here.
+typedef /*const*/ struct ___tracy_c_zone_context TracyCZoneCtx;
+
+
+#ifdef TRACY_MANUAL_LIFETIME
+TRACY_API void ___tracy_startup_profiler(void);
+TRACY_API void ___tracy_shutdown_profiler(void);
+#endif
+
+TRACY_API uint64_t ___tracy_alloc_srcloc( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz );
+TRACY_API uint64_t ___tracy_alloc_srcloc_name( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz );
+
+TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin( const struct ___tracy_source_location_data* srcloc, int active );
+TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin_callstack( const struct ___tracy_source_location_data* srcloc, int depth, int active );
+TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin_alloc( uint64_t srcloc, int active );
+TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin_alloc_callstack( uint64_t srcloc, int depth, int active );
+TRACY_API void ___tracy_emit_zone_end( TracyCZoneCtx ctx );
+TRACY_API void ___tracy_emit_zone_text( TracyCZoneCtx ctx, const char* txt, size_t size );
+TRACY_API void ___tracy_emit_zone_name( TracyCZoneCtx ctx, const char* txt, size_t size );
+TRACY_API void ___tracy_emit_zone_color( TracyCZoneCtx ctx, uint32_t color );
+TRACY_API void ___tracy_emit_zone_value( TracyCZoneCtx ctx, uint64_t value );
+
+TRACY_API void ___tracy_emit_gpu_zone_begin_alloc( const struct ___tracy_gpu_zone_begin_data );
+TRACY_API void ___tracy_emit_gpu_zone_end( const struct ___tracy_gpu_zone_end_data data );
+TRACY_API void ___tracy_emit_gpu_time( const struct ___tracy_gpu_time_data );
+TRACY_API void ___tracy_emit_gpu_new_context( const struct ___tracy_gpu_new_context_data );
+TRACY_API void ___tracy_emit_gpu_context_name( const struct ___tracy_gpu_context_name_data );
+
+TRACY_API void ___tracy_emit_gpu_zone_begin_alloc_serial( const struct ___tracy_gpu_zone_begin_data );
+TRACY_API void ___tracy_emit_gpu_zone_end_serial( const struct ___tracy_gpu_zone_end_data data );
+TRACY_API void ___tracy_emit_gpu_time_serial( const struct ___tracy_gpu_time_data );
+TRACY_API void ___tracy_emit_gpu_new_context_serial( const struct ___tracy_gpu_new_context_data );
+TRACY_API void ___tracy_emit_gpu_context_name_serial( const struct ___tracy_gpu_context_name_data );
+
+TRACY_API int ___tracy_connected(void);
+
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyCZone( ctx, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { NULL, __func__, __FILE__, (uint32_t)__LINE__, 0 }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyCZoneN( ctx, name, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, 0 }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyCZoneC( ctx, color, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { NULL, __func__, __FILE__, (uint32_t)__LINE__, color }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyCZoneNC( ctx, name, color, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, color }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), TRACY_CALLSTACK, active );
+#else
+# define TracyCZone( ctx, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { NULL, __func__, __FILE__, (uint32_t)__LINE__, 0 }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin( &TracyConcat(__tracy_source_location,__LINE__), active );
+# define TracyCZoneN( ctx, name, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, 0 }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin( &TracyConcat(__tracy_source_location,__LINE__), active );
+# define TracyCZoneC( ctx, color, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { NULL, __func__, __FILE__, (uint32_t)__LINE__, color }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin( &TracyConcat(__tracy_source_location,__LINE__), active );
+# define TracyCZoneNC( ctx, name, color, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, color }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin( &TracyConcat(__tracy_source_location,__LINE__), active );
+#endif
+
+#define TracyCZoneEnd( ctx ) ___tracy_emit_zone_end( ctx );
+
+#define TracyCZoneText( ctx, txt, size ) ___tracy_emit_zone_text( ctx, txt, size );
+#define TracyCZoneName( ctx, txt, size ) ___tracy_emit_zone_name( ctx, txt, size );
+#define TracyCZoneColor( ctx, color ) ___tracy_emit_zone_color( ctx, color );
+#define TracyCZoneValue( ctx, value ) ___tracy_emit_zone_value( ctx, value );
+
+
+TRACY_API void ___tracy_emit_memory_alloc( const void* ptr, size_t size, int secure );
+TRACY_API void ___tracy_emit_memory_alloc_callstack( const void* ptr, size_t size, int depth, int secure );
+TRACY_API void ___tracy_emit_memory_free( const void* ptr, int secure );
+TRACY_API void ___tracy_emit_memory_free_callstack( const void* ptr, int depth, int secure );
+TRACY_API void ___tracy_emit_memory_alloc_named( const void* ptr, size_t size, int secure, const char* name );
+TRACY_API void ___tracy_emit_memory_alloc_callstack_named( const void* ptr, size_t size, int depth, int secure, const char* name );
+TRACY_API void ___tracy_emit_memory_free_named( const void* ptr, int secure, const char* name );
+TRACY_API void ___tracy_emit_memory_free_callstack_named( const void* ptr, int depth, int secure, const char* name );
+
+TRACY_API void ___tracy_emit_message( const char* txt, size_t size, int callstack );
+TRACY_API void ___tracy_emit_messageL( const char* txt, int callstack );
+TRACY_API void ___tracy_emit_messageC( const char* txt, size_t size, uint32_t color, int callstack );
+TRACY_API void ___tracy_emit_messageLC( const char* txt, uint32_t color, int callstack );
+
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyCAlloc( ptr, size ) ___tracy_emit_memory_alloc_callstack( ptr, size, TRACY_CALLSTACK, 0 )
+# define TracyCFree( ptr ) ___tracy_emit_memory_free_callstack( ptr, TRACY_CALLSTACK, 0 )
+# define TracyCSecureAlloc( ptr, size ) ___tracy_emit_memory_alloc_callstack( ptr, size, TRACY_CALLSTACK, 1 )
+# define TracyCSecureFree( ptr ) ___tracy_emit_memory_free_callstack( ptr, TRACY_CALLSTACK, 1 )
+
+# define TracyCAllocN( ptr, size, name ) ___tracy_emit_memory_alloc_callstack_named( ptr, size, TRACY_CALLSTACK, 0, name )
+# define TracyCFreeN( ptr, name ) ___tracy_emit_memory_free_callstack_named( ptr, TRACY_CALLSTACK, 0, name )
+# define TracyCSecureAllocN( ptr, size, name ) ___tracy_emit_memory_alloc_callstack_named( ptr, size, TRACY_CALLSTACK, 1, name )
+# define TracyCSecureFreeN( ptr, name ) ___tracy_emit_memory_free_callstack_named( ptr, TRACY_CALLSTACK, 1, name )
+
+# define TracyCMessage( txt, size ) ___tracy_emit_message( txt, size, TRACY_CALLSTACK );
+# define TracyCMessageL( txt ) ___tracy_emit_messageL( txt, TRACY_CALLSTACK );
+# define TracyCMessageC( txt, size, color ) ___tracy_emit_messageC( txt, size, color, TRACY_CALLSTACK );
+# define TracyCMessageLC( txt, color ) ___tracy_emit_messageLC( txt, color, TRACY_CALLSTACK );
+#else
+# define TracyCAlloc( ptr, size ) ___tracy_emit_memory_alloc( ptr, size, 0 );
+# define TracyCFree( ptr ) ___tracy_emit_memory_free( ptr, 0 );
+# define TracyCSecureAlloc( ptr, size ) ___tracy_emit_memory_alloc( ptr, size, 1 );
+# define TracyCSecureFree( ptr ) ___tracy_emit_memory_free( ptr, 1 );
+
+# define TracyCAllocN( ptr, size, name ) ___tracy_emit_memory_alloc_named( ptr, size, 0, name );
+# define TracyCFreeN( ptr, name ) ___tracy_emit_memory_free_named( ptr, 0, name );
+# define TracyCSecureAllocN( ptr, size, name ) ___tracy_emit_memory_alloc_named( ptr, size, 1, name );
+# define TracyCSecureFreeN( ptr, name ) ___tracy_emit_memory_free_named( ptr, 1, name );
+
+# define TracyCMessage( txt, size ) ___tracy_emit_message( txt, size, 0 );
+# define TracyCMessageL( txt ) ___tracy_emit_messageL( txt, 0 );
+# define TracyCMessageC( txt, size, color ) ___tracy_emit_messageC( txt, size, color, 0 );
+# define TracyCMessageLC( txt, color ) ___tracy_emit_messageLC( txt, color, 0 );
+#endif
+
+
+TRACY_API void ___tracy_emit_frame_mark( const char* name );
+TRACY_API void ___tracy_emit_frame_mark_start( const char* name );
+TRACY_API void ___tracy_emit_frame_mark_end( const char* name );
+TRACY_API void ___tracy_emit_frame_image( const void* image, uint16_t w, uint16_t h, uint8_t offset, int flip );
+
+#define TracyCFrameMark ___tracy_emit_frame_mark( 0 );
+#define TracyCFrameMarkNamed( name ) ___tracy_emit_frame_mark( name );
+#define TracyCFrameMarkStart( name ) ___tracy_emit_frame_mark_start( name );
+#define TracyCFrameMarkEnd( name ) ___tracy_emit_frame_mark_end( name );
+#define TracyCFrameImage( image, width, height, offset, flip ) ___tracy_emit_frame_image( image, width, height, offset, flip );
+
+
+TRACY_API void ___tracy_emit_plot( const char* name, double val );
+TRACY_API void ___tracy_emit_message_appinfo( const char* txt, size_t size );
+
+#define TracyCPlot( name, val ) ___tracy_emit_plot( name, val );
+#define TracyCAppInfo( txt, size ) ___tracy_emit_message_appinfo( txt, size );
+
+
+#ifdef TRACY_HAS_CALLSTACK
+# define TracyCZoneS( ctx, depth, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { NULL, __func__, __FILE__, (uint32_t)__LINE__, 0 }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+# define TracyCZoneNS( ctx, name, depth, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, 0 }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+# define TracyCZoneCS( ctx, color, depth, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { NULL, __func__, __FILE__, (uint32_t)__LINE__, color }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+# define TracyCZoneNCS( ctx, name, color, depth, active ) static const struct ___tracy_source_location_data TracyConcat(__tracy_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, color }; TracyCZoneCtx ctx = ___tracy_emit_zone_begin_callstack( &TracyConcat(__tracy_source_location,__LINE__), depth, active );
+
+# define TracyCAllocS( ptr, size, depth ) ___tracy_emit_memory_alloc_callstack( ptr, size, depth, 0 )
+# define TracyCFreeS( ptr, depth ) ___tracy_emit_memory_free_callstack( ptr, depth, 0 )
+# define TracyCSecureAllocS( ptr, size, depth ) ___tracy_emit_memory_alloc_callstack( ptr, size, depth, 1 )
+# define TracyCSecureFreeS( ptr, depth ) ___tracy_emit_memory_free_callstack( ptr, depth, 1 )
+
+# define TracyCAllocNS( ptr, size, depth, name ) ___tracy_emit_memory_alloc_callstack_named( ptr, size, depth, 0, name )
+# define TracyCFreeNS( ptr, depth, name ) ___tracy_emit_memory_free_callstack_named( ptr, depth, 0, name )
+# define TracyCSecureAllocNS( ptr, size, depth, name ) ___tracy_emit_memory_alloc_callstack_named( ptr, size, depth, 1, name )
+# define TracyCSecureFreeNS( ptr, depth, name ) ___tracy_emit_memory_free_callstack_named( ptr, depth, 1, name )
+
+# define TracyCMessageS( txt, size, depth ) ___tracy_emit_message( txt, size, depth );
+# define TracyCMessageLS( txt, depth ) ___tracy_emit_messageL( txt, depth );
+# define TracyCMessageCS( txt, size, color, depth ) ___tracy_emit_messageC( txt, size, color, depth );
+# define TracyCMessageLCS( txt, color, depth ) ___tracy_emit_messageLC( txt, color, depth );
+#else
+# define TracyCZoneS( ctx, depth, active ) TracyCZone( ctx, active )
+# define TracyCZoneNS( ctx, name, depth, active ) TracyCZoneN( ctx, name, active )
+# define TracyCZoneCS( ctx, color, depth, active ) TracyCZoneC( ctx, color, active )
+# define TracyCZoneNCS( ctx, name, color, depth, active ) TracyCZoneNC( ctx, name, color, active )
+
+# define TracyCAllocS( ptr, size, depth ) TracyCAlloc( ptr, size )
+# define TracyCFreeS( ptr, depth ) TracyCFree( ptr )
+# define TracyCSecureAllocS( ptr, size, depth ) TracyCSecureAlloc( ptr, size )
+# define TracyCSecureFreeS( ptr, depth ) TracyCSecureFree( ptr )
+
+# define TracyCAllocNS( ptr, size, depth, name ) TracyCAllocN( ptr, size, name )
+# define TracyCFreeNS( ptr, depth, name ) TracyCFreeN( ptr, name )
+# define TracyCSecureAllocNS( ptr, size, depth, name ) TracyCSecureAllocN( ptr, size, name )
+# define TracyCSecureFreeNS( ptr, depth, name ) TracyCSecureFreeN( ptr, name )
+
+# define TracyCMessageS( txt, size, depth ) TracyCMessage( txt, size )
+# define TracyCMessageLS( txt, depth ) TracyCMessageL( txt )
+# define TracyCMessageCS( txt, size, color, depth ) TracyCMessageC( txt, size, color )
+# define TracyCMessageLCS( txt, color, depth ) TracyCMessageLC( txt, color )
+#endif
+
+#define TracyCIsConnected ___tracy_connected()
+
+TRACY_API void ___tracy_fiber_enter( const char* fiber );
+TRACY_API void ___tracy_fiber_leave( void );
+
+#ifdef TRACY_FIBERS
+# define TracyCFiberEnter( fiber ) ___tracy_fiber_enter( fiber );
+# define TracyCFiberLeave ___tracy_fiber_leave();
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyClient.cpp b/3rdparty/tracy/tracy/TracyClient.cpp
new file mode 100644
index 0000000..4aa4647
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyClient.cpp
@@ -0,0 +1,56 @@
+//
+// Tracy profiler
+// ----------------
+//
+// For fast integration, compile and
+// link with this source file (and none
+// other) in your executable (or in the
+// main DLL / shared object on multi-DLL
+// projects).
+//
+
+// Define TRACY_ENABLE to enable profiler.
+
+#include "common/TracySystem.cpp"
+
+#ifdef TRACY_ENABLE
+
+#ifdef _MSC_VER
+# pragma warning(push, 0)
+#endif
+
+#include "common/tracy_lz4.cpp"
+#include "client/TracyProfiler.cpp"
+#include "client/TracyCallstack.cpp"
+#include "client/TracySysTime.cpp"
+#include "client/TracySysTrace.cpp"
+#include "common/TracySocket.cpp"
+#include "client/tracy_rpmalloc.cpp"
+#include "client/TracyDxt1.cpp"
+#include "client/TracyAlloc.cpp"
+
+#if TRACY_HAS_CALLSTACK == 2 || TRACY_HAS_CALLSTACK == 3 || TRACY_HAS_CALLSTACK == 4 || TRACY_HAS_CALLSTACK == 6
+# include "libbacktrace/alloc.cpp"
+# include "libbacktrace/dwarf.cpp"
+# include "libbacktrace/fileline.cpp"
+# include "libbacktrace/mmapio.cpp"
+# include "libbacktrace/posix.cpp"
+# include "libbacktrace/sort.cpp"
+# include "libbacktrace/state.cpp"
+# if TRACY_HAS_CALLSTACK == 4
+# include "libbacktrace/macho.cpp"
+# else
+# include "libbacktrace/elf.cpp"
+# endif
+# include "common/TracyStackFrames.cpp"
+#endif
+
+#ifdef _MSC_VER
+# pragma comment(lib, "ws2_32.lib")
+# pragma comment(lib, "dbghelp.lib")
+# pragma comment(lib, "advapi32.lib")
+# pragma comment(lib, "user32.lib")
+# pragma warning(pop)
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyD3D11.hpp b/3rdparty/tracy/tracy/TracyD3D11.hpp
new file mode 100644
index 0000000..8552f36
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyD3D11.hpp
@@ -0,0 +1,442 @@
+#ifndef __TRACYD3D11_HPP__
+#define __TRACYD3D11_HPP__
+
+#ifndef TRACY_ENABLE
+
+#define TracyD3D11Context(device,queue) nullptr
+#define TracyD3D11Destroy(ctx)
+#define TracyD3D11ContextName(ctx, name, size)
+
+#define TracyD3D11NewFrame(ctx)
+
+#define TracyD3D11Zone(ctx, name)
+#define TracyD3D11ZoneC(ctx, name, color)
+#define TracyD3D11NamedZone(ctx, varname, name, active)
+#define TracyD3D11NamedZoneC(ctx, varname, name, color, active)
+#define TracyD3D12ZoneTransient(ctx, varname, name, active)
+
+#define TracyD3D11ZoneS(ctx, name, depth)
+#define TracyD3D11ZoneCS(ctx, name, color, depth)
+#define TracyD3D11NamedZoneS(ctx, varname, name, depth, active)
+#define TracyD3D11NamedZoneCS(ctx, varname, name, color, depth, active)
+#define TracyD3D12ZoneTransientS(ctx, varname, name, depth, active)
+
+#define TracyD3D11Collect(ctx)
+
+namespace tracy
+{
+class D3D11ZoneScope {};
+}
+
+using TracyD3D11Ctx = void*;
+
+#else
+
+#include <atomic>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "Tracy.hpp"
+#include "client/TracyProfiler.hpp"
+#include "client/TracyCallstack.hpp"
+#include "common/TracyAlign.hpp"
+#include "common/TracyAlloc.hpp"
+
+namespace tracy
+{
+
+class D3D11Ctx
+{
+ friend class D3D11ZoneScope;
+
+ enum { QueryCount = 64 * 1024 };
+
+public:
+ D3D11Ctx( ID3D11Device* device, ID3D11DeviceContext* devicectx )
+ : m_device( device )
+ , m_devicectx( devicectx )
+ , m_context( GetGpuCtxCounter().fetch_add( 1, std::memory_order_relaxed ) )
+ , m_head( 0 )
+ , m_tail( 0 )
+ {
+ assert( m_context != 255 );
+
+ for (int i = 0; i < QueryCount; i++)
+ {
+ HRESULT hr = S_OK;
+ D3D11_QUERY_DESC desc;
+ desc.MiscFlags = 0;
+
+ desc.Query = D3D11_QUERY_TIMESTAMP;
+ hr |= device->CreateQuery(&desc, &m_queries[i]);
+
+ desc.Query = D3D11_QUERY_TIMESTAMP_DISJOINT;
+ hr |= device->CreateQuery(&desc, &m_disjoints[i]);
+
+ m_disjointMap[i] = nullptr;
+
+ assert(SUCCEEDED(hr));
+ }
+
+ // Force query the initial GPU timestamp (pipeline stall)
+ D3D11_QUERY_DATA_TIMESTAMP_DISJOINT disjoint;
+ UINT64 timestamp;
+ for (int attempts = 0; attempts < 50; attempts++)
+ {
+ devicectx->Begin(m_disjoints[0]);
+ devicectx->End(m_queries[0]);
+ devicectx->End(m_disjoints[0]);
+ devicectx->Flush();
+
+ while (devicectx->GetData(m_disjoints[0], &disjoint, sizeof(disjoint), 0) == S_FALSE)
+ /* Nothing */;
+
+ if (disjoint.Disjoint)
+ continue;
+
+ while (devicectx->GetData(m_queries[0], &timestamp, sizeof(timestamp), 0) == S_FALSE)
+ /* Nothing */;
+
+ break;
+ }
+
+ int64_t tgpu = timestamp * (1000000000ull / disjoint.Frequency);
+ int64_t tcpu = Profiler::GetTime();
+
+ uint8_t flags = 0;
+
+ const float period = 1.f;
+ auto* item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuNewContext );
+ MemWrite( &item->gpuNewContext.cpuTime, tcpu );
+ MemWrite( &item->gpuNewContext.gpuTime, tgpu );
+ memset(&item->gpuNewContext.thread, 0, sizeof(item->gpuNewContext.thread));
+ MemWrite( &item->gpuNewContext.period, period );
+ MemWrite( &item->gpuNewContext.context, m_context );
+ MemWrite( &item->gpuNewContext.flags, flags );
+ MemWrite( &item->gpuNewContext.type, GpuContextType::Direct3D11 );
+
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+
+ Profiler::QueueSerialFinish();
+ }
+
+ ~D3D11Ctx()
+ {
+ for (int i = 0; i < QueryCount; i++)
+ {
+ m_queries[i]->Release();
+ m_disjoints[i]->Release();
+ m_disjointMap[i] = nullptr;
+ }
+ }
+
+ void Name( const char* name, uint16_t len )
+ {
+ auto ptr = (char*)tracy_malloc( len );
+ memcpy( ptr, name, len );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuContextName );
+ MemWrite( &item->gpuContextNameFat.context, m_context );
+ MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
+ MemWrite( &item->gpuContextNameFat.size, len );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ void Collect()
+ {
+ ZoneScopedC( Color::Red4 );
+
+ if( m_tail == m_head ) return;
+
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() )
+ {
+ m_head = m_tail = 0;
+ return;
+ }
+#endif
+
+ auto start = m_tail;
+ auto end = m_head + QueryCount;
+ auto cnt = (end - start) % QueryCount;
+ while (cnt > 1)
+ {
+ auto mid = start + cnt / 2;
+
+ bool available =
+ m_devicectx->GetData(m_disjointMap[mid % QueryCount], nullptr, 0, D3D11_ASYNC_GETDATA_DONOTFLUSH) == S_OK &&
+ m_devicectx->GetData(m_queries[mid % QueryCount], nullptr, 0, D3D11_ASYNC_GETDATA_DONOTFLUSH) == S_OK;
+
+ if (available)
+ {
+ start = mid;
+ }
+ else
+ {
+ end = mid;
+ }
+ cnt = (end - start) % QueryCount;
+ }
+
+ start %= QueryCount;
+
+ while (m_tail != start)
+ {
+ D3D11_QUERY_DATA_TIMESTAMP_DISJOINT disjoint;
+ UINT64 time;
+
+ m_devicectx->GetData(m_disjointMap[m_tail], &disjoint, sizeof(disjoint), 0);
+ m_devicectx->GetData(m_queries[m_tail], &time, sizeof(time), 0);
+
+ time *= (1000000000ull / disjoint.Frequency);
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuTime);
+ MemWrite(&item->gpuTime.gpuTime, (int64_t)time);
+ MemWrite(&item->gpuTime.queryId, (uint16_t)m_tail);
+ MemWrite(&item->gpuTime.context, m_context);
+ Profiler::QueueSerialFinish();
+
+ m_tail = (m_tail + 1) % QueryCount;
+ }
+ }
+
+private:
+ tracy_force_inline unsigned int NextQueryId()
+ {
+ const auto id = m_head;
+ m_head = ( m_head + 1 ) % QueryCount;
+ assert( m_head != m_tail );
+ return id;
+ }
+
+ tracy_force_inline ID3D11Query* TranslateQueryId( unsigned int id )
+ {
+ return m_queries[id];
+ }
+
+ tracy_force_inline ID3D11Query* MapDisjointQueryId( unsigned int id, unsigned int disjointId )
+ {
+ m_disjointMap[id] = m_disjoints[disjointId];
+ return m_disjoints[disjointId];
+ }
+
+ tracy_force_inline uint8_t GetId() const
+ {
+ return m_context;
+ }
+
+ ID3D11Device* m_device;
+ ID3D11DeviceContext* m_devicectx;
+
+ ID3D11Query* m_queries[QueryCount];
+ ID3D11Query* m_disjoints[QueryCount];
+ ID3D11Query* m_disjointMap[QueryCount]; // Multiple time queries can have one disjoint query
+ uint8_t m_context;
+
+ unsigned int m_head;
+ unsigned int m_tail;
+};
+
+class D3D11ZoneScope
+{
+public:
+ tracy_force_inline D3D11ZoneScope( D3D11Ctx* ctx, const SourceLocationData* srcloc, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ ctx->m_devicectx->Begin(ctx->MapDisjointQueryId(queryId, queryId));
+ ctx->m_devicectx->End(ctx->TranslateQueryId(queryId));
+
+ m_disjointId = queryId;
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginSerial );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, ctx->GetId() );
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline D3D11ZoneScope( D3D11Ctx* ctx, const SourceLocationData* srcloc, int depth, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ ctx->m_devicectx->Begin(ctx->MapDisjointQueryId(queryId, queryId));
+ ctx->m_devicectx->End(ctx->TranslateQueryId(queryId));
+
+ m_disjointId = queryId;
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginCallstackSerial );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, ctx->GetId() );
+
+ Profiler::QueueSerialFinish();
+
+ GetProfiler().SendCallstack( depth );
+ }
+
+ tracy_force_inline D3D11ZoneScope(D3D11Ctx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, bool active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(active&& GetProfiler().IsConnected())
+#else
+ : m_active(active)
+#endif
+ {
+ if( !m_active ) return;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ ctx->m_devicectx->Begin(ctx->MapDisjointQueryId(queryId, queryId));
+ ctx->m_devicectx->End(ctx->TranslateQueryId(queryId));
+
+ m_disjointId = queryId;
+
+ const auto sourceLocation = Profiler::AllocSourceLocation(line, source, sourceSz, function, functionSz, name, nameSz);
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, sourceLocation);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, static_cast<uint16_t>(queryId));
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline D3D11ZoneScope(D3D11Ctx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, int depth, bool active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(active&& GetProfiler().IsConnected())
+#else
+ : m_active(active)
+#endif
+ {
+ if( !m_active ) return;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ ctx->m_devicectx->Begin(ctx->MapDisjointQueryId(queryId, queryId));
+ ctx->m_devicectx->End(ctx->TranslateQueryId(queryId));
+
+ m_disjointId = queryId;
+
+ const auto sourceLocation = Profiler::AllocSourceLocation(line, source, sourceSz, function, functionSz, name, nameSz);
+
+ auto* item = Profiler::QueueSerialCallstack(Callstack(depth));
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocCallstackSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, sourceLocation);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, static_cast<uint16_t>(queryId));
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline ~D3D11ZoneScope()
+ {
+ if( !m_active ) return;
+
+ const auto queryId = m_ctx->NextQueryId();
+ m_ctx->m_devicectx->End(m_ctx->TranslateQueryId(queryId));
+ m_ctx->m_devicectx->End(m_ctx->MapDisjointQueryId(queryId, m_disjointId));
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneEndSerial );
+ MemWrite( &item->gpuZoneEnd.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneEnd.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneEnd.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneEnd.context, m_ctx->GetId() );
+
+ Profiler::QueueSerialFinish();
+ }
+
+private:
+ const bool m_active;
+
+ D3D11Ctx* m_ctx;
+ unsigned int m_disjointId;
+};
+
+static inline D3D11Ctx* CreateD3D11Context( ID3D11Device* device, ID3D11DeviceContext* devicectx )
+{
+ auto ctx = (D3D11Ctx*)tracy_malloc( sizeof( D3D11Ctx ) );
+ new(ctx) D3D11Ctx( device, devicectx );
+ return ctx;
+}
+
+static inline void DestroyD3D11Context( D3D11Ctx* ctx )
+{
+ ctx->~D3D11Ctx();
+ tracy_free( ctx );
+}
+}
+
+using TracyD3D11Ctx = tracy::D3D11Ctx*;
+
+#define TracyD3D11Context( device, devicectx ) tracy::CreateD3D11Context( device, devicectx );
+#define TracyD3D11Destroy(ctx) tracy::DestroyD3D11Context(ctx);
+#define TracyD3D11ContextName(ctx, name, size) ctx->Name(name, size);
+
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyD3D11Zone( ctx, name ) TracyD3D11NamedZoneS( ctx, ___tracy_gpu_zone, name, TRACY_CALLSTACK, true )
+# define TracyD3D11ZoneC( ctx, name, color ) TracyD3D11NamedZoneCS( ctx, ___tracy_gpu_zone, name, color, TRACY_CALLSTACK, true )
+# define TracyD3D11NamedZone( ctx, varname, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::D3D11ZoneScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyD3D11NamedZoneC( ctx, varname, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::D3D11ZoneScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyD3D11ZoneTransient(ctx, varname, name, active) TracyD3D11ZoneTransientS(ctx, varname, cmdList, name, TRACY_CALLSTACK, active)
+#else
+# define TracyD3D11Zone( ctx, name ) TracyD3D11NamedZone( ctx, ___tracy_gpu_zone, name, true )
+# define TracyD3D11ZoneC( ctx, name, color ) TracyD3D11NamedZoneC( ctx, ___tracy_gpu_zone, name, color, true )
+# define TracyD3D11NamedZone( ctx, varname, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::D3D11ZoneScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), active );
+# define TracyD3D11NamedZoneC( ctx, varname, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::D3D11ZoneScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), active );
+# define TracyD3D11ZoneTransient(ctx, varname, name, active) tracy::D3D11ZoneScope varname{ ctx, __LINE__, __FILE__, strlen(__FILE__), __FUNCTION__, strlen(__FUNCTION__), name, strlen(name), active };
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+# define TracyD3D11ZoneS( ctx, name, depth ) TracyD3D11NamedZoneS( ctx, ___tracy_gpu_zone, name, depth, true )
+# define TracyD3D11ZoneCS( ctx, name, color, depth ) TracyD3D11NamedZoneCS( ctx, ___tracy_gpu_zone, name, color, depth, true )
+# define TracyD3D11NamedZoneS( ctx, varname, name, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::D3D11ZoneScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), depth, active );
+# define TracyD3D11NamedZoneCS( ctx, varname, name, color, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::D3D11ZoneScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), depth, active );
+# define TracyD3D11ZoneTransientS(ctx, varname, name, depth, active) tracy::D3D11ZoneScope varname{ ctx, __LINE__, __FILE__, strlen(__FILE__), __FUNCTION__, strlen(__FUNCTION__), name, strlen(name), depth, active };
+#else
+# define TracyD3D11ZoneS( ctx, name, depth, active ) TracyD3D11Zone( ctx, name )
+# define TracyD3D11ZoneCS( ctx, name, color, depth, active ) TracyD3D11ZoneC( name, color )
+# define TracyD3D11NamedZoneS( ctx, varname, name, depth, active ) TracyD3D11NamedZone( ctx, varname, name, active )
+# define TracyD3D11NamedZoneCS( ctx, varname, name, color, depth, active ) TracyD3D11NamedZoneC( ctx, varname, name, color, active )
+# define TracyD3D11ZoneTransientS(ctx, varname, name, depth, active) TracyD3D12ZoneTransient(ctx, varname, name, active)
+#endif
+
+#define TracyD3D11Collect( ctx ) ctx->Collect();
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyD3D12.hpp b/3rdparty/tracy/tracy/TracyD3D12.hpp
new file mode 100644
index 0000000..1276b9e
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyD3D12.hpp
@@ -0,0 +1,506 @@
+#ifndef __TRACYD3D12_HPP__
+#define __TRACYD3D12_HPP__
+
+#ifndef TRACY_ENABLE
+
+#define TracyD3D12Context(device, queue) nullptr
+#define TracyD3D12Destroy(ctx)
+#define TracyD3D12ContextName(ctx, name, size)
+
+#define TracyD3D12NewFrame(ctx)
+
+#define TracyD3D12Zone(ctx, cmdList, name)
+#define TracyD3D12ZoneC(ctx, cmdList, name, color)
+#define TracyD3D12NamedZone(ctx, varname, cmdList, name, active)
+#define TracyD3D12NamedZoneC(ctx, varname, cmdList, name, color, active)
+#define TracyD3D12ZoneTransient(ctx, varname, cmdList, name, active)
+
+#define TracyD3D12ZoneS(ctx, cmdList, name, depth)
+#define TracyD3D12ZoneCS(ctx, cmdList, name, color, depth)
+#define TracyD3D12NamedZoneS(ctx, varname, cmdList, name, depth, active)
+#define TracyD3D12NamedZoneCS(ctx, varname, cmdList, name, color, depth, active)
+#define TracyD3D12ZoneTransientS(ctx, varname, cmdList, name, depth, active)
+
+#define TracyD3D12Collect(ctx)
+
+namespace tracy
+{
+ class D3D12ZoneScope {};
+}
+
+using TracyD3D12Ctx = void*;
+
+#else
+
+#include "Tracy.hpp"
+#include "client/TracyProfiler.hpp"
+#include "client/TracyCallstack.hpp"
+
+#include <cstdlib>
+#include <cassert>
+#include <d3d12.h>
+#include <dxgi.h>
+#include <wrl/client.h>
+#include <queue>
+
+namespace tracy
+{
+
+ struct D3D12QueryPayload
+ {
+ uint32_t m_queryIdStart = 0;
+ uint32_t m_queryCount = 0;
+ };
+
+ // Command queue context.
+ class D3D12QueueCtx
+ {
+ friend class D3D12ZoneScope;
+
+ static constexpr uint32_t MaxQueries = 64 * 1024; // Queries are begin and end markers, so we can store half as many total time durations. Must be even!
+
+ bool m_initialized = false;
+
+ ID3D12Device* m_device = nullptr;
+ ID3D12CommandQueue* m_queue = nullptr;
+ uint8_t m_context;
+ Microsoft::WRL::ComPtr<ID3D12QueryHeap> m_queryHeap;
+ Microsoft::WRL::ComPtr<ID3D12Resource> m_readbackBuffer;
+
+ // In-progress payload.
+ uint32_t m_queryLimit = MaxQueries;
+ uint32_t m_queryCounter = 0;
+ uint32_t m_previousQueryCounter = 0;
+
+ uint32_t m_activePayload = 0;
+ Microsoft::WRL::ComPtr<ID3D12Fence> m_payloadFence;
+ std::queue<D3D12QueryPayload> m_payloadQueue;
+
+ int64_t m_prevCalibration = 0;
+ int64_t m_qpcToNs = int64_t{ 1000000000 / GetFrequencyQpc() };
+
+ public:
+ D3D12QueueCtx(ID3D12Device* device, ID3D12CommandQueue* queue)
+ : m_device(device)
+ , m_queue(queue)
+ , m_context(GetGpuCtxCounter().fetch_add(1, std::memory_order_relaxed))
+ {
+ // Verify we support timestamp queries on this queue.
+
+ if (queue->GetDesc().Type == D3D12_COMMAND_LIST_TYPE_COPY)
+ {
+ D3D12_FEATURE_DATA_D3D12_OPTIONS3 featureData{};
+
+ bool Success = SUCCEEDED(device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS3, &featureData, sizeof(featureData)));
+ assert(Success && featureData.CopyQueueTimestampQueriesSupported && "Platform does not support profiling of copy queues.");
+ }
+
+ uint64_t timestampFrequency;
+
+ if (FAILED(queue->GetTimestampFrequency(&timestampFrequency)))
+ {
+ assert(false && "Failed to get timestamp frequency.");
+ }
+
+ uint64_t cpuTimestamp;
+ uint64_t gpuTimestamp;
+
+ if (FAILED(queue->GetClockCalibration(&gpuTimestamp, &cpuTimestamp)))
+ {
+ assert(false && "Failed to get queue clock calibration.");
+ }
+
+ // Save the device cpu timestamp, not the profiler's timestamp.
+ m_prevCalibration = cpuTimestamp * m_qpcToNs;
+
+ cpuTimestamp = Profiler::GetTime();
+
+ D3D12_QUERY_HEAP_DESC heapDesc{};
+ heapDesc.Type = queue->GetDesc().Type == D3D12_COMMAND_LIST_TYPE_COPY ? D3D12_QUERY_HEAP_TYPE_COPY_QUEUE_TIMESTAMP : D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
+ heapDesc.Count = m_queryLimit;
+ heapDesc.NodeMask = 0; // #TODO: Support multiple adapters.
+
+ while (FAILED(device->CreateQueryHeap(&heapDesc, IID_PPV_ARGS(&m_queryHeap))))
+ {
+ m_queryLimit /= 2;
+ heapDesc.Count = m_queryLimit;
+ }
+
+ // Create a readback buffer, which will be used as a destination for the query data.
+
+ D3D12_RESOURCE_DESC readbackBufferDesc{};
+ readbackBufferDesc.Alignment = 0;
+ readbackBufferDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+ readbackBufferDesc.Width = m_queryLimit * sizeof(uint64_t);
+ readbackBufferDesc.Height = 1;
+ readbackBufferDesc.DepthOrArraySize = 1;
+ readbackBufferDesc.Format = DXGI_FORMAT_UNKNOWN;
+ readbackBufferDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; // Buffers are always row major.
+ readbackBufferDesc.MipLevels = 1;
+ readbackBufferDesc.SampleDesc.Count = 1;
+ readbackBufferDesc.SampleDesc.Quality = 0;
+ readbackBufferDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
+
+ D3D12_HEAP_PROPERTIES readbackHeapProps{};
+ readbackHeapProps.Type = D3D12_HEAP_TYPE_READBACK;
+ readbackHeapProps.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ readbackHeapProps.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ readbackHeapProps.CreationNodeMask = 0;
+ readbackHeapProps.VisibleNodeMask = 0; // #TODO: Support multiple adapters.
+
+ if (FAILED(device->CreateCommittedResource(&readbackHeapProps, D3D12_HEAP_FLAG_NONE, &readbackBufferDesc, D3D12_RESOURCE_STATE_COPY_DEST, nullptr, IID_PPV_ARGS(&m_readbackBuffer))))
+ {
+ assert(false && "Failed to create query readback buffer.");
+ }
+
+ if (FAILED(device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&m_payloadFence))))
+ {
+ assert(false && "Failed to create payload fence.");
+ }
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuNewContext);
+ MemWrite(&item->gpuNewContext.cpuTime, cpuTimestamp);
+ MemWrite(&item->gpuNewContext.gpuTime, gpuTimestamp);
+ memset(&item->gpuNewContext.thread, 0, sizeof(item->gpuNewContext.thread));
+ MemWrite(&item->gpuNewContext.period, 1E+09f / static_cast<float>(timestampFrequency));
+ MemWrite(&item->gpuNewContext.context, m_context);
+ MemWrite(&item->gpuNewContext.flags, GpuContextCalibration);
+ MemWrite(&item->gpuNewContext.type, GpuContextType::Direct3D12);
+
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem(*item);
+#endif
+
+ Profiler::QueueSerialFinish();
+
+ m_initialized = true;
+ }
+
+ void NewFrame()
+ {
+ m_payloadQueue.emplace(D3D12QueryPayload{ m_previousQueryCounter, m_queryCounter });
+ m_previousQueryCounter += m_queryCounter;
+ m_queryCounter = 0;
+
+ if (m_previousQueryCounter >= m_queryLimit)
+ {
+ m_previousQueryCounter -= m_queryLimit;
+ }
+
+ m_queue->Signal(m_payloadFence.Get(), ++m_activePayload);
+ }
+
+ void Name( const char* name, uint16_t len )
+ {
+ auto ptr = (char*)tracy_malloc( len );
+ memcpy( ptr, name, len );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuContextName );
+ MemWrite( &item->gpuContextNameFat.context, m_context );
+ MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
+ MemWrite( &item->gpuContextNameFat.size, len );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ void Collect()
+ {
+ ZoneScopedC(Color::Red4);
+
+#ifdef TRACY_ON_DEMAND
+ if (!GetProfiler().IsConnected())
+ {
+ m_queryCounter = 0;
+
+ return;
+ }
+#endif
+
+ // Find out what payloads are available.
+ const auto newestReadyPayload = m_payloadFence->GetCompletedValue();
+ const auto payloadCount = m_payloadQueue.size() - (m_activePayload - newestReadyPayload);
+
+ if (!payloadCount)
+ {
+ return; // No payloads are available yet, exit out.
+ }
+
+ D3D12_RANGE mapRange{ 0, m_queryLimit * sizeof(uint64_t) };
+
+ // Map the readback buffer so we can fetch the query data from the GPU.
+ void* readbackBufferMapping = nullptr;
+
+ if (FAILED(m_readbackBuffer->Map(0, &mapRange, &readbackBufferMapping)))
+ {
+ assert(false && "Failed to map readback buffer.");
+ }
+
+ auto* timestampData = static_cast<uint64_t*>(readbackBufferMapping);
+
+ for (uint32_t i = 0; i < payloadCount; ++i)
+ {
+ const auto& payload = m_payloadQueue.front();
+
+ for (uint32_t j = 0; j < payload.m_queryCount; ++j)
+ {
+ const auto counter = (payload.m_queryIdStart + j) % m_queryLimit;
+ const auto timestamp = timestampData[counter];
+ const auto queryId = counter;
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuTime);
+ MemWrite(&item->gpuTime.gpuTime, timestamp);
+ MemWrite(&item->gpuTime.queryId, static_cast<uint16_t>(queryId));
+ MemWrite(&item->gpuTime.context, m_context);
+
+ Profiler::QueueSerialFinish();
+ }
+
+ m_payloadQueue.pop();
+ }
+
+ m_readbackBuffer->Unmap(0, nullptr);
+
+ // Recalibrate to account for drift.
+
+ uint64_t cpuTimestamp;
+ uint64_t gpuTimestamp;
+
+ if (FAILED(m_queue->GetClockCalibration(&gpuTimestamp, &cpuTimestamp)))
+ {
+ assert(false && "Failed to get queue clock calibration.");
+ }
+
+ cpuTimestamp *= m_qpcToNs;
+
+ const auto cpuDelta = cpuTimestamp - m_prevCalibration;
+ if (cpuDelta > 0)
+ {
+ m_prevCalibration = cpuTimestamp;
+ cpuTimestamp = Profiler::GetTime();
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuCalibration);
+ MemWrite(&item->gpuCalibration.gpuTime, gpuTimestamp);
+ MemWrite(&item->gpuCalibration.cpuTime, cpuTimestamp);
+ MemWrite(&item->gpuCalibration.cpuDelta, cpuDelta);
+ MemWrite(&item->gpuCalibration.context, m_context);
+
+ Profiler::QueueSerialFinish();
+ }
+ }
+
+ private:
+ tracy_force_inline uint32_t NextQueryId()
+ {
+ assert(m_queryCounter < m_queryLimit && "Submitted too many GPU queries! Consider increasing MaxQueries.");
+
+ const uint32_t id = (m_previousQueryCounter + m_queryCounter) % m_queryLimit;
+ m_queryCounter += 2; // Allocate space for a begin and end query.
+
+ return id;
+ }
+
+ tracy_force_inline uint8_t GetId() const
+ {
+ return m_context;
+ }
+ };
+
+ class D3D12ZoneScope
+ {
+ const bool m_active;
+ D3D12QueueCtx* m_ctx = nullptr;
+ ID3D12GraphicsCommandList* m_cmdList = nullptr;
+ uint32_t m_queryId = 0; // Used for tracking in nested zones.
+
+ public:
+ tracy_force_inline D3D12ZoneScope(D3D12QueueCtx* ctx, ID3D12GraphicsCommandList* cmdList, const SourceLocationData* srcLocation, bool active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(active && GetProfiler().IsConnected())
+#else
+ : m_active(active)
+#endif
+ {
+ if (!m_active) return;
+
+ m_ctx = ctx;
+ m_cmdList = cmdList;
+
+ m_queryId = ctx->NextQueryId();
+ cmdList->EndQuery(ctx->m_queryHeap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, m_queryId);
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, reinterpret_cast<uint64_t>(srcLocation));
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, static_cast<uint16_t>(m_queryId));
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline D3D12ZoneScope(D3D12QueueCtx* ctx, ID3D12GraphicsCommandList* cmdList, const SourceLocationData* srcLocation, int depth, bool active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(active&& GetProfiler().IsConnected())
+#else
+ : m_active(active)
+#endif
+ {
+ if (!m_active) return;
+
+ m_ctx = ctx;
+ m_cmdList = cmdList;
+
+ m_queryId = ctx->NextQueryId();
+ cmdList->EndQuery(ctx->m_queryHeap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, m_queryId);
+
+ auto* item = Profiler::QueueSerialCallstack(Callstack(depth));
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginCallstackSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, reinterpret_cast<uint64_t>(srcLocation));
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, static_cast<uint16_t>(m_queryId));
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline D3D12ZoneScope(D3D12QueueCtx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, ID3D12GraphicsCommandList* cmdList, bool active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(active&& GetProfiler().IsConnected())
+#else
+ : m_active(active)
+#endif
+ {
+ if (!m_active) return;
+
+ m_ctx = ctx;
+ m_cmdList = cmdList;
+
+ m_queryId = ctx->NextQueryId();
+ cmdList->EndQuery(ctx->m_queryHeap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, m_queryId);
+
+ const auto sourceLocation = Profiler::AllocSourceLocation(line, source, sourceSz, function, functionSz, name, nameSz);
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, sourceLocation);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, static_cast<uint16_t>(m_queryId));
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline D3D12ZoneScope(D3D12QueueCtx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, ID3D12GraphicsCommandList* cmdList, int depth, bool active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(active&& GetProfiler().IsConnected())
+#else
+ : m_active(active)
+#endif
+ {
+ if (!m_active) return;
+
+ m_ctx = ctx;
+ m_cmdList = cmdList;
+
+ m_queryId = ctx->NextQueryId();
+ cmdList->EndQuery(ctx->m_queryHeap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, m_queryId);
+
+ const auto sourceLocation = Profiler::AllocSourceLocation(line, source, sourceSz, function, functionSz, name, nameSz);
+
+ auto* item = Profiler::QueueSerialCallstack(Callstack(depth));
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocCallstackSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, sourceLocation);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, static_cast<uint16_t>(m_queryId));
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline ~D3D12ZoneScope()
+ {
+ if (!m_active) return;
+
+ const auto queryId = m_queryId + 1; // Our end query slot is immediately after the begin slot.
+ m_cmdList->EndQuery(m_ctx->m_queryHeap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, queryId);
+
+ auto* item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneEndSerial);
+ MemWrite(&item->gpuZoneEnd.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneEnd.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneEnd.queryId, static_cast<uint16_t>(queryId));
+ MemWrite(&item->gpuZoneEnd.context, m_ctx->GetId());
+
+ Profiler::QueueSerialFinish();
+
+ m_cmdList->ResolveQueryData(m_ctx->m_queryHeap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, m_queryId, 2, m_ctx->m_readbackBuffer.Get(), m_queryId * sizeof(uint64_t));
+ }
+ };
+
+ static inline D3D12QueueCtx* CreateD3D12Context(ID3D12Device* device, ID3D12CommandQueue* queue)
+ {
+ auto* ctx = static_cast<D3D12QueueCtx*>(tracy_malloc(sizeof(D3D12QueueCtx)));
+ new (ctx) D3D12QueueCtx{ device, queue };
+
+ return ctx;
+ }
+
+ static inline void DestroyD3D12Context(D3D12QueueCtx* ctx)
+ {
+ ctx->~D3D12QueueCtx();
+ tracy_free(ctx);
+ }
+
+}
+
+using TracyD3D12Ctx = tracy::D3D12QueueCtx*;
+
+#define TracyD3D12Context(device, queue) tracy::CreateD3D12Context(device, queue);
+#define TracyD3D12Destroy(ctx) tracy::DestroyD3D12Context(ctx);
+#define TracyD3D12ContextName(ctx, name, size) ctx->Name(name, size);
+
+#define TracyD3D12NewFrame(ctx) ctx->NewFrame();
+
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyD3D12Zone(ctx, cmdList, name) TracyD3D12NamedZoneS(ctx, ___tracy_gpu_zone, cmdList, name, TRACY_CALLSTACK, true)
+# define TracyD3D12ZoneC(ctx, cmdList, name, color) TracyD3D12NamedZoneCS(ctx, ___tracy_gpu_zone, cmdList, name, color, TRACY_CALLSTACK, true)
+# define TracyD3D12NamedZone(ctx, varname, cmdList, name, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location, __LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::D3D12ZoneScope varname{ ctx, cmdList, &TracyConcat(__tracy_gpu_source_location, __LINE__), TRACY_CALLSTACK, active };
+# define TracyD3D12NamedZoneC(ctx, varname, cmdList, name, color, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location, __LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::D3D12ZoneScope varname{ ctx, cmdList, &TracyConcat(__tracy_gpu_source_location, __LINE__), TRACY_CALLSTACK, active };
+# define TracyD3D12ZoneTransient(ctx, varname, cmdList, name, active) TracyD3D12ZoneTransientS(ctx, varname, cmdList, name, TRACY_CALLSTACK, active)
+#else
+# define TracyD3D12Zone(ctx, cmdList, name) TracyD3D12NamedZone(ctx, ___tracy_gpu_zone, cmdList, name, true)
+# define TracyD3D12ZoneC(ctx, cmdList, name, color) TracyD3D12NamedZoneC(ctx, ___tracy_gpu_zone, cmdList, name, color, true)
+# define TracyD3D12NamedZone(ctx, varname, cmdList, name, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location, __LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::D3D12ZoneScope varname{ ctx, cmdList, &TracyConcat(__tracy_gpu_source_location, __LINE__), active };
+# define TracyD3D12NamedZoneC(ctx, varname, cmdList, name, color, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location, __LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::D3D12ZoneScope varname{ ctx, cmdList, &TracyConcat(__tracy_gpu_source_location, __LINE__), active };
+# define TracyD3D12ZoneTransient(ctx, varname, cmdList, name, active) tracy::D3D12ZoneScope varname{ ctx, __LINE__, __FILE__, strlen(__FILE__), __FUNCTION__, strlen(__FUNCTION__), name, strlen(name), cmdList, active };
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+# define TracyD3D12ZoneS(ctx, cmdList, name, depth) TracyD3D12NamedZoneS(ctx, ___tracy_gpu_zone, cmdList, name, depth, true)
+# define TracyD3D12ZoneCS(ctx, cmdList, name, color, depth) TracyD3D12NamedZoneCS(ctx, ___tracy_gpu_zone, cmdList, name, color, depth, true)
+# define TracyD3D12NamedZoneS(ctx, varname, cmdList, name, depth, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location, __LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::D3D12ZoneScope varname{ ctx, cmdList, &TracyConcat(__tracy_gpu_source_location, __LINE__), depth, active };
+# define TracyD3D12NamedZoneCS(ctx, varname, cmdList, name, color, depth, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location, __LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::D3D12ZoneScope varname{ ctx, cmdList, &TracyConcat(__tracy_gpu_source_location, __LINE__), depth, active };
+# define TracyD3D12ZoneTransientS(ctx, varname, cmdList, name, depth, active) tracy::D3D12ZoneScope varname{ ctx, __LINE__, __FILE__, strlen(__FILE__), __FUNCTION__, strlen(__FUNCTION__), name, strlen(name), cmdList, depth, active };
+#else
+# define TracyD3D12ZoneS(ctx, cmdList, name, depth) TracyD3D12Zone(ctx, cmdList, name)
+# define TracyD3D12ZoneCS(ctx, cmdList, name, color, depth) TracyD3D12Zone(ctx, cmdList, name, color)
+# define TracyD3D12NamedZoneS(ctx, varname, cmdList, name, depth, active) TracyD3D12NamedZone(ctx, varname, cmdList, name, active)
+# define TracyD3D12NamedZoneCS(ctx, varname, cmdList, name, color, depth, active) TracyD3D12NamedZoneC(ctx, varname, cmdList, name, color, active)
+# define TracyD3D12ZoneTransientS(ctx, varname, cmdList, name, depth, active) TracyD3D12ZoneTransient(ctx, varname, cmdList, name, active)
+#endif
+
+#define TracyD3D12Collect(ctx) ctx->Collect();
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyLua.hpp b/3rdparty/tracy/tracy/TracyLua.hpp
new file mode 100644
index 0000000..c9a6431
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyLua.hpp
@@ -0,0 +1,431 @@
+#ifndef __TRACYLUA_HPP__
+#define __TRACYLUA_HPP__
+
+// Include this file after you include lua headers.
+
+#ifndef TRACY_ENABLE
+
+#include <string.h>
+
+namespace tracy
+{
+
+namespace detail
+{
+static inline int noop( lua_State* L ) { return 0; }
+}
+
+static inline void LuaRegister( lua_State* L )
+{
+ lua_newtable( L );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneBegin" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneBeginN" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneBeginS" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneBeginNS" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneEnd" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneText" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "ZoneName" );
+ lua_pushcfunction( L, detail::noop );
+ lua_setfield( L, -2, "Message" );
+ lua_setglobal( L, "tracy" );
+}
+
+static inline char* FindEnd( char* ptr )
+{
+ unsigned int cnt = 1;
+ while( cnt != 0 )
+ {
+ if( *ptr == '(' ) cnt++;
+ else if( *ptr == ')' ) cnt--;
+ ptr++;
+ }
+ return ptr;
+}
+
+static inline void LuaRemove( char* script )
+{
+ while( *script )
+ {
+ if( strncmp( script, "tracy.", 6 ) == 0 )
+ {
+ if( strncmp( script + 6, "Zone", 4 ) == 0 )
+ {
+ if( strncmp( script + 10, "End()", 5 ) == 0 )
+ {
+ memset( script, ' ', 15 );
+ script += 15;
+ }
+ else if( strncmp( script + 10, "Begin()", 7 ) == 0 )
+ {
+ memset( script, ' ', 17 );
+ script += 17;
+ }
+ else if( strncmp( script + 10, "Text(", 5 ) == 0 )
+ {
+ auto end = FindEnd( script + 15 );
+ memset( script, ' ', end - script );
+ script = end;
+ }
+ else if( strncmp( script + 10, "Name(", 5 ) == 0 )
+ {
+ auto end = FindEnd( script + 15 );
+ memset( script, ' ', end - script );
+ script = end;
+ }
+ else if( strncmp( script + 10, "BeginN(", 7 ) == 0 )
+ {
+ auto end = FindEnd( script + 17 );
+ memset( script, ' ', end - script );
+ script = end;
+ }
+ else if( strncmp( script + 10, "BeginS(", 7 ) == 0 )
+ {
+ auto end = FindEnd( script + 17 );
+ memset( script, ' ', end - script );
+ script = end;
+ }
+ else if( strncmp( script + 10, "BeginNS(", 8 ) == 0 )
+ {
+ auto end = FindEnd( script + 18 );
+ memset( script, ' ', end - script );
+ script = end;
+ }
+ else
+ {
+ script += 10;
+ }
+ }
+ else if( strncmp( script + 6, "Message(", 8 ) == 0 )
+ {
+ auto end = FindEnd( script + 14 );
+ memset( script, ' ', end - script );
+ script = end;
+ }
+ else
+ {
+ script += 6;
+ }
+ }
+ else
+ {
+ script++;
+ }
+ }
+}
+
+}
+
+#else
+
+#include <assert.h>
+#include <limits>
+
+#include "common/TracyColor.hpp"
+#include "common/TracyAlign.hpp"
+#include "common/TracyForceInline.hpp"
+#include "common/TracySystem.hpp"
+#include "client/TracyProfiler.hpp"
+
+namespace tracy
+{
+
+#ifdef TRACY_ON_DEMAND
+TRACY_API LuaZoneState& GetLuaZoneState();
+#endif
+
+namespace detail
+{
+
+#ifdef TRACY_HAS_CALLSTACK
+static tracy_force_inline void SendLuaCallstack( lua_State* L, uint32_t depth )
+{
+ assert( depth <= 64 );
+ lua_Debug dbg[64];
+ const char* func[64];
+ uint32_t fsz[64];
+ uint32_t ssz[64];
+
+ uint8_t cnt;
+ uint16_t spaceNeeded = sizeof( cnt );
+ for( cnt=0; cnt<depth; cnt++ )
+ {
+ if( lua_getstack( L, cnt+1, dbg+cnt ) == 0 ) break;
+ lua_getinfo( L, "Snl", dbg+cnt );
+ func[cnt] = dbg[cnt].name ? dbg[cnt].name : dbg[cnt].short_src;
+ fsz[cnt] = uint32_t( strlen( func[cnt] ) );
+ ssz[cnt] = uint32_t( strlen( dbg[cnt].source ) );
+ spaceNeeded += fsz[cnt] + ssz[cnt];
+ }
+ spaceNeeded += cnt * ( 4 + 2 + 2 ); // source line, function string length, source string length
+
+ auto ptr = (char*)tracy_malloc( spaceNeeded + 2 );
+ auto dst = ptr;
+ memcpy( dst, &spaceNeeded, 2 ); dst += 2;
+ memcpy( dst, &cnt, 1 ); dst++;
+ for( uint8_t i=0; i<cnt; i++ )
+ {
+ const uint32_t line = dbg[i].currentline;
+ memcpy( dst, &line, 4 ); dst += 4;
+ assert( fsz[i] <= std::numeric_limits<uint16_t>::max() );
+ memcpy( dst, fsz+i, 2 ); dst += 2;
+ memcpy( dst, func[i], fsz[i] ); dst += fsz[i];
+ assert( ssz[i] <= std::numeric_limits<uint16_t>::max() );
+ memcpy( dst, ssz+i, 2 ); dst += 2;
+ memcpy( dst, dbg[i].source, ssz[i] ), dst += ssz[i];
+ }
+ assert( dst - ptr == spaceNeeded + 2 );
+
+ TracyQueuePrepare( QueueType::CallstackAlloc );
+ MemWrite( &item->callstackAllocFat.ptr, (uint64_t)ptr );
+ MemWrite( &item->callstackAllocFat.nativePtr, (uint64_t)Callstack( depth ) );
+ TracyQueueCommit( callstackAllocFatThread );
+}
+
+static inline int LuaZoneBeginS( lua_State* L )
+{
+#ifdef TRACY_ON_DEMAND
+ const auto zoneCnt = GetLuaZoneState().counter++;
+ if( zoneCnt != 0 && !GetLuaZoneState().active ) return 0;
+ GetLuaZoneState().active = GetProfiler().IsConnected();
+ if( !GetLuaZoneState().active ) return 0;
+#endif
+
+#ifdef TRACY_CALLSTACK
+ const uint32_t depth = TRACY_CALLSTACK;
+#else
+ const auto depth = uint32_t( lua_tointeger( L, 1 ) );
+#endif
+ SendLuaCallstack( L, depth );
+
+ lua_Debug dbg;
+ lua_getstack( L, 1, &dbg );
+ lua_getinfo( L, "Snl", &dbg );
+ const auto srcloc = Profiler::AllocSourceLocation( dbg.currentline, dbg.source, dbg.name ? dbg.name : dbg.short_src );
+
+ TracyQueuePrepare( QueueType::ZoneBeginAllocSrcLocCallstack );
+ MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
+ MemWrite( &item->zoneBegin.srcloc, srcloc );
+ TracyQueueCommit( zoneBeginThread );
+
+ return 0;
+}
+
+static inline int LuaZoneBeginNS( lua_State* L )
+{
+#ifdef TRACY_ON_DEMAND
+ const auto zoneCnt = GetLuaZoneState().counter++;
+ if( zoneCnt != 0 && !GetLuaZoneState().active ) return 0;
+ GetLuaZoneState().active = GetProfiler().IsConnected();
+ if( !GetLuaZoneState().active ) return 0;
+#endif
+
+#ifdef TRACY_CALLSTACK
+ const uint32_t depth = TRACY_CALLSTACK;
+#else
+ const auto depth = uint32_t( lua_tointeger( L, 2 ) );
+#endif
+ SendLuaCallstack( L, depth );
+
+ lua_Debug dbg;
+ lua_getstack( L, 1, &dbg );
+ lua_getinfo( L, "Snl", &dbg );
+ size_t nsz;
+ const auto name = lua_tolstring( L, 1, &nsz );
+ const auto srcloc = Profiler::AllocSourceLocation( dbg.currentline, dbg.source, dbg.name ? dbg.name : dbg.short_src, name, nsz );
+
+ TracyQueuePrepare( QueueType::ZoneBeginAllocSrcLocCallstack );
+ MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
+ MemWrite( &item->zoneBegin.srcloc, srcloc );
+ TracyQueueCommit( zoneBeginThread );
+
+ return 0;
+}
+#endif
+
+static inline int LuaZoneBegin( lua_State* L )
+{
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+ return LuaZoneBeginS( L );
+#else
+#ifdef TRACY_ON_DEMAND
+ const auto zoneCnt = GetLuaZoneState().counter++;
+ if( zoneCnt != 0 && !GetLuaZoneState().active ) return 0;
+ GetLuaZoneState().active = GetProfiler().IsConnected();
+ if( !GetLuaZoneState().active ) return 0;
+#endif
+
+ lua_Debug dbg;
+ lua_getstack( L, 1, &dbg );
+ lua_getinfo( L, "Snl", &dbg );
+ const auto srcloc = Profiler::AllocSourceLocation( dbg.currentline, dbg.source, dbg.name ? dbg.name : dbg.short_src );
+
+ TracyQueuePrepare( QueueType::ZoneBeginAllocSrcLoc );
+ MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
+ MemWrite( &item->zoneBegin.srcloc, srcloc );
+ TracyQueueCommit( zoneBeginThread );
+ return 0;
+#endif
+}
+
+static inline int LuaZoneBeginN( lua_State* L )
+{
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+ return LuaZoneBeginNS( L );
+#else
+#ifdef TRACY_ON_DEMAND
+ const auto zoneCnt = GetLuaZoneState().counter++;
+ if( zoneCnt != 0 && !GetLuaZoneState().active ) return 0;
+ GetLuaZoneState().active = GetProfiler().IsConnected();
+ if( !GetLuaZoneState().active ) return 0;
+#endif
+
+ lua_Debug dbg;
+ lua_getstack( L, 1, &dbg );
+ lua_getinfo( L, "Snl", &dbg );
+ size_t nsz;
+ const auto name = lua_tolstring( L, 1, &nsz );
+ const auto srcloc = Profiler::AllocSourceLocation( dbg.currentline, dbg.source, dbg.name ? dbg.name : dbg.short_src, name, nsz );
+
+ TracyQueuePrepare( QueueType::ZoneBeginAllocSrcLoc );
+ MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
+ MemWrite( &item->zoneBegin.srcloc, srcloc );
+ TracyQueueCommit( zoneBeginThread );
+ return 0;
+#endif
+}
+
+static inline int LuaZoneEnd( lua_State* L )
+{
+#ifdef TRACY_ON_DEMAND
+ assert( GetLuaZoneState().counter != 0 );
+ GetLuaZoneState().counter--;
+ if( !GetLuaZoneState().active ) return 0;
+ if( !GetProfiler().IsConnected() )
+ {
+ GetLuaZoneState().active = false;
+ return 0;
+ }
+#endif
+
+ TracyQueuePrepare( QueueType::ZoneEnd );
+ MemWrite( &item->zoneEnd.time, Profiler::GetTime() );
+ TracyQueueCommit( zoneEndThread );
+ return 0;
+}
+
+static inline int LuaZoneText( lua_State* L )
+{
+#ifdef TRACY_ON_DEMAND
+ if( !GetLuaZoneState().active ) return 0;
+ if( !GetProfiler().IsConnected() )
+ {
+ GetLuaZoneState().active = false;
+ return 0;
+ }
+#endif
+
+ auto txt = lua_tostring( L, 1 );
+ const auto size = strlen( txt );
+ assert( size < std::numeric_limits<uint16_t>::max() );
+
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, txt, size );
+
+ TracyQueuePrepare( QueueType::ZoneText );
+ MemWrite( &item->zoneTextFat.text, (uint64_t)ptr );
+ MemWrite( &item->zoneTextFat.size, (uint16_t)size );
+ TracyQueueCommit( zoneTextFatThread );
+ return 0;
+}
+
+static inline int LuaZoneName( lua_State* L )
+{
+#ifdef TRACY_ON_DEMAND
+ if( !GetLuaZoneState().active ) return 0;
+ if( !GetProfiler().IsConnected() )
+ {
+ GetLuaZoneState().active = false;
+ return 0;
+ }
+#endif
+
+ auto txt = lua_tostring( L, 1 );
+ const auto size = strlen( txt );
+ assert( size < std::numeric_limits<uint16_t>::max() );
+
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, txt, size );
+
+ TracyQueuePrepare( QueueType::ZoneName );
+ MemWrite( &item->zoneTextFat.text, (uint64_t)ptr );
+ MemWrite( &item->zoneTextFat.size, (uint16_t)size );
+ TracyQueueCommit( zoneTextFatThread );
+ return 0;
+}
+
+static inline int LuaMessage( lua_State* L )
+{
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return 0;
+#endif
+
+ auto txt = lua_tostring( L, 1 );
+ const auto size = strlen( txt );
+ assert( size < std::numeric_limits<uint16_t>::max() );
+
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, txt, size );
+
+ TracyQueuePrepare( QueueType::Message );
+ MemWrite( &item->messageFat.time, Profiler::GetTime() );
+ MemWrite( &item->messageFat.text, (uint64_t)ptr );
+ MemWrite( &item->messageFat.size, (uint16_t)size );
+ TracyQueueCommit( messageFatThread );
+ return 0;
+}
+
+}
+
+static inline void LuaRegister( lua_State* L )
+{
+ lua_newtable( L );
+ lua_pushcfunction( L, detail::LuaZoneBegin );
+ lua_setfield( L, -2, "ZoneBegin" );
+ lua_pushcfunction( L, detail::LuaZoneBeginN );
+ lua_setfield( L, -2, "ZoneBeginN" );
+#ifdef TRACY_HAS_CALLSTACK
+ lua_pushcfunction( L, detail::LuaZoneBeginS );
+ lua_setfield( L, -2, "ZoneBeginS" );
+ lua_pushcfunction( L, detail::LuaZoneBeginNS );
+ lua_setfield( L, -2, "ZoneBeginNS" );
+#else
+ lua_pushcfunction( L, detail::LuaZoneBegin );
+ lua_setfield( L, -2, "ZoneBeginS" );
+ lua_pushcfunction( L, detail::LuaZoneBeginN );
+ lua_setfield( L, -2, "ZoneBeginNS" );
+#endif
+ lua_pushcfunction( L, detail::LuaZoneEnd );
+ lua_setfield( L, -2, "ZoneEnd" );
+ lua_pushcfunction( L, detail::LuaZoneText );
+ lua_setfield( L, -2, "ZoneText" );
+ lua_pushcfunction( L, detail::LuaZoneName );
+ lua_setfield( L, -2, "ZoneName" );
+ lua_pushcfunction( L, detail::LuaMessage );
+ lua_setfield( L, -2, "Message" );
+ lua_setglobal( L, "tracy" );
+}
+
+static inline void LuaRemove( char* script ) {}
+
+}
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyOpenCL.hpp b/3rdparty/tracy/tracy/TracyOpenCL.hpp
new file mode 100644
index 0000000..3891e8a
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyOpenCL.hpp
@@ -0,0 +1,414 @@
+#ifndef __TRACYOPENCL_HPP__
+#define __TRACYOPENCL_HPP__
+
+#if !defined TRACY_ENABLE
+
+#define TracyCLContext(c, x) nullptr
+#define TracyCLDestroy(c)
+#define TracyCLContextName(c, x, y)
+
+#define TracyCLNamedZone(c, x, y, z)
+#define TracyCLNamedZoneC(c, x, y, z, w)
+#define TracyCLZone(c, x)
+#define TracyCLZoneC(c, x, y)
+#define TracyCLZoneTransient(c,x,y,z)
+
+#define TracyCLNamedZoneS(c, x, y, z, w)
+#define TracyCLNamedZoneCS(c, x, y, z, w, v)
+#define TracyCLZoneS(c, x, y)
+#define TracyCLZoneCS(c, x, y, z)
+#define TracyCLZoneTransientS(c,x,y,z,w)
+
+#define TracyCLNamedZoneSetEvent(x, e)
+#define TracyCLZoneSetEvent(e)
+
+#define TracyCLCollect(c)
+
+namespace tracy
+{
+ class OpenCLCtxScope {};
+}
+
+using TracyCLCtx = void*;
+
+#else
+
+#include <CL/cl.h>
+
+#include <atomic>
+#include <cassert>
+#include <sstream>
+
+#include "Tracy.hpp"
+#include "client/TracyCallstack.hpp"
+#include "client/TracyProfiler.hpp"
+#include "common/TracyAlloc.hpp"
+
+#define TRACY_CL_TO_STRING_INDIRECT(T) #T
+#define TRACY_CL_TO_STRING(T) TRACY_CL_TO_STRING_INDIRECT(T)
+#define TRACY_CL_ASSERT(p) if(!(p)) { \
+ TracyMessageL( "TRACY_CL_ASSERT failed on " __FILE__ ":" TRACY_CL_TO_STRING(__LINE__) ); \
+ assert(false && "TRACY_CL_ASSERT failed"); \
+}
+#define TRACY_CL_CHECK_ERROR(err) if(err != CL_SUCCESS) { \
+ std::ostringstream oss; \
+ oss << "TRACY_CL_CHECK_ERROR failed on " << __FILE__ << ":" << __LINE__ \
+ << ": error code " << err; \
+ auto msg = oss.str(); \
+ TracyMessage(msg.data(), msg.size()); \
+ assert(false && "TRACY_CL_CHECK_ERROR failed"); \
+}
+
+namespace tracy {
+
+ enum class EventPhase : uint8_t
+ {
+ Begin,
+ End
+ };
+
+ struct EventInfo
+ {
+ cl_event event;
+ EventPhase phase;
+ };
+
+ class OpenCLCtx
+ {
+ public:
+ enum { QueryCount = 64 * 1024 };
+
+ OpenCLCtx(cl_context context, cl_device_id device)
+ : m_contextId(GetGpuCtxCounter().fetch_add(1, std::memory_order_relaxed))
+ , m_head(0)
+ , m_tail(0)
+ {
+ int64_t tcpu, tgpu;
+ TRACY_CL_ASSERT(m_contextId != 255);
+
+ cl_int err = CL_SUCCESS;
+ cl_command_queue queue = clCreateCommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE, &err);
+ TRACY_CL_CHECK_ERROR(err)
+ uint32_t dummyValue = 42;
+ cl_mem dummyBuffer = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(uint32_t), nullptr, &err);
+ TRACY_CL_CHECK_ERROR(err)
+ cl_event writeBufferEvent;
+ TRACY_CL_CHECK_ERROR(clEnqueueWriteBuffer(queue, dummyBuffer, CL_FALSE, 0, sizeof(uint32_t), &dummyValue, 0, nullptr, &writeBufferEvent));
+ TRACY_CL_CHECK_ERROR(clWaitForEvents(1, &writeBufferEvent));
+
+ tcpu = Profiler::GetTime();
+
+ cl_int eventStatus;
+ TRACY_CL_CHECK_ERROR(clGetEventInfo(writeBufferEvent, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(cl_int), &eventStatus, nullptr));
+ TRACY_CL_ASSERT(eventStatus == CL_COMPLETE);
+ TRACY_CL_CHECK_ERROR(clGetEventProfilingInfo(writeBufferEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &tgpu, nullptr));
+ TRACY_CL_CHECK_ERROR(clReleaseEvent(writeBufferEvent));
+ TRACY_CL_CHECK_ERROR(clReleaseMemObject(dummyBuffer));
+ TRACY_CL_CHECK_ERROR(clReleaseCommandQueue(queue));
+
+ auto item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuNewContext);
+ MemWrite(&item->gpuNewContext.cpuTime, tcpu);
+ MemWrite(&item->gpuNewContext.gpuTime, tgpu);
+ memset(&item->gpuNewContext.thread, 0, sizeof(item->gpuNewContext.thread));
+ MemWrite(&item->gpuNewContext.period, 1.0f);
+ MemWrite(&item->gpuNewContext.type, GpuContextType::OpenCL);
+ MemWrite(&item->gpuNewContext.context, (uint8_t) m_contextId);
+ MemWrite(&item->gpuNewContext.flags, (uint8_t)0);
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem(*item);
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ void Name( const char* name, uint16_t len )
+ {
+ auto ptr = (char*)tracy_malloc( len );
+ memcpy( ptr, name, len );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuContextName );
+ MemWrite( &item->gpuContextNameFat.context, (uint8_t)m_contextId );
+ MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
+ MemWrite( &item->gpuContextNameFat.size, len );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ void Collect()
+ {
+ ZoneScopedC(Color::Red4);
+
+ if (m_tail == m_head) return;
+
+#ifdef TRACY_ON_DEMAND
+ if (!GetProfiler().IsConnected())
+ {
+ m_head = m_tail = 0;
+ }
+#endif
+
+ for (; m_tail != m_head; m_tail = (m_tail + 1) % QueryCount)
+ {
+ EventInfo eventInfo = GetQuery(m_tail);
+ cl_int eventStatus;
+ cl_int err = clGetEventInfo(eventInfo.event, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(cl_int), &eventStatus, nullptr);
+ if (err != CL_SUCCESS)
+ {
+ std::ostringstream oss;
+                    oss << "clGetEventInfo failed with error code " << err << ", on event " << eventInfo.event << ", skipping...";
+ auto msg = oss.str();
+ TracyMessage(msg.data(), msg.size());
+ if (eventInfo.event == nullptr) {
+                        TracyMessageL("A TracyCLZone must be paired with a TracyCLZoneSetEvent, check your code!");
+ }
+ assert(false && "clGetEventInfo failed, maybe a TracyCLZone is not paired with TracyCLZoneSetEvent");
+ continue;
+ }
+ if (eventStatus != CL_COMPLETE) return;
+
+ cl_int eventInfoQuery = (eventInfo.phase == EventPhase::Begin)
+ ? CL_PROFILING_COMMAND_START
+ : CL_PROFILING_COMMAND_END;
+
+ cl_ulong eventTimeStamp = 0;
+ err = clGetEventProfilingInfo(eventInfo.event, eventInfoQuery, sizeof(cl_ulong), &eventTimeStamp, nullptr);
+ if (err == CL_PROFILING_INFO_NOT_AVAILABLE)
+ {
+ TracyMessageL("command queue is not created with CL_QUEUE_PROFILING_ENABLE flag, check your code!");
+ assert(false && "command queue is not created with CL_QUEUE_PROFILING_ENABLE flag");
+ }
+ else
+ TRACY_CL_CHECK_ERROR(err);
+
+ TRACY_CL_ASSERT(eventTimeStamp != 0);
+
+ auto item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuTime);
+ MemWrite(&item->gpuTime.gpuTime, (int64_t)eventTimeStamp);
+ MemWrite(&item->gpuTime.queryId, (uint16_t)m_tail);
+ MemWrite(&item->gpuTime.context, m_contextId);
+ Profiler::QueueSerialFinish();
+
+ if (eventInfo.phase == EventPhase::End)
+ {
+ // Done with the event, so release it
+ TRACY_CL_CHECK_ERROR(clReleaseEvent(eventInfo.event));
+ }
+ }
+ }
+
+ tracy_force_inline uint8_t GetId() const
+ {
+ return m_contextId;
+ }
+
+ tracy_force_inline unsigned int NextQueryId(EventInfo eventInfo)
+ {
+ const auto id = m_head;
+ m_head = (m_head + 1) % QueryCount;
+ TRACY_CL_ASSERT(m_head != m_tail);
+ m_query[id] = eventInfo;
+ return id;
+ }
+
+ tracy_force_inline EventInfo& GetQuery(unsigned int id)
+ {
+ TRACY_CL_ASSERT(id < QueryCount);
+ return m_query[id];
+ }
+
+ private:
+
+ unsigned int m_contextId;
+
+ EventInfo m_query[QueryCount];
+ unsigned int m_head; // index at which a new event should be inserted
+ unsigned int m_tail; // oldest event
+
+ };
+
+ class OpenCLCtxScope {
+ public:
+ tracy_force_inline OpenCLCtxScope(OpenCLCtx* ctx, const SourceLocationData* srcLoc, bool is_active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(is_active&& GetProfiler().IsConnected())
+#else
+ : m_active(is_active)
+#endif
+ , m_ctx(ctx)
+ , m_event(nullptr)
+ {
+ if (!m_active) return;
+
+ m_beginQueryId = ctx->NextQueryId(EventInfo{ nullptr, EventPhase::Begin });
+
+ auto item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, (uint64_t)srcLoc);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, (uint16_t)m_beginQueryId);
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline OpenCLCtxScope(OpenCLCtx* ctx, const SourceLocationData* srcLoc, int depth, bool is_active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(is_active&& GetProfiler().IsConnected())
+#else
+ : m_active(is_active)
+#endif
+ , m_ctx(ctx)
+ , m_event(nullptr)
+ {
+ if (!m_active) return;
+
+ m_beginQueryId = ctx->NextQueryId(EventInfo{ nullptr, EventPhase::Begin });
+
+ GetProfiler().SendCallstack(depth);
+
+ auto item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginCallstackSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, (uint64_t)srcLoc);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, (uint16_t)m_beginQueryId);
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline OpenCLCtxScope(OpenCLCtx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, bool is_active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(is_active && GetProfiler().IsConnected())
+#else
+ : m_active(is_active)
+#endif
+ , m_ctx(ctx)
+ , m_event(nullptr)
+ {
+ if (!m_active) return;
+
+ m_beginQueryId = ctx->NextQueryId(EventInfo{ nullptr, EventPhase::Begin });
+
+ const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocSerial );
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, srcloc);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, (uint16_t)m_beginQueryId);
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline OpenCLCtxScope(OpenCLCtx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, int depth, bool is_active)
+#ifdef TRACY_ON_DEMAND
+ : m_active(is_active && GetProfiler().IsConnected())
+#else
+ : m_active(is_active)
+#endif
+ , m_ctx(ctx)
+ , m_event(nullptr)
+ {
+ if (!m_active) return;
+
+ m_beginQueryId = ctx->NextQueryId(EventInfo{ nullptr, EventPhase::Begin });
+
+ const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
+ auto item = Profiler::QueueSerialCallstack( Callstack( depth ) );
+ MemWrite(&item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocCallstackSerial);
+ MemWrite(&item->gpuZoneBegin.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneBegin.srcloc, srcloc);
+ MemWrite(&item->gpuZoneBegin.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneBegin.queryId, (uint16_t)m_beginQueryId);
+ MemWrite(&item->gpuZoneBegin.context, ctx->GetId());
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void SetEvent(cl_event event)
+ {
+ if (!m_active) return;
+ m_event = event;
+ TRACY_CL_CHECK_ERROR(clRetainEvent(m_event));
+ m_ctx->GetQuery(m_beginQueryId).event = m_event;
+ }
+
+ tracy_force_inline ~OpenCLCtxScope()
+ {
+ if (!m_active) return;
+ const auto queryId = m_ctx->NextQueryId(EventInfo{ m_event, EventPhase::End });
+
+ auto item = Profiler::QueueSerial();
+ MemWrite(&item->hdr.type, QueueType::GpuZoneEndSerial);
+ MemWrite(&item->gpuZoneEnd.cpuTime, Profiler::GetTime());
+ MemWrite(&item->gpuZoneEnd.thread, GetThreadHandle());
+ MemWrite(&item->gpuZoneEnd.queryId, (uint16_t)queryId);
+ MemWrite(&item->gpuZoneEnd.context, m_ctx->GetId());
+ Profiler::QueueSerialFinish();
+ }
+
+ const bool m_active;
+ OpenCLCtx* m_ctx;
+ cl_event m_event;
+ unsigned int m_beginQueryId;
+ };
+
+ static inline OpenCLCtx* CreateCLContext(cl_context context, cl_device_id device)
+ {
+ auto ctx = (OpenCLCtx*)tracy_malloc(sizeof(OpenCLCtx));
+ new (ctx) OpenCLCtx(context, device);
+ return ctx;
+ }
+
+ static inline void DestroyCLContext(OpenCLCtx* ctx)
+ {
+ ctx->~OpenCLCtx();
+ tracy_free(ctx);
+ }
+
+} // namespace tracy
+
+using TracyCLCtx = tracy::OpenCLCtx*;
+
+#define TracyCLContext(context, device) tracy::CreateCLContext(context, device);
+#define TracyCLDestroy(ctx) tracy::DestroyCLContext(ctx);
+#define TracyCLContextName(context, name, size) ctx->Name(name, size);
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyCLNamedZone(ctx, varname, name, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::OpenCLCtxScope varname(ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyCLNamedZoneC(ctx, varname, name, color, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::OpenCLCtxScope varname(ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyCLZone(ctx, name) TracyCLNamedZoneS(ctx, __tracy_gpu_zone, name, TRACY_CALLSTACK, true)
+# define TracyCLZoneC(ctx, name, color) TracyCLNamedZoneCS(ctx, __tracy_gpu_zone, name, color, TRACY_CALLSTACK, true)
+# define TracyCLZoneTransient( ctx, varname, name, active ) tracy::OpenCLCtxScope varname( ctx, __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), TRACY_CALLSTACK, active );
+#else
+# define TracyCLNamedZone(ctx, varname, name, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__){ name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::OpenCLCtxScope varname(ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), active);
+# define TracyCLNamedZoneC(ctx, varname, name, color, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__){ name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::OpenCLCtxScope varname(ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), active);
+# define TracyCLZone(ctx, name) TracyCLNamedZone(ctx, __tracy_gpu_zone, name, true)
+# define TracyCLZoneC(ctx, name, color) TracyCLNamedZoneC(ctx, __tracy_gpu_zone, name, color, true )
+# define TracyCLZoneTransient( ctx, varname, name, active ) tracy::OpenCLCtxScope varname( ctx, __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), active );
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+# define TracyCLNamedZoneS(ctx, varname, name, depth, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__){ name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::OpenCLCtxScope varname(ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), depth, active);
+# define TracyCLNamedZoneCS(ctx, varname, name, color, depth, active) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__){ name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::OpenCLCtxScope varname(ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), depth, active);
+# define TracyCLZoneS(ctx, name, depth) TracyCLNamedZoneS(ctx, __tracy_gpu_zone, name, depth, true)
+# define TracyCLZoneCS(ctx, name, color, depth) TracyCLNamedZoneCS(ctx, __tracy_gpu_zone, name, color, depth, true)
+# define TracyCLZoneTransientS( ctx, varname, name, depth, active ) tracy::OpenCLCtxScope varname( ctx, __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), depth, active );
+#else
+# define TracyCLNamedZoneS(ctx, varname, name, depth, active) TracyCLNamedZone(ctx, varname, name, active)
+# define TracyCLNamedZoneCS(ctx, varname, name, color, depth, active) TracyCLNamedZoneC(ctx, varname, name, color, active)
+# define TracyCLZoneS(ctx, name, depth) TracyCLZone(ctx, name)
+# define TracyCLZoneCS(ctx, name, color, depth) TracyCLZoneC(ctx, name, color)
+# define TracyCLZoneTransientS( ctx, varname, name, depth, active ) TracyCLZoneTransient( ctx, varname, name, active )
+#endif
+
+#define TracyCLNamedZoneSetEvent(varname, event) varname.SetEvent(event)
+#define TracyCLZoneSetEvent(event) __tracy_gpu_zone.SetEvent(event)
+
+#define TracyCLCollect(ctx) ctx->Collect()
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyOpenGL.hpp b/3rdparty/tracy/tracy/TracyOpenGL.hpp
new file mode 100644
index 0000000..d540422
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyOpenGL.hpp
@@ -0,0 +1,325 @@
+#ifndef __TRACYOPENGL_HPP__
+#define __TRACYOPENGL_HPP__
+
+#if !defined TRACY_ENABLE || defined __APPLE__
+
+#define TracyGpuContext
+#define TracyGpuContextName(x,y)
+#define TracyGpuNamedZone(x,y,z)
+#define TracyGpuNamedZoneC(x,y,z,w)
+#define TracyGpuZone(x)
+#define TracyGpuZoneC(x,y)
+#define TracyGpuZoneTransient(x,y,z)
+#define TracyGpuCollect
+
+#define TracyGpuNamedZoneS(x,y,z,w)
+#define TracyGpuNamedZoneCS(x,y,z,w,a)
+#define TracyGpuZoneS(x,y)
+#define TracyGpuZoneCS(x,y,z)
+#define TracyGpuZoneTransientS(x,y,z,w)
+
+namespace tracy
+{
+struct SourceLocationData;
+class GpuCtxScope
+{
+public:
+ GpuCtxScope( const SourceLocationData*, bool ) {}
+ GpuCtxScope( const SourceLocationData*, int, bool ) {}
+};
+}
+
+#else
+
+#include <atomic>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "Tracy.hpp"
+#include "client/TracyProfiler.hpp"
+#include "client/TracyCallstack.hpp"
+#include "common/TracyAlign.hpp"
+#include "common/TracyAlloc.hpp"
+
+#if !defined GL_TIMESTAMP && defined GL_TIMESTAMP_EXT
+# define GL_TIMESTAMP GL_TIMESTAMP_EXT
+# define GL_QUERY_COUNTER_BITS GL_QUERY_COUNTER_BITS_EXT
+# define glGetQueryObjectiv glGetQueryObjectivEXT
+# define glGetQueryObjectui64v glGetQueryObjectui64vEXT
+# define glQueryCounter glQueryCounterEXT
+#endif
+
+#define TracyGpuContext tracy::GetGpuCtx().ptr = (tracy::GpuCtx*)tracy::tracy_malloc( sizeof( tracy::GpuCtx ) ); new(tracy::GetGpuCtx().ptr) tracy::GpuCtx;
+#define TracyGpuContextName( name, size ) tracy::GetGpuCtx().ptr->Name( name, size );
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyGpuNamedZone( varname, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::GpuCtxScope varname( &TracyConcat(__tracy_gpu_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyGpuNamedZoneC( varname, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::GpuCtxScope varname( &TracyConcat(__tracy_gpu_source_location,__LINE__), TRACY_CALLSTACK, active );
+# define TracyGpuZone( name ) TracyGpuNamedZoneS( ___tracy_gpu_zone, name, TRACY_CALLSTACK, true )
+# define TracyGpuZoneC( name, color ) TracyGpuNamedZoneCS( ___tracy_gpu_zone, name, color, TRACY_CALLSTACK, true )
+# define TracyGpuZoneTransient( varname, name, active ) tracy::GpuCtxScope varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), TRACY_CALLSTACK, active );
+#else
+# define TracyGpuNamedZone( varname, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::GpuCtxScope varname( &TracyConcat(__tracy_gpu_source_location,__LINE__), active );
+# define TracyGpuNamedZoneC( varname, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::GpuCtxScope varname( &TracyConcat(__tracy_gpu_source_location,__LINE__), active );
+# define TracyGpuZone( name ) TracyGpuNamedZone( ___tracy_gpu_zone, name, true )
+# define TracyGpuZoneC( name, color ) TracyGpuNamedZoneC( ___tracy_gpu_zone, name, color, true )
+# define TracyGpuZoneTransient( varname, name, active ) tracy::GpuCtxScope varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), active );
+#endif
+#define TracyGpuCollect tracy::GetGpuCtx().ptr->Collect();
+
+#ifdef TRACY_HAS_CALLSTACK
+# define TracyGpuNamedZoneS( varname, name, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::GpuCtxScope varname( &TracyConcat(__tracy_gpu_source_location,__LINE__), depth, active );
+# define TracyGpuNamedZoneCS( varname, name, color, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::GpuCtxScope varname( &TracyConcat(__tracy_gpu_source_location,__LINE__), depth, active );
+# define TracyGpuZoneS( name, depth ) TracyGpuNamedZoneS( ___tracy_gpu_zone, name, depth, true )
+# define TracyGpuZoneCS( name, color, depth ) TracyGpuNamedZoneCS( ___tracy_gpu_zone, name, color, depth, true )
+# define TracyGpuZoneTransientS( varname, name, depth, active ) tracy::GpuCtxScope varname( __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), depth, active );
+#else
+# define TracyGpuNamedZoneS( varname, name, depth, active ) TracyGpuNamedZone( varname, name, active )
+# define TracyGpuNamedZoneCS( varname, name, color, depth, active ) TracyGpuNamedZoneC( varname, name, color, active )
+# define TracyGpuZoneS( name, depth ) TracyGpuZone( name )
+# define TracyGpuZoneCS( name, color, depth ) TracyGpuZoneC( name, color )
+# define TracyGpuZoneTransientS( varname, name, depth, active ) TracyGpuZoneTransient( varname, name, active )
+#endif
+
+namespace tracy
+{
+
+class GpuCtx
+{
+ friend class GpuCtxScope;
+
+ enum { QueryCount = 64 * 1024 };
+
+public:
+ GpuCtx()
+ : m_context( GetGpuCtxCounter().fetch_add( 1, std::memory_order_relaxed ) )
+ , m_head( 0 )
+ , m_tail( 0 )
+ {
+ assert( m_context != 255 );
+
+ glGenQueries( QueryCount, m_query );
+
+ int64_t tgpu;
+ glGetInteger64v( GL_TIMESTAMP, &tgpu );
+ int64_t tcpu = Profiler::GetTime();
+
+ GLint bits;
+ glGetQueryiv( GL_TIMESTAMP, GL_QUERY_COUNTER_BITS, &bits );
+
+ const float period = 1.f;
+ const auto thread = GetThreadHandle();
+ TracyLfqPrepare( QueueType::GpuNewContext );
+ MemWrite( &item->gpuNewContext.cpuTime, tcpu );
+ MemWrite( &item->gpuNewContext.gpuTime, tgpu );
+ MemWrite( &item->gpuNewContext.thread, thread );
+ MemWrite( &item->gpuNewContext.period, period );
+ MemWrite( &item->gpuNewContext.context, m_context );
+ MemWrite( &item->gpuNewContext.flags, uint8_t( 0 ) );
+ MemWrite( &item->gpuNewContext.type, GpuContextType::OpenGl );
+
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+
+ TracyLfqCommit;
+ }
+
+ void Name( const char* name, uint16_t len )
+ {
+ auto ptr = (char*)tracy_malloc( len );
+ memcpy( ptr, name, len );
+
+ TracyLfqPrepare( QueueType::GpuContextName );
+ MemWrite( &item->gpuContextNameFat.context, m_context );
+ MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
+ MemWrite( &item->gpuContextNameFat.size, len );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ TracyLfqCommit;
+ }
+
+ void Collect()
+ {
+ ZoneScopedC( Color::Red4 );
+
+ if( m_tail == m_head ) return;
+
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() )
+ {
+ m_head = m_tail = 0;
+ return;
+ }
+#endif
+
+ while( m_tail != m_head )
+ {
+ GLint available;
+ glGetQueryObjectiv( m_query[m_tail], GL_QUERY_RESULT_AVAILABLE, &available );
+ if( !available ) return;
+
+ uint64_t time;
+ glGetQueryObjectui64v( m_query[m_tail], GL_QUERY_RESULT, &time );
+
+ TracyLfqPrepare( QueueType::GpuTime );
+ MemWrite( &item->gpuTime.gpuTime, (int64_t)time );
+ MemWrite( &item->gpuTime.queryId, (uint16_t)m_tail );
+ MemWrite( &item->gpuTime.context, m_context );
+ TracyLfqCommit;
+
+ m_tail = ( m_tail + 1 ) % QueryCount;
+ }
+ }
+
+private:
+ tracy_force_inline unsigned int NextQueryId()
+ {
+ const auto id = m_head;
+ m_head = ( m_head + 1 ) % QueryCount;
+ assert( m_head != m_tail );
+ return id;
+ }
+
+ tracy_force_inline unsigned int TranslateOpenGlQueryId( unsigned int id )
+ {
+ return m_query[id];
+ }
+
+ tracy_force_inline uint8_t GetId() const
+ {
+ return m_context;
+ }
+
+ unsigned int m_query[QueryCount];
+ uint8_t m_context;
+
+ unsigned int m_head;
+ unsigned int m_tail;
+};
+
+class GpuCtxScope
+{
+public:
+ tracy_force_inline GpuCtxScope( const SourceLocationData* srcloc, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+
+ const auto queryId = GetGpuCtx().ptr->NextQueryId();
+ glQueryCounter( GetGpuCtx().ptr->TranslateOpenGlQueryId( queryId ), GL_TIMESTAMP );
+
+ TracyLfqPrepare( QueueType::GpuZoneBegin );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ memset( &item->gpuZoneBegin.thread, 0, sizeof( item->gpuZoneBegin.thread ) );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, GetGpuCtx().ptr->GetId() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ TracyLfqCommit;
+ }
+
+ tracy_force_inline GpuCtxScope( const SourceLocationData* srcloc, int depth, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+
+ const auto queryId = GetGpuCtx().ptr->NextQueryId();
+ glQueryCounter( GetGpuCtx().ptr->TranslateOpenGlQueryId( queryId ), GL_TIMESTAMP );
+
+#ifdef TRACY_FIBERS
+ TracyLfqPrepare( QueueType::GpuZoneBegin );
+ memset( &item->gpuZoneBegin.thread, 0, sizeof( item->gpuZoneBegin.thread ) );
+#else
+ GetProfiler().SendCallstack( depth );
+ TracyLfqPrepare( QueueType::GpuZoneBeginCallstack );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+#endif
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, GetGpuCtx().ptr->GetId() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ TracyLfqCommit;
+ }
+
+ tracy_force_inline GpuCtxScope( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+
+ const auto queryId = GetGpuCtx().ptr->NextQueryId();
+ glQueryCounter( GetGpuCtx().ptr->TranslateOpenGlQueryId( queryId ), GL_TIMESTAMP );
+
+ TracyLfqPrepare( QueueType::GpuZoneBeginAllocSrcLoc );
+ const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ memset( &item->gpuZoneBegin.thread, 0, sizeof( item->gpuZoneBegin.thread ) );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, GetGpuCtx().ptr->GetId() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ TracyLfqCommit;
+ }
+
+ tracy_force_inline GpuCtxScope( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, int depth, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+
+ const auto queryId = GetGpuCtx().ptr->NextQueryId();
+ glQueryCounter( GetGpuCtx().ptr->TranslateOpenGlQueryId( queryId ), GL_TIMESTAMP );
+
+#ifdef TRACY_FIBERS
+ TracyLfqPrepare( QueueType::GpuZoneBeginAllocSrcLoc );
+ memset( &item->gpuZoneBegin.thread, 0, sizeof( item->gpuZoneBegin.thread ) );
+#else
+ GetProfiler().SendCallstack( depth );
+ TracyLfqPrepare( QueueType::GpuZoneBeginAllocSrcLocCallstack );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+#endif
+ const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, GetGpuCtx().ptr->GetId() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ TracyLfqCommit;
+ }
+
+ tracy_force_inline ~GpuCtxScope()
+ {
+ if( !m_active ) return;
+
+ const auto queryId = GetGpuCtx().ptr->NextQueryId();
+ glQueryCounter( GetGpuCtx().ptr->TranslateOpenGlQueryId( queryId ), GL_TIMESTAMP );
+
+ TracyLfqPrepare( QueueType::GpuZoneEnd );
+ MemWrite( &item->gpuZoneEnd.cpuTime, Profiler::GetTime() );
+ memset( &item->gpuZoneEnd.thread, 0, sizeof( item->gpuZoneEnd.thread ) );
+ MemWrite( &item->gpuZoneEnd.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneEnd.context, GetGpuCtx().ptr->GetId() );
+ TracyLfqCommit;
+ }
+
+private:
+ const bool m_active;
+};
+
+}
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/TracyVulkan.hpp b/3rdparty/tracy/tracy/TracyVulkan.hpp
new file mode 100644
index 0000000..447f973
--- /dev/null
+++ b/3rdparty/tracy/tracy/TracyVulkan.hpp
@@ -0,0 +1,512 @@
+#ifndef __TRACYVULKAN_HPP__
+#define __TRACYVULKAN_HPP__
+
+#if !defined TRACY_ENABLE
+
+#define TracyVkContext(x,y,z,w) nullptr
+#define TracyVkContextCalibrated(x,y,z,w,a,b) nullptr
+#define TracyVkDestroy(x)
+#define TracyVkContextName(c,x,y)
+#define TracyVkNamedZone(c,x,y,z,w)
+#define TracyVkNamedZoneC(c,x,y,z,w,a)
+#define TracyVkZone(c,x,y)
+#define TracyVkZoneC(c,x,y,z)
+#define TracyVkZoneTransient(c,x,y,z,w)
+#define TracyVkCollect(c,x)
+
+#define TracyVkNamedZoneS(c,x,y,z,w,a)
+#define TracyVkNamedZoneCS(c,x,y,z,w,v,a)
+#define TracyVkZoneS(c,x,y,z)
+#define TracyVkZoneCS(c,x,y,z,w)
+#define TracyVkZoneTransientS(c,x,y,z,w,a)
+
+namespace tracy
+{
+class VkCtxScope {};
+}
+
+using TracyVkCtx = void*;
+
+#else
+
+#if !defined VK_NULL_HANDLE
+# error "You must include Vulkan headers before including TracyVulkan.hpp"
+#endif
+
+#include <assert.h>
+#include <stdlib.h>
+#include "Tracy.hpp"
+#include "client/TracyProfiler.hpp"
+#include "client/TracyCallstack.hpp"
+
+namespace tracy
+{
+
+class VkCtx
+{
+ friend class VkCtxScope;
+
+ enum { QueryCount = 64 * 1024 };
+
+public:
+ VkCtx( VkPhysicalDevice physdev, VkDevice device, VkQueue queue, VkCommandBuffer cmdbuf, PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT _vkGetPhysicalDeviceCalibrateableTimeDomainsEXT, PFN_vkGetCalibratedTimestampsEXT _vkGetCalibratedTimestampsEXT )
+ : m_device( device )
+ , m_timeDomain( VK_TIME_DOMAIN_DEVICE_EXT )
+ , m_context( GetGpuCtxCounter().fetch_add( 1, std::memory_order_relaxed ) )
+ , m_head( 0 )
+ , m_tail( 0 )
+ , m_oldCnt( 0 )
+ , m_queryCount( QueryCount )
+ , m_vkGetCalibratedTimestampsEXT( _vkGetCalibratedTimestampsEXT )
+ {
+ assert( m_context != 255 );
+
+ if( _vkGetPhysicalDeviceCalibrateableTimeDomainsEXT && _vkGetCalibratedTimestampsEXT )
+ {
+ uint32_t num;
+ _vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( physdev, &num, nullptr );
+ if( num > 4 ) num = 4;
+ VkTimeDomainEXT data[4];
+ _vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( physdev, &num, data );
+ VkTimeDomainEXT supportedDomain = (VkTimeDomainEXT)-1;
+#if defined _WIN32
+ supportedDomain = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
+#elif defined __linux__ && defined CLOCK_MONOTONIC_RAW
+ supportedDomain = VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT;
+#endif
+ for( uint32_t i=0; i<num; i++ )
+ {
+ if( data[i] == supportedDomain )
+ {
+ m_timeDomain = data[i];
+ break;
+ }
+ }
+ }
+
+ VkPhysicalDeviceProperties prop;
+ vkGetPhysicalDeviceProperties( physdev, &prop );
+ const float period = prop.limits.timestampPeriod;
+
+ VkQueryPoolCreateInfo poolInfo = {};
+ poolInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ poolInfo.queryCount = m_queryCount;
+ poolInfo.queryType = VK_QUERY_TYPE_TIMESTAMP;
+ while( vkCreateQueryPool( device, &poolInfo, nullptr, &m_query ) != VK_SUCCESS )
+ {
+ m_queryCount /= 2;
+ poolInfo.queryCount = m_queryCount;
+ }
+
+ VkCommandBufferBeginInfo beginInfo = {};
+ beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+
+ VkSubmitInfo submitInfo = {};
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &cmdbuf;
+
+ vkBeginCommandBuffer( cmdbuf, &beginInfo );
+ vkCmdResetQueryPool( cmdbuf, m_query, 0, m_queryCount );
+ vkEndCommandBuffer( cmdbuf );
+ vkQueueSubmit( queue, 1, &submitInfo, VK_NULL_HANDLE );
+ vkQueueWaitIdle( queue );
+
+ int64_t tcpu, tgpu;
+ if( m_timeDomain == VK_TIME_DOMAIN_DEVICE_EXT )
+ {
+ vkBeginCommandBuffer( cmdbuf, &beginInfo );
+ vkCmdWriteTimestamp( cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, m_query, 0 );
+ vkEndCommandBuffer( cmdbuf );
+ vkQueueSubmit( queue, 1, &submitInfo, VK_NULL_HANDLE );
+ vkQueueWaitIdle( queue );
+
+ tcpu = Profiler::GetTime();
+ vkGetQueryPoolResults( device, m_query, 0, 1, sizeof( tgpu ), &tgpu, sizeof( tgpu ), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT );
+
+ vkBeginCommandBuffer( cmdbuf, &beginInfo );
+ vkCmdResetQueryPool( cmdbuf, m_query, 0, 1 );
+ vkEndCommandBuffer( cmdbuf );
+ vkQueueSubmit( queue, 1, &submitInfo, VK_NULL_HANDLE );
+ vkQueueWaitIdle( queue );
+ }
+ else
+ {
+ enum { NumProbes = 32 };
+
+ VkCalibratedTimestampInfoEXT spec[2] = {
+ { VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, nullptr, VK_TIME_DOMAIN_DEVICE_EXT },
+ { VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, nullptr, m_timeDomain },
+ };
+ uint64_t ts[2];
+ uint64_t deviation[NumProbes];
+ for( int i=0; i<NumProbes; i++ )
+ {
+ _vkGetCalibratedTimestampsEXT( device, 2, spec, ts, deviation+i );
+ }
+ uint64_t minDeviation = deviation[0];
+ for( int i=1; i<NumProbes; i++ )
+ {
+ if( minDeviation > deviation[i] )
+ {
+ minDeviation = deviation[i];
+ }
+ }
+ m_deviation = minDeviation * 3 / 2;
+
+#if defined _WIN32
+ m_qpcToNs = int64_t( 1000000000. / GetFrequencyQpc() );
+#endif
+
+ Calibrate( device, m_prevCalibration, tgpu );
+ tcpu = Profiler::GetTime();
+ }
+
+ uint8_t flags = 0;
+ if( m_timeDomain != VK_TIME_DOMAIN_DEVICE_EXT ) flags |= GpuContextCalibration;
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuNewContext );
+ MemWrite( &item->gpuNewContext.cpuTime, tcpu );
+ MemWrite( &item->gpuNewContext.gpuTime, tgpu );
+ memset( &item->gpuNewContext.thread, 0, sizeof( item->gpuNewContext.thread ) );
+ MemWrite( &item->gpuNewContext.period, period );
+ MemWrite( &item->gpuNewContext.context, m_context );
+ MemWrite( &item->gpuNewContext.flags, flags );
+ MemWrite( &item->gpuNewContext.type, GpuContextType::Vulkan );
+
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+
+ m_res = (int64_t*)tracy_malloc( sizeof( int64_t ) * m_queryCount );
+ }
+
+ ~VkCtx()
+ {
+ tracy_free( m_res );
+ vkDestroyQueryPool( m_device, m_query, nullptr );
+ }
+
+ void Name( const char* name, uint16_t len )
+ {
+ auto ptr = (char*)tracy_malloc( len );
+ memcpy( ptr, name, len );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuContextName );
+ MemWrite( &item->gpuContextNameFat.context, m_context );
+ MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
+ MemWrite( &item->gpuContextNameFat.size, len );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ void Collect( VkCommandBuffer cmdbuf )
+ {
+ ZoneScopedC( Color::Red4 );
+
+ if( m_tail == m_head ) return;
+
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() )
+ {
+ vkCmdResetQueryPool( cmdbuf, m_query, 0, m_queryCount );
+ m_head = m_tail = m_oldCnt = 0;
+ int64_t tgpu;
+ if( m_timeDomain != VK_TIME_DOMAIN_DEVICE_EXT ) Calibrate( m_device, m_prevCalibration, tgpu );
+ return;
+ }
+#endif
+
+ unsigned int cnt;
+ if( m_oldCnt != 0 )
+ {
+ cnt = m_oldCnt;
+ m_oldCnt = 0;
+ }
+ else
+ {
+ cnt = m_head < m_tail ? m_queryCount - m_tail : m_head - m_tail;
+ }
+
+ if( vkGetQueryPoolResults( m_device, m_query, m_tail, cnt, sizeof( int64_t ) * m_queryCount, m_res, sizeof( int64_t ), VK_QUERY_RESULT_64_BIT ) == VK_NOT_READY )
+ {
+ m_oldCnt = cnt;
+ return;
+ }
+
+ for( unsigned int idx=0; idx<cnt; idx++ )
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuTime );
+ MemWrite( &item->gpuTime.gpuTime, m_res[idx] );
+ MemWrite( &item->gpuTime.queryId, uint16_t( m_tail + idx ) );
+ MemWrite( &item->gpuTime.context, m_context );
+ Profiler::QueueSerialFinish();
+ }
+
+ if( m_timeDomain != VK_TIME_DOMAIN_DEVICE_EXT )
+ {
+ int64_t tgpu, tcpu;
+ Calibrate( m_device, tcpu, tgpu );
+ const auto refCpu = Profiler::GetTime();
+ const auto delta = tcpu - m_prevCalibration;
+ if( delta > 0 )
+ {
+ m_prevCalibration = tcpu;
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuCalibration );
+ MemWrite( &item->gpuCalibration.gpuTime, tgpu );
+ MemWrite( &item->gpuCalibration.cpuTime, refCpu );
+ MemWrite( &item->gpuCalibration.cpuDelta, delta );
+ MemWrite( &item->gpuCalibration.context, m_context );
+ Profiler::QueueSerialFinish();
+ }
+ }
+
+ vkCmdResetQueryPool( cmdbuf, m_query, m_tail, cnt );
+
+ m_tail += cnt;
+ if( m_tail == m_queryCount ) m_tail = 0;
+ }
+
+private:
+ tracy_force_inline unsigned int NextQueryId()
+ {
+ const auto id = m_head;
+ m_head = ( m_head + 1 ) % m_queryCount;
+ assert( m_head != m_tail );
+ return id;
+ }
+
+ tracy_force_inline uint8_t GetId() const
+ {
+ return m_context;
+ }
+
+ tracy_force_inline void Calibrate( VkDevice device, int64_t& tCpu, int64_t& tGpu )
+ {
+ assert( m_timeDomain != VK_TIME_DOMAIN_DEVICE_EXT );
+ VkCalibratedTimestampInfoEXT spec[2] = {
+ { VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, nullptr, VK_TIME_DOMAIN_DEVICE_EXT },
+ { VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, nullptr, m_timeDomain },
+ };
+ uint64_t ts[2];
+ uint64_t deviation;
+ do
+ {
+ m_vkGetCalibratedTimestampsEXT( device, 2, spec, ts, &deviation );
+ }
+ while( deviation > m_deviation );
+
+#if defined _WIN32
+ tGpu = ts[0];
+ tCpu = ts[1] * m_qpcToNs;
+#elif defined __linux__ && defined CLOCK_MONOTONIC_RAW
+ tGpu = ts[0];
+ tCpu = ts[1];
+#else
+ assert( false );
+#endif
+ }
+
+ VkDevice m_device;
+ VkQueryPool m_query;
+ VkTimeDomainEXT m_timeDomain;
+ uint64_t m_deviation;
+ int64_t m_qpcToNs;
+ int64_t m_prevCalibration;
+ uint8_t m_context;
+
+ unsigned int m_head;
+ unsigned int m_tail;
+ unsigned int m_oldCnt;
+ unsigned int m_queryCount;
+
+ int64_t* m_res;
+
+ PFN_vkGetCalibratedTimestampsEXT m_vkGetCalibratedTimestampsEXT;
+};
+
+class VkCtxScope
+{
+public:
+ tracy_force_inline VkCtxScope( VkCtx* ctx, const SourceLocationData* srcloc, VkCommandBuffer cmdbuf, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+ m_cmdbuf = cmdbuf;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ vkCmdWriteTimestamp( cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ctx->m_query, queryId );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginSerial );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, ctx->GetId() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline VkCtxScope( VkCtx* ctx, const SourceLocationData* srcloc, VkCommandBuffer cmdbuf, int depth, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+ m_cmdbuf = cmdbuf;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ vkCmdWriteTimestamp( cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ctx->m_query, queryId );
+
+ auto item = Profiler::QueueSerialCallstack( Callstack( depth ) );
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginCallstackSerial );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.srcloc, (uint64_t)srcloc );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, ctx->GetId() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline VkCtxScope( VkCtx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, VkCommandBuffer cmdbuf, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+ m_cmdbuf = cmdbuf;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ vkCmdWriteTimestamp( cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ctx->m_query, queryId );
+
+ const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocSerial );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.srcloc, srcloc );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, ctx->GetId() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline VkCtxScope( VkCtx* ctx, uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, VkCommandBuffer cmdbuf, int depth, bool is_active )
+#ifdef TRACY_ON_DEMAND
+ : m_active( is_active && GetProfiler().IsConnected() )
+#else
+ : m_active( is_active )
+#endif
+ {
+ if( !m_active ) return;
+ m_cmdbuf = cmdbuf;
+ m_ctx = ctx;
+
+ const auto queryId = ctx->NextQueryId();
+ vkCmdWriteTimestamp( cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ctx->m_query, queryId );
+
+ const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
+ auto item = Profiler::QueueSerialCallstack( Callstack( depth ) );
+ MemWrite( &item->hdr.type, QueueType::GpuZoneBeginAllocSrcLocCallstackSerial );
+ MemWrite( &item->gpuZoneBegin.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneBegin.srcloc, srcloc );
+ MemWrite( &item->gpuZoneBegin.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneBegin.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneBegin.context, ctx->GetId() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline ~VkCtxScope()
+ {
+ if( !m_active ) return;
+
+ const auto queryId = m_ctx->NextQueryId();
+ vkCmdWriteTimestamp( m_cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, m_ctx->m_query, queryId );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::GpuZoneEndSerial );
+ MemWrite( &item->gpuZoneEnd.cpuTime, Profiler::GetTime() );
+ MemWrite( &item->gpuZoneEnd.thread, GetThreadHandle() );
+ MemWrite( &item->gpuZoneEnd.queryId, uint16_t( queryId ) );
+ MemWrite( &item->gpuZoneEnd.context, m_ctx->GetId() );
+ Profiler::QueueSerialFinish();
+ }
+
+private:
+ const bool m_active;
+
+ VkCommandBuffer m_cmdbuf;
+ VkCtx* m_ctx;
+};
+
+static inline VkCtx* CreateVkContext( VkPhysicalDevice physdev, VkDevice device, VkQueue queue, VkCommandBuffer cmdbuf, PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT gpdctd, PFN_vkGetCalibratedTimestampsEXT gct )
+{
+ auto ctx = (VkCtx*)tracy_malloc( sizeof( VkCtx ) );
+ new(ctx) VkCtx( physdev, device, queue, cmdbuf, gpdctd, gct );
+ return ctx;
+}
+
+static inline void DestroyVkContext( VkCtx* ctx )
+{
+ ctx->~VkCtx();
+ tracy_free( ctx );
+}
+
+}
+
+using TracyVkCtx = tracy::VkCtx*;
+
+#define TracyVkContext( physdev, device, queue, cmdbuf ) tracy::CreateVkContext( physdev, device, queue, cmdbuf, nullptr, nullptr );
+#define TracyVkContextCalibrated( physdev, device, queue, cmdbuf, gpdctd, gct ) tracy::CreateVkContext( physdev, device, queue, cmdbuf, gpdctd, gct );
+#define TracyVkDestroy( ctx ) tracy::DestroyVkContext( ctx );
+#define TracyVkContextName( ctx, name, size ) ctx->Name( name, size );
+#if defined TRACY_HAS_CALLSTACK && defined TRACY_CALLSTACK
+# define TracyVkNamedZone( ctx, varname, cmdbuf, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::VkCtxScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), cmdbuf, TRACY_CALLSTACK, active );
+# define TracyVkNamedZoneC( ctx, varname, cmdbuf, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::VkCtxScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), cmdbuf, TRACY_CALLSTACK, active );
+# define TracyVkZone( ctx, cmdbuf, name ) TracyVkNamedZoneS( ctx, ___tracy_gpu_zone, cmdbuf, name, TRACY_CALLSTACK, true )
+# define TracyVkZoneC( ctx, cmdbuf, name, color ) TracyVkNamedZoneCS( ctx, ___tracy_gpu_zone, cmdbuf, name, color, TRACY_CALLSTACK, true )
+# define TracyVkZoneTransient( ctx, varname, cmdbuf, name, active ) TracyVkZoneTransientS( ctx, varname, cmdbuf, name, TRACY_CALLSTACK, active )
+#else
+# define TracyVkNamedZone( ctx, varname, cmdbuf, name, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::VkCtxScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), cmdbuf, active );
+# define TracyVkNamedZoneC( ctx, varname, cmdbuf, name, color, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::VkCtxScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), cmdbuf, active );
+# define TracyVkZone( ctx, cmdbuf, name ) TracyVkNamedZone( ctx, ___tracy_gpu_zone, cmdbuf, name, true )
+# define TracyVkZoneC( ctx, cmdbuf, name, color ) TracyVkNamedZoneC( ctx, ___tracy_gpu_zone, cmdbuf, name, color, true )
+# define TracyVkZoneTransient( ctx, varname, cmdbuf, name, active ) tracy::VkCtxScope varname( ctx, __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), cmdbuf, active );
+#endif
+#define TracyVkCollect( ctx, cmdbuf ) ctx->Collect( cmdbuf );
+
+#ifdef TRACY_HAS_CALLSTACK
+# define TracyVkNamedZoneS( ctx, varname, cmdbuf, name, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 }; tracy::VkCtxScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), cmdbuf, depth, active );
+# define TracyVkNamedZoneCS( ctx, varname, cmdbuf, name, color, depth, active ) static constexpr tracy::SourceLocationData TracyConcat(__tracy_gpu_source_location,__LINE__) { name, __FUNCTION__, __FILE__, (uint32_t)__LINE__, color }; tracy::VkCtxScope varname( ctx, &TracyConcat(__tracy_gpu_source_location,__LINE__), cmdbuf, depth, active );
+# define TracyVkZoneS( ctx, cmdbuf, name, depth ) TracyVkNamedZoneS( ctx, ___tracy_gpu_zone, cmdbuf, name, depth, true )
+# define TracyVkZoneCS( ctx, cmdbuf, name, color, depth ) TracyVkNamedZoneCS( ctx, ___tracy_gpu_zone, cmdbuf, name, color, depth, true )
+# define TracyVkZoneTransientS( ctx, varname, cmdbuf, name, depth, active ) tracy::VkCtxScope varname( ctx, __LINE__, __FILE__, strlen( __FILE__ ), __FUNCTION__, strlen( __FUNCTION__ ), name, strlen( name ), cmdbuf, depth, active );
+#else
+# define TracyVkNamedZoneS( ctx, varname, cmdbuf, name, depth, active ) TracyVkNamedZone( ctx, varname, cmdbuf, name, active )
+# define TracyVkNamedZoneCS( ctx, varname, cmdbuf, name, color, depth, active ) TracyVkNamedZoneC( ctx, varname, cmdbuf, name, color, active )
+# define TracyVkZoneS( ctx, cmdbuf, name, depth ) TracyVkZone( ctx, cmdbuf, name )
+# define TracyVkZoneCS( ctx, cmdbuf, name, color, depth ) TracyVkZoneC( ctx, cmdbuf, name, color )
+# define TracyVkZoneTransientS( ctx, varname, cmdbuf, name, depth, active ) TracyVkZoneTransient( ctx, varname, cmdbuf, name, active )
+#endif
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyAlloc.cpp b/3rdparty/tracy/tracy/client/TracyAlloc.cpp
new file mode 100644
index 0000000..0106a01
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyAlloc.cpp
@@ -0,0 +1,42 @@
+#ifdef TRACY_ENABLE
+
+#include <atomic>
+
+#include "../common/TracyAlloc.hpp"
+#include "../common/TracyForceInline.hpp"
+#include "../common/TracyYield.hpp"
+
+namespace tracy
+{
+
+extern thread_local bool RpThreadInitDone;
+extern std::atomic<int> RpInitDone;
+extern std::atomic<int> RpInitLock;
+
+tracy_no_inline static void InitRpmallocPlumbing()
+{
+ const auto done = RpInitDone.load( std::memory_order_acquire );
+ if( !done )
+ {
+ int expected = 0;
+ while( !RpInitLock.compare_exchange_weak( expected, 1, std::memory_order_release, std::memory_order_relaxed ) ) { expected = 0; YieldThread(); }
+ const auto done = RpInitDone.load( std::memory_order_acquire );
+ if( !done )
+ {
+ rpmalloc_initialize();
+ RpInitDone.store( 1, std::memory_order_release );
+ }
+ RpInitLock.store( 0, std::memory_order_release );
+ }
+ rpmalloc_thread_initialize();
+ RpThreadInitDone = true;
+}
+
+TRACY_API void InitRpmalloc()
+{
+ if( !RpThreadInitDone ) InitRpmallocPlumbing();
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyArmCpuTable.hpp b/3rdparty/tracy/tracy/client/TracyArmCpuTable.hpp
new file mode 100644
index 0000000..8e39628
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyArmCpuTable.hpp
@@ -0,0 +1,370 @@
+namespace tracy
+{
+
+#if defined __linux__ && defined __ARM_ARCH
+
+static const char* DecodeArmImplementer( uint32_t v )
+{
+ static char buf[16];
+ switch( v )
+ {
+ case 0x41: return "ARM";
+ case 0x42: return "Broadcom";
+ case 0x43: return "Cavium";
+ case 0x44: return "DEC";
+ case 0x46: return "Fujitsu";
+ case 0x48: return "HiSilicon";
+ case 0x49: return "Infineon";
+ case 0x4d: return "Motorola";
+ case 0x4e: return "Nvidia";
+ case 0x50: return "Applied Micro";
+ case 0x51: return "Qualcomm";
+ case 0x53: return "Samsung";
+ case 0x54: return "Texas Instruments";
+ case 0x56: return "Marvell";
+ case 0x61: return "Apple";
+ case 0x66: return "Faraday";
+ case 0x68: return "HXT";
+ case 0x69: return "Intel";
+ case 0xc0: return "Ampere Computing";
+ default: break;
+ }
+ sprintf( buf, "0x%x", v );
+ return buf;
+}
+
+static const char* DecodeArmPart( uint32_t impl, uint32_t part )
+{
+ static char buf[16];
+ switch( impl )
+ {
+ case 0x41:
+ switch( part )
+ {
+ case 0x810: return "810";
+ case 0x920: return "920";
+ case 0x922: return "922";
+ case 0x926: return "926";
+ case 0x940: return "940";
+ case 0x946: return "946";
+ case 0x966: return "966";
+ case 0xa20: return "1020";
+ case 0xa22: return "1022";
+ case 0xa26: return "1026";
+ case 0xb02: return "11 MPCore";
+ case 0xb36: return "1136";
+ case 0xb56: return "1156";
+ case 0xb76: return "1176";
+ case 0xc05: return " Cortex-A5";
+ case 0xc07: return " Cortex-A7";
+ case 0xc08: return " Cortex-A8";
+ case 0xc09: return " Cortex-A9";
+ case 0xc0c: return " Cortex-A12";
+ case 0xc0d: return " Rockchip RK3288";
+ case 0xc0e: return " Cortex-A17";
+ case 0xc0f: return " Cortex-A15";
+ case 0xc14: return " Cortex-R4";
+ case 0xc15: return " Cortex-R5";
+ case 0xc17: return " Cortex-R7";
+ case 0xc18: return " Cortex-R8";
+ case 0xc20: return " Cortex-M0";
+ case 0xc21: return " Cortex-M1";
+ case 0xc23: return " Cortex-M3";
+ case 0xc24: return " Cortex-M4";
+ case 0xc27: return " Cortex-M7";
+ case 0xc60: return " Cortex-M0+";
+ case 0xd00: return " AArch64 simulator";
+ case 0xd01: return " Cortex-A32";
+ case 0xd02: return " Cortex-A34";
+ case 0xd03: return " Cortex-A53";
+ case 0xd04: return " Cortex-A35";
+ case 0xd05: return " Cortex-A55";
+ case 0xd06: return " Cortex-A65";
+ case 0xd07: return " Cortex-A57";
+ case 0xd08: return " Cortex-A72";
+ case 0xd09: return " Cortex-A73";
+ case 0xd0a: return " Cortex-A75";
+ case 0xd0b: return " Cortex-A76";
+ case 0xd0c: return " Neoverse N1";
+ case 0xd0d: return " Cortex-A77";
+ case 0xd0e: return " Cortex-A76AE";
+ case 0xd0f: return " AEMv8";
+ case 0xd13: return " Cortex-R52";
+ case 0xd20: return " Cortex-M23";
+ case 0xd21: return " Cortex-M33";
+ case 0xd40: return " Neoverse V1";
+ case 0xd41: return " Cortex-A78";
+ case 0xd42: return " Cortex-A78AE";
+ case 0xd43: return " Cortex-A65AE";
+ case 0xd44: return " Cortex-X1";
+ case 0xd47: return " Cortex-A710";
+ case 0xd48: return " Cortex-X2";
+ case 0xd49: return " Neoverse N2";
+ case 0xd4a: return " Neoverse E1";
+ case 0xd4b: return " Cortex-A78C";
+ default: break;
+ }
+ case 0x42:
+ switch( part )
+ {
+ case 0xf: return " Brahma B15";
+ case 0x100: return " Brahma B53";
+ case 0x516: return " ThunderX2";
+ default: break;
+ }
+ case 0x43:
+ switch( part )
+ {
+ case 0xa0: return " ThunderX";
+ case 0xa1: return " ThunderX 88XX";
+ case 0xa2: return " ThunderX 81XX";
+ case 0xa3: return " ThunderX 83XX";
+ case 0xaf: return " ThunderX2 99xx";
+ case 0xb0: return " OcteonTX2";
+ case 0xb1: return " OcteonTX2 T98";
+ case 0xb2: return " OcteonTX2 T96";
+ case 0xb3: return " OcteonTX2 F95";
+ case 0xb4: return " OcteonTX2 F95N";
+ case 0xb5: return " OcteonTX2 F95MM";
+ case 0xb8: return " ThunderX3 T110";
+ default: break;
+ }
+ case 0x44:
+ switch( part )
+ {
+ case 0xa10: return " SA110";
+ case 0xa11: return " SA1100";
+ default: break;
+ }
+ case 0x46:
+ switch( part )
+ {
+ case 0x1: return " A64FX";
+ default: break;
+ }
+ case 0x48:
+ switch( part )
+ {
+ case 0xd01: return " TSV100";
+ case 0xd40: return " Kirin 980";
+ default: break;
+ }
+ case 0x4e:
+ switch( part )
+ {
+ case 0x0: return " Denver";
+ case 0x3: return " Denver 2";
+ case 0x4: return " Carmel";
+ default: break;
+ }
+ case 0x50:
+ switch( part )
+ {
+ case 0x0: return " X-Gene";
+ default: break;
+ }
+ case 0x51:
+ switch( part )
+ {
+ case 0xf: return " Scorpion";
+ case 0x2d: return " Scorpion";
+ case 0x4d: return " Krait";
+ case 0x6f: return " Krait";
+ case 0x200: return " Kryo";
+ case 0x201: return " Kryo Silver (Snapdragon 821)";
+ case 0x205: return " Kryo Gold";
+ case 0x211: return " Kryo Silver (Snapdragon 820)";
+ case 0x800: return " Kryo 260 / 280 Gold";
+ case 0x801: return " Kryo 260 / 280 Silver";
+ case 0x802: return " Kryo 385 Gold";
+ case 0x803: return " Kryo 385 Silver";
+ case 0x804: return " Kryo 485 Gold";
+ case 0xc00: return " Falkor";
+ case 0xc01: return " Saphira";
+ default: break;
+ }
+ case 0x53:
+ switch( part )
+ {
+ case 0x1: return " Exynos M1/M2";
+ case 0x2: return " Exynos M3";
+ default: break;
+ }
+ case 0x56:
+ switch( part )
+ {
+ case 0x131: return " Feroceon 88FR131";
+ case 0x581: return " PJ4 / PJ4B";
+ case 0x584: return " PJ4B-MP / PJ4C";
+ default: break;
+ }
+ case 0x61:
+ switch( part )
+ {
+ case 0x1: return " Cyclone";
+ case 0x2: return " Typhoon";
+ case 0x3: return " Typhoon/Capri";
+ case 0x4: return " Twister";
+ case 0x5: return " Twister/Elba/Malta";
+ case 0x6: return " Hurricane";
+ case 0x7: return " Hurricane/Myst";
+ default: break;
+ }
+ case 0x66:
+ switch( part )
+ {
+ case 0x526: return " FA526";
+ case 0x626: return " FA626";
+ default: break;
+ }
+ case 0x68:
+ switch( part )
+ {
+ case 0x0: return " Phecda";
+ default: break;
+ }
+ default: break;
+ }
+ sprintf( buf, " 0x%x", part );
+ return buf;
+}
+
+#elif defined __APPLE__ && TARGET_OS_IPHONE == 1
+
+static const char* DecodeIosDevice( const char* id )
+{
+ static const char* DeviceTable[] = {
+ "i386", "32-bit simulator",
+ "x86_64", "64-bit simulator",
+ "iPhone1,1", "iPhone",
+ "iPhone1,2", "iPhone 3G",
+ "iPhone2,1", "iPhone 3GS",
+ "iPhone3,1", "iPhone 4 (GSM)",
+ "iPhone3,2", "iPhone 4 (GSM)",
+ "iPhone3,3", "iPhone 4 (CDMA)",
+ "iPhone4,1", "iPhone 4S",
+ "iPhone5,1", "iPhone 5 (A1428)",
+ "iPhone5,2", "iPhone 5 (A1429)",
+ "iPhone5,3", "iPhone 5c (A1456/A1532)",
+ "iPhone5,4", "iPhone 5c (A1507/A1516/1526/A1529)",
+ "iPhone6,1", "iPhone 5s (A1433/A1533)",
+ "iPhone6,2", "iPhone 5s (A1457/A1518/A1528/A1530)",
+ "iPhone7,1", "iPhone 6 Plus",
+ "iPhone7,2", "iPhone 6",
+ "iPhone8,1", "iPhone 6S",
+ "iPhone8,2", "iPhone 6S Plus",
+ "iPhone8,4", "iPhone SE",
+ "iPhone9,1", "iPhone 7 (CDMA)",
+ "iPhone9,2", "iPhone 7 Plus (CDMA)",
+ "iPhone9,3", "iPhone 7 (GSM)",
+ "iPhone9,4", "iPhone 7 Plus (GSM)",
+ "iPhone10,1", "iPhone 8 (CDMA)",
+ "iPhone10,2", "iPhone 8 Plus (CDMA)",
+ "iPhone10,3", "iPhone X (CDMA)",
+ "iPhone10,4", "iPhone 8 (GSM)",
+ "iPhone10,5", "iPhone 8 Plus (GSM)",
+ "iPhone10,6", "iPhone X (GSM)",
+ "iPhone11,2", "iPhone XS",
+ "iPhone11,4", "iPhone XS Max",
+ "iPhone11,6", "iPhone XS Max China",
+ "iPhone11,8", "iPhone XR",
+ "iPhone12,1", "iPhone 11",
+ "iPhone12,3", "iPhone 11 Pro",
+ "iPhone12,5", "iPhone 11 Pro Max",
+ "iPhone12,8", "iPhone SE 2nd Gen",
+ "iPhone13,1", "iPhone 12 Mini",
+ "iPhone13,2", "iPhone 12",
+ "iPhone13,3", "iPhone 12 Pro",
+ "iPhone13,4", "iPhone 12 Pro Max",
+ "iPad1,1", "iPad (A1219/A1337)",
+ "iPad2,1", "iPad 2 (A1395)",
+ "iPad2,2", "iPad 2 (A1396)",
+ "iPad2,3", "iPad 2 (A1397)",
+ "iPad2,4", "iPad 2 (A1395)",
+ "iPad2,5", "iPad Mini (A1432)",
+ "iPad2,6", "iPad Mini (A1454)",
+ "iPad2,7", "iPad Mini (A1455)",
+ "iPad3,1", "iPad 3 (A1416)",
+ "iPad3,2", "iPad 3 (A1403)",
+ "iPad3,3", "iPad 3 (A1430)",
+ "iPad3,4", "iPad 4 (A1458)",
+ "iPad3,5", "iPad 4 (A1459)",
+ "iPad3,6", "iPad 4 (A1460)",
+ "iPad4,1", "iPad Air (A1474)",
+ "iPad4,2", "iPad Air (A1475)",
+ "iPad4,3", "iPad Air (A1476)",
+ "iPad4,4", "iPad Mini 2 (A1489)",
+ "iPad4,5", "iPad Mini 2 (A1490)",
+ "iPad4,6", "iPad Mini 2 (A1491)",
+ "iPad4,7", "iPad Mini 3 (A1599)",
+ "iPad4,8", "iPad Mini 3 (A1600)",
+ "iPad4,9", "iPad Mini 3 (A1601)",
+ "iPad5,1", "iPad Mini 4 (A1538)",
+ "iPad5,2", "iPad Mini 4 (A1550)",
+ "iPad5,3", "iPad Air 2 (A1566)",
+ "iPad5,4", "iPad Air 2 (A1567)",
+ "iPad6,3", "iPad Pro 9.7\" (A1673)",
+ "iPad6,4", "iPad Pro 9.7\" (A1674)",
+ "iPad6,5", "iPad Pro 9.7\" (A1675)",
+ "iPad6,7", "iPad Pro 12.9\" (A1584)",
+ "iPad6,8", "iPad Pro 12.9\" (A1652)",
+ "iPad6,11", "iPad 5th gen (A1822)",
+ "iPad6,12", "iPad 5th gen (A1823)",
+ "iPad7,1", "iPad Pro 12.9\" 2nd gen (A1670)",
+ "iPad7,2", "iPad Pro 12.9\" 2nd gen (A1671/A1821)",
+ "iPad7,3", "iPad Pro 10.5\" (A1701)",
+ "iPad7,4", "iPad Pro 10.5\" (A1709)",
+ "iPad7,5", "iPad 6th gen (A1893)",
+ "iPad7,6", "iPad 6th gen (A1954)",
+ "iPad7,11", "iPad 7th gen 10.2\" (Wifi)",
+ "iPad7,12", "iPad 7th gen 10.2\" (Wifi+Cellular)",
+ "iPad8,1", "iPad Pro 11\" (A1980)",
+ "iPad8,2", "iPad Pro 11\" (A1980)",
+ "iPad8,3", "iPad Pro 11\" (A1934/A1979/A2013)",
+ "iPad8,4", "iPad Pro 11\" (A1934/A1979/A2013)",
+ "iPad8,5", "iPad Pro 12.9\" 3rd gen (A1876)",
+ "iPad8,6", "iPad Pro 12.9\" 3rd gen (A1876)",
+ "iPad8,7", "iPad Pro 12.9\" 3rd gen (A1895/A1983/A2014)",
+ "iPad8,8", "iPad Pro 12.9\" 3rd gen (A1895/A1983/A2014)",
+ "iPad8,9", "iPad Pro 11\" 2nd gen (Wifi)",
+ "iPad8,10", "iPad Pro 11\" 2nd gen (Wifi+Cellular)",
+ "iPad8,11", "iPad Pro 12.9\" 4th gen (Wifi)",
+ "iPad8,12", "iPad Pro 12.9\" 4th gen (Wifi+Cellular)",
+ "iPad11,1", "iPad Mini 5th gen (A2133)",
+ "iPad11,2", "iPad Mini 5th gen (A2124/A2125/A2126)",
+ "iPad11,3", "iPad Air 3rd gen (A2152)",
+ "iPad11,4", "iPad Air 3rd gen (A2123/A2153/A2154)",
+ "iPad11,6", "iPad 8th gen (WiFi)",
+ "iPad11,7", "iPad 8th gen (WiFi+Cellular)",
+ "iPad13,1", "iPad Air 4th gen (WiFi)",
+ "iPad13,2", "iPad Air 4th gen (WiFi+Cellular)",
+ "iPad13,4", "iPad Pro 11\" 3rd gen",
+ "iPad13,5", "iPad Pro 11\" 3rd gen",
+ "iPad13,6", "iPad Pro 11\" 3rd gen",
+ "iPad13,7", "iPad Pro 11\" 3rd gen",
+ "iPad13,8", "iPad Pro 12.9\" 5th gen",
+ "iPad13,9", "iPad Pro 12.9\" 5th gen",
+ "iPad13,10", "iPad Pro 12.9\" 5th gen",
+ "iPad13,11", "iPad Pro 12.9\" 5th gen",
+ "iPod1,1", "iPod Touch",
+ "iPod2,1", "iPod Touch 2nd gen",
+ "iPod3,1", "iPod Touch 3rd gen",
+ "iPod4,1", "iPod Touch 4th gen",
+ "iPod5,1", "iPod Touch 5th gen",
+ "iPod7,1", "iPod Touch 6th gen",
+ "iPod9,1", "iPod Touch 7th gen",
+ nullptr
+ };
+
+ auto ptr = DeviceTable;
+ while( *ptr )
+ {
+ if( strcmp( ptr[0], id ) == 0 ) return ptr[1];
+ ptr += 2;
+ }
+ return id;
+}
+
+#endif
+
+}
diff --git a/3rdparty/tracy/tracy/client/TracyCallstack.cpp b/3rdparty/tracy/tracy/client/TracyCallstack.cpp
new file mode 100644
index 0000000..3bd4309
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyCallstack.cpp
@@ -0,0 +1,1005 @@
+#include <new>
+#include <stdio.h>
+#include <string.h>
+#include "TracyCallstack.hpp"
+#include "TracyFastVector.hpp"
+#include "TracyStringHelpers.hpp"
+#include "../common/TracyAlloc.hpp"
+#include "../common/TracyStackFrames.hpp"
+#include "TracyDebug.hpp"
+
+#ifdef TRACY_HAS_CALLSTACK
+
+#if TRACY_HAS_CALLSTACK == 1
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <windows.h>
+# include <psapi.h>
+# include <algorithm>
+# ifdef _MSC_VER
+# pragma warning( push )
+# pragma warning( disable : 4091 )
+# endif
+# include <dbghelp.h>
+# ifdef _MSC_VER
+# pragma warning( pop )
+# endif
+#elif TRACY_HAS_CALLSTACK == 2 || TRACY_HAS_CALLSTACK == 3 || TRACY_HAS_CALLSTACK == 4 || TRACY_HAS_CALLSTACK == 6
+# include "../libbacktrace/backtrace.hpp"
+# include <algorithm>
+# include <dlfcn.h>
+# include <cxxabi.h>
+# include <stdlib.h>
+# include "TracyFastVector.hpp"
+#elif TRACY_HAS_CALLSTACK == 5
+# include <dlfcn.h>
+# include <cxxabi.h>
+#endif
+
+#ifdef TRACY_DBGHELP_LOCK
+# include "TracyProfiler.hpp"
+
+# define DBGHELP_INIT TracyConcat( TRACY_DBGHELP_LOCK, Init() )
+# define DBGHELP_LOCK TracyConcat( TRACY_DBGHELP_LOCK, Lock() );
+# define DBGHELP_UNLOCK TracyConcat( TRACY_DBGHELP_LOCK, Unlock() );
+
+extern "C"
+{
+ void DBGHELP_INIT;
+ void DBGHELP_LOCK;
+ void DBGHELP_UNLOCK;
+};
+#endif
+
+#if TRACY_HAS_CALLSTACK == 2 || TRACY_HAS_CALLSTACK == 3 || TRACY_HAS_CALLSTACK == 4 || TRACY_HAS_CALLSTACK == 5 || TRACY_HAS_CALLSTACK == 6
+extern "C" int ___tracy_demangle( const char* mangled, char* out, size_t len );
+
+#ifndef TRACY_DEMANGLE
+extern "C" int ___tracy_demangle( const char* mangled, char* out, size_t len )
+{
+ if( !mangled || mangled[0] != '_' ) return 0;
+ int status;
+ abi::__cxa_demangle( mangled, out, &len, &status );
+ return status == 0;
+}
+#endif
+#endif
+
+namespace tracy
+{
+
+#if TRACY_HAS_CALLSTACK == 1
+
+enum { MaxCbTrace = 16 };
+enum { MaxNameSize = 8*1024 };
+
+int cb_num;
+CallstackEntry cb_data[MaxCbTrace];
+
+extern "C"
+{
+ typedef DWORD (__stdcall *t_SymAddrIncludeInlineTrace)( HANDLE hProcess, DWORD64 Address );
+ typedef BOOL (__stdcall *t_SymQueryInlineTrace)( HANDLE hProcess, DWORD64 StartAddress, DWORD StartContext, DWORD64 StartRetAddress, DWORD64 CurAddress, LPDWORD CurContext, LPDWORD CurFrameIndex );
+ typedef BOOL (__stdcall *t_SymFromInlineContext)( HANDLE hProcess, DWORD64 Address, ULONG InlineContext, PDWORD64 Displacement, PSYMBOL_INFO Symbol );
+ typedef BOOL (__stdcall *t_SymGetLineFromInlineContext)( HANDLE hProcess, DWORD64 qwAddr, ULONG InlineContext, DWORD64 qwModuleBaseAddress, PDWORD pdwDisplacement, PIMAGEHLP_LINE64 Line64 );
+
+ TRACY_API ___tracy_t_RtlWalkFrameChain ___tracy_RtlWalkFrameChain = 0;
+ t_SymAddrIncludeInlineTrace _SymAddrIncludeInlineTrace = 0;
+ t_SymQueryInlineTrace _SymQueryInlineTrace = 0;
+ t_SymFromInlineContext _SymFromInlineContext = 0;
+ t_SymGetLineFromInlineContext _SymGetLineFromInlineContext = 0;
+}
+
+
+struct ModuleCache
+{
+ uint64_t start;
+ uint64_t end;
+ char* name;
+};
+
+static FastVector<ModuleCache>* s_modCache;
+
+
+struct KernelDriver
+{
+ uint64_t addr;
+ const char* mod;
+ const char* path;
+};
+
+KernelDriver* s_krnlCache = nullptr;
+size_t s_krnlCacheCnt;
+
+
+void InitCallstack()
+{
+ ___tracy_RtlWalkFrameChain = (___tracy_t_RtlWalkFrameChain)GetProcAddress( GetModuleHandleA( "ntdll.dll" ), "RtlWalkFrameChain" );
+ _SymAddrIncludeInlineTrace = (t_SymAddrIncludeInlineTrace)GetProcAddress( GetModuleHandleA( "dbghelp.dll" ), "SymAddrIncludeInlineTrace" );
+ _SymQueryInlineTrace = (t_SymQueryInlineTrace)GetProcAddress( GetModuleHandleA( "dbghelp.dll" ), "SymQueryInlineTrace" );
+ _SymFromInlineContext = (t_SymFromInlineContext)GetProcAddress( GetModuleHandleA( "dbghelp.dll" ), "SymFromInlineContext" );
+ _SymGetLineFromInlineContext = (t_SymGetLineFromInlineContext)GetProcAddress( GetModuleHandleA( "dbghelp.dll" ), "SymGetLineFromInlineContext" );
+
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_INIT;
+ DBGHELP_LOCK;
+#endif
+
+ SymInitialize( GetCurrentProcess(), nullptr, true );
+ SymSetOptions( SYMOPT_LOAD_LINES );
+
+ DWORD needed;
+ LPVOID dev[4096];
+ if( EnumDeviceDrivers( dev, sizeof(dev), &needed ) != 0 )
+ {
+ char windir[MAX_PATH];
+ if( !GetWindowsDirectoryA( windir, sizeof( windir ) ) ) memcpy( windir, "c:\\windows", 11 );
+ const auto windirlen = strlen( windir );
+
+ const auto sz = needed / sizeof( LPVOID );
+ s_krnlCache = (KernelDriver*)tracy_malloc( sizeof(KernelDriver) * sz );
+ int cnt = 0;
+ for( size_t i=0; i<sz; i++ )
+ {
+ char fn[MAX_PATH];
+ const auto len = GetDeviceDriverBaseNameA( dev[i], fn, sizeof( fn ) );
+ if( len != 0 )
+ {
+ auto buf = (char*)tracy_malloc_fast( len+3 );
+ buf[0] = '<';
+ memcpy( buf+1, fn, len );
+ memcpy( buf+len+1, ">", 2 );
+ s_krnlCache[cnt] = KernelDriver { (uint64_t)dev[i], buf };
+
+ const auto len = GetDeviceDriverFileNameA( dev[i], fn, sizeof( fn ) );
+ if( len != 0 )
+ {
+ char full[MAX_PATH];
+ char* path = fn;
+
+ if( memcmp( fn, "\\SystemRoot\\", 12 ) == 0 )
+ {
+ memcpy( full, windir, windirlen );
+ strcpy( full + windirlen, fn + 11 );
+ path = full;
+ }
+
+ SymLoadModuleEx( GetCurrentProcess(), nullptr, path, nullptr, (DWORD64)dev[i], 0, nullptr, 0 );
+
+ const auto psz = strlen( path );
+ auto pptr = (char*)tracy_malloc_fast( psz+1 );
+ memcpy( pptr, path, psz );
+ pptr[psz] = '\0';
+ s_krnlCache[cnt].path = pptr;
+ }
+
+ cnt++;
+ }
+ }
+ s_krnlCacheCnt = cnt;
+ std::sort( s_krnlCache, s_krnlCache + s_krnlCacheCnt, []( const KernelDriver& lhs, const KernelDriver& rhs ) { return lhs.addr > rhs.addr; } );
+ }
+
+ s_modCache = (FastVector<ModuleCache>*)tracy_malloc( sizeof( FastVector<ModuleCache> ) );
+ new(s_modCache) FastVector<ModuleCache>( 512 );
+
+ HANDLE proc = GetCurrentProcess();
+ HMODULE mod[1024];
+ if( EnumProcessModules( proc, mod, sizeof( mod ), &needed ) != 0 )
+ {
+ const auto sz = needed / sizeof( HMODULE );
+ for( size_t i=0; i<sz; i++ )
+ {
+ MODULEINFO info;
+ if( GetModuleInformation( proc, mod[i], &info, sizeof( info ) ) != 0 )
+ {
+ const auto base = uint64_t( info.lpBaseOfDll );
+ char name[1024];
+ const auto res = GetModuleFileNameA( mod[i], name, 1021 );
+ if( res > 0 )
+ {
+ auto ptr = name + res;
+ while( ptr > name && *ptr != '\\' && *ptr != '/' ) ptr--;
+ if( ptr > name ) ptr++;
+ const auto namelen = name + res - ptr;
+ auto cache = s_modCache->push_next();
+ cache->start = base;
+ cache->end = base + info.SizeOfImage;
+ cache->name = (char*)tracy_malloc_fast( namelen+3 );
+ cache->name[0] = '[';
+ memcpy( cache->name+1, ptr, namelen );
+ cache->name[namelen+1] = ']';
+ cache->name[namelen+2] = '\0';
+ }
+ }
+ }
+ }
+
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_UNLOCK;
+#endif
+}
+
+const char* DecodeCallstackPtrFast( uint64_t ptr )
+{
+ static char ret[MaxNameSize];
+ const auto proc = GetCurrentProcess();
+
+ char buf[sizeof( SYMBOL_INFO ) + MaxNameSize];
+ auto si = (SYMBOL_INFO*)buf;
+ si->SizeOfStruct = sizeof( SYMBOL_INFO );
+ si->MaxNameLen = MaxNameSize;
+
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_LOCK;
+#endif
+ if( SymFromAddr( proc, ptr, nullptr, si ) == 0 )
+ {
+ *ret = '\0';
+ }
+ else
+ {
+ memcpy( ret, si->Name, si->NameLen );
+ ret[si->NameLen] = '\0';
+ }
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_UNLOCK;
+#endif
+ return ret;
+}
+
+const char* GetKernelModulePath( uint64_t addr )
+{
+ assert( addr >> 63 != 0 );
+ if( !s_krnlCache ) return nullptr;
+ auto it = std::lower_bound( s_krnlCache, s_krnlCache + s_krnlCacheCnt, addr, []( const KernelDriver& lhs, const uint64_t& rhs ) { return lhs.addr > rhs; } );
+ if( it == s_krnlCache + s_krnlCacheCnt ) return nullptr;
+ return it->path;
+}
+
+static const char* GetModuleNameAndPrepareSymbols( uint64_t addr )
+{
+ if( ( addr >> 63 ) != 0 )
+ {
+ if( s_krnlCache )
+ {
+ auto it = std::lower_bound( s_krnlCache, s_krnlCache + s_krnlCacheCnt, addr, []( const KernelDriver& lhs, const uint64_t& rhs ) { return lhs.addr > rhs; } );
+ if( it != s_krnlCache + s_krnlCacheCnt )
+ {
+ return it->mod;
+ }
+ }
+ return "<kernel>";
+ }
+
+ for( auto& v : *s_modCache )
+ {
+ if( addr >= v.start && addr < v.end )
+ {
+ return v.name;
+ }
+ }
+
+ HMODULE mod[1024];
+ DWORD needed;
+ HANDLE proc = GetCurrentProcess();
+
+ InitRpmalloc();
+ if( EnumProcessModules( proc, mod, sizeof( mod ), &needed ) != 0 )
+ {
+ const auto sz = needed / sizeof( HMODULE );
+ for( size_t i=0; i<sz; i++ )
+ {
+ MODULEINFO info;
+ if( GetModuleInformation( proc, mod[i], &info, sizeof( info ) ) != 0 )
+ {
+ const auto base = uint64_t( info.lpBaseOfDll );
+ if( addr >= base && addr < base + info.SizeOfImage )
+ {
+ char name[1024];
+ const auto res = GetModuleFileNameA( mod[i], name, 1021 );
+ if( res > 0 )
+ {
+ // since this is the first time we encounter this module, load its symbols (needed for modules loaded after SymInitialize)
+ SymLoadModuleEx(proc, NULL, name, NULL, (DWORD64)info.lpBaseOfDll, info.SizeOfImage, NULL, 0);
+ auto ptr = name + res;
+ while( ptr > name && *ptr != '\\' && *ptr != '/' ) ptr--;
+ if( ptr > name ) ptr++;
+ const auto namelen = name + res - ptr;
+ auto cache = s_modCache->push_next();
+ cache->start = base;
+ cache->end = base + info.SizeOfImage;
+ cache->name = (char*)tracy_malloc_fast( namelen+3 );
+ cache->name[0] = '[';
+ memcpy( cache->name+1, ptr, namelen );
+ cache->name[namelen+1] = ']';
+ cache->name[namelen+2] = '\0';
+ return cache->name;
+ }
+ }
+ }
+ }
+ }
+ return "[unknown]";
+}
+
+CallstackSymbolData DecodeSymbolAddress( uint64_t ptr )
+{
+ CallstackSymbolData sym;
+ IMAGEHLP_LINE64 line;
+ DWORD displacement = 0;
+ line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_LOCK;
+#endif
+ const auto res = SymGetLineFromAddr64( GetCurrentProcess(), ptr, &displacement, &line );
+ if( res == 0 || line.LineNumber >= 0xF00000 )
+ {
+ sym.file = "[unknown]";
+ sym.line = 0;
+ sym.needFree = false;
+ }
+ else
+ {
+ sym.file = CopyString( line.FileName );
+ sym.line = line.LineNumber;
+ sym.needFree = true;
+ }
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_UNLOCK;
+#endif
+ return sym;
+}
+
+CallstackSymbolData DecodeCodeAddress( uint64_t ptr )
+{
+ CallstackSymbolData sym = {};
+ const auto proc = GetCurrentProcess();
+ bool done = false;
+
+ char buf[sizeof( SYMBOL_INFO ) + MaxNameSize];
+ auto si = (SYMBOL_INFO*)buf;
+ si->SizeOfStruct = sizeof( SYMBOL_INFO );
+ si->MaxNameLen = MaxNameSize;
+
+ IMAGEHLP_LINE64 line;
+ DWORD displacement = 0;
+ line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_LOCK;
+#endif
+#if !defined TRACY_NO_CALLSTACK_INLINES
+ if( _SymAddrIncludeInlineTrace )
+ {
+ DWORD inlineNum = _SymAddrIncludeInlineTrace( proc, ptr );
+ DWORD ctx = 0;
+ DWORD idx;
+ BOOL doInline = FALSE;
+ if( inlineNum != 0 ) doInline = _SymQueryInlineTrace( proc, ptr, 0, ptr, ptr, &ctx, &idx );
+ if( doInline )
+ {
+ if( _SymGetLineFromInlineContext( proc, ptr, ctx, 0, &displacement, &line ) != 0 )
+ {
+ sym.file = CopyString( line.FileName );
+ sym.line = line.LineNumber;
+ sym.needFree = true;
+ done = true;
+
+ if( _SymFromInlineContext( proc, ptr, ctx, nullptr, si ) != 0 )
+ {
+ sym.symAddr = si->Address;
+ }
+ else
+ {
+ sym.symAddr = 0;
+ }
+ }
+ }
+ }
+#endif
+ if( !done )
+ {
+ const auto res = SymGetLineFromAddr64( proc, ptr, &displacement, &line );
+ if( res == 0 || line.LineNumber >= 0xF00000 )
+ {
+ sym.file = "[unknown]";
+ sym.line = 0;
+ sym.symAddr = 0;
+ sym.needFree = false;
+ }
+ else
+ {
+ sym.file = CopyString( line.FileName );
+ sym.line = line.LineNumber;
+ sym.needFree = true;
+
+ if( SymFromAddr( proc, ptr, nullptr, si ) != 0 )
+ {
+ sym.symAddr = si->Address;
+ }
+ else
+ {
+ sym.symAddr = 0;
+ }
+ }
+ }
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_UNLOCK;
+#endif
+ return sym;
+}
+
+CallstackEntryData DecodeCallstackPtr( uint64_t ptr )
+{
+ int write;
+ const auto proc = GetCurrentProcess();
+ InitRpmalloc();
+
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_LOCK;
+#endif
+
+ const auto moduleName = GetModuleNameAndPrepareSymbols(ptr);
+
+#if !defined TRACY_NO_CALLSTACK_INLINES
+ BOOL doInline = FALSE;
+ DWORD ctx = 0;
+ DWORD inlineNum = 0;
+ if( _SymAddrIncludeInlineTrace )
+ {
+ inlineNum = _SymAddrIncludeInlineTrace( proc, ptr );
+ if( inlineNum > MaxCbTrace - 1 ) inlineNum = MaxCbTrace - 1;
+ DWORD idx;
+ if( inlineNum != 0 ) doInline = _SymQueryInlineTrace( proc, ptr, 0, ptr, ptr, &ctx, &idx );
+ }
+ if( doInline )
+ {
+ write = inlineNum;
+ cb_num = 1 + inlineNum;
+ }
+ else
+#endif
+ {
+ write = 0;
+ cb_num = 1;
+ }
+
+ char buf[sizeof( SYMBOL_INFO ) + MaxNameSize];
+ auto si = (SYMBOL_INFO*)buf;
+ si->SizeOfStruct = sizeof( SYMBOL_INFO );
+ si->MaxNameLen = MaxNameSize;
+
+ const auto symValid = SymFromAddr( proc, ptr, nullptr, si ) != 0;
+
+ IMAGEHLP_LINE64 line;
+ DWORD displacement = 0;
+ line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+
+ {
+ const char* filename;
+ const auto res = SymGetLineFromAddr64( proc, ptr, &displacement, &line );
+ if( res == 0 || line.LineNumber >= 0xF00000 )
+ {
+ filename = "[unknown]";
+ cb_data[write].line = 0;
+ }
+ else
+ {
+ filename = line.FileName;
+ cb_data[write].line = line.LineNumber;
+ }
+
+ cb_data[write].name = symValid ? CopyStringFast( si->Name, si->NameLen ) : CopyStringFast( moduleName );
+ cb_data[write].file = CopyStringFast( filename );
+ if( symValid )
+ {
+ cb_data[write].symLen = si->Size;
+ cb_data[write].symAddr = si->Address;
+ }
+ else
+ {
+ cb_data[write].symLen = 0;
+ cb_data[write].symAddr = 0;
+ }
+ }
+
+#if !defined TRACY_NO_CALLSTACK_INLINES
+ if( doInline )
+ {
+ for( DWORD i=0; i<inlineNum; i++ )
+ {
+ auto& cb = cb_data[i];
+ const auto symInlineValid = _SymFromInlineContext( proc, ptr, ctx, nullptr, si ) != 0;
+ const char* filename;
+ if( _SymGetLineFromInlineContext( proc, ptr, ctx, 0, &displacement, &line ) == 0 )
+ {
+ filename = "[unknown]";
+ cb.line = 0;
+ }
+ else
+ {
+ filename = line.FileName;
+ cb.line = line.LineNumber;
+ }
+
+ cb.name = symInlineValid ? CopyStringFast( si->Name, si->NameLen ) : CopyStringFast( moduleName );
+ cb.file = CopyStringFast( filename );
+ if( symInlineValid )
+ {
+ cb.symLen = si->Size;
+ cb.symAddr = si->Address;
+ }
+ else
+ {
+ cb.symLen = 0;
+ cb.symAddr = 0;
+ }
+
+ ctx++;
+ }
+ }
+#endif
+#ifdef TRACY_DBGHELP_LOCK
+ DBGHELP_UNLOCK;
+#endif
+
+ return { cb_data, uint8_t( cb_num ), moduleName };
+}
+
+#elif TRACY_HAS_CALLSTACK == 2 || TRACY_HAS_CALLSTACK == 3 || TRACY_HAS_CALLSTACK == 4 || TRACY_HAS_CALLSTACK == 6
+
+enum { MaxCbTrace = 16 };
+
+struct backtrace_state* cb_bts;
+int cb_num;
+CallstackEntry cb_data[MaxCbTrace];
+int cb_fixup;
+
+#ifdef __linux
+struct KernelSymbol
+{
+ uint64_t addr;
+ const char* name;
+ const char* mod;
+};
+
+KernelSymbol* s_kernelSym = nullptr;
+size_t s_kernelSymCnt;
+
+static void InitKernelSymbols()
+{
+ FILE* f = fopen( "/proc/kallsyms", "rb" );
+ if( !f ) return;
+ tracy::FastVector<KernelSymbol> tmpSym( 1024 );
+ size_t linelen = 16 * 1024; // linelen must be big enough to prevent reallocs in getline()
+ auto linebuf = (char*)tracy_malloc( linelen );
+ ssize_t sz;
+ while( ( sz = getline( &linebuf, &linelen, f ) ) != -1 )
+ {
+ auto ptr = linebuf;
+ uint64_t addr = 0;
+ while( *ptr != ' ' )
+ {
+ auto v = *ptr;
+ if( v >= '0' && v <= '9' )
+ {
+ v -= '0';
+ }
+ else if( v >= 'a' && v <= 'f' )
+ {
+ v -= 'a';
+ v += 10;
+ }
+ else if( v >= 'A' && v <= 'F' )
+ {
+ v -= 'A';
+ v += 10;
+ }
+ else
+ {
+ assert( false );
+ }
+ assert( ( v & ~0xF ) == 0 );
+ addr <<= 4;
+ addr |= v;
+ ptr++;
+ }
+ if( addr == 0 ) continue;
+ ptr++;
+ if( *ptr != 'T' && *ptr != 't' ) continue;
+ ptr += 2;
+ const auto namestart = ptr;
+ while( *ptr != '\t' && *ptr != '\n' ) ptr++;
+ const auto nameend = ptr;
+ const char* modstart = nullptr;
+ const char* modend;
+ if( *ptr == '\t' )
+ {
+ ptr += 2;
+ modstart = ptr;
+ while( *ptr != ']' ) ptr++;
+ modend = ptr;
+ }
+
+ auto strname = (char*)tracy_malloc_fast( nameend - namestart + 1 );
+ memcpy( strname, namestart, nameend - namestart );
+ strname[nameend-namestart] = '\0';
+
+ char* strmod = nullptr;
+ if( modstart )
+ {
+ strmod = (char*)tracy_malloc_fast( modend - modstart + 1 );
+ memcpy( strmod, modstart, modend - modstart );
+ strmod[modend-modstart] = '\0';
+ }
+
+ auto sym = tmpSym.push_next();
+ sym->addr = addr;
+ sym->name = strname;
+ sym->mod = strmod;
+ }
+ tracy_free_fast( linebuf );
+ fclose( f );
+ if( tmpSym.empty() ) return;
+
+ std::sort( tmpSym.begin(), tmpSym.end(), []( const KernelSymbol& lhs, const KernelSymbol& rhs ) { return lhs.addr > rhs.addr; } );
+ s_kernelSymCnt = tmpSym.size();
+ s_kernelSym = (KernelSymbol*)tracy_malloc_fast( sizeof( KernelSymbol ) * s_kernelSymCnt );
+ memcpy( s_kernelSym, tmpSym.data(), sizeof( KernelSymbol ) * s_kernelSymCnt );
+ TracyDebug( "Loaded %zu kernel symbols\n", s_kernelSymCnt );
+}
+#endif
+
+void InitCallstack()
+{
+ cb_bts = backtrace_create_state( nullptr, 0, nullptr, nullptr );
+
+#ifdef __linux
+ InitKernelSymbols();
+#endif
+}
+
+static int FastCallstackDataCb( void* data, uintptr_t pc, uintptr_t lowaddr, const char* fn, int lineno, const char* function )
+{
+ if( function )
+ {
+ strcpy( (char*)data, function );
+ }
+ else
+ {
+ const char* symname = nullptr;
+ auto vptr = (void*)pc;
+ Dl_info dlinfo;
+ if( dladdr( vptr, &dlinfo ) )
+ {
+ symname = dlinfo.dli_sname;
+ }
+ if( symname )
+ {
+ strcpy( (char*)data, symname );
+ }
+ else
+ {
+ *(char*)data = '\0';
+ }
+ }
+ return 1;
+}
+
+static void FastCallstackErrorCb( void* data, const char* /*msg*/, int /*errnum*/ )
+{
+ *(char*)data = '\0';
+}
+
+const char* DecodeCallstackPtrFast( uint64_t ptr )
+{
+ static char ret[1024];
+ backtrace_pcinfo( cb_bts, ptr, FastCallstackDataCb, FastCallstackErrorCb, ret );
+ return ret;
+}
+
+static int SymbolAddressDataCb( void* data, uintptr_t pc, uintptr_t lowaddr, const char* fn, int lineno, const char* function )
+{
+ auto& sym = *(CallstackSymbolData*)data;
+ if( !fn )
+ {
+ sym.file = "[unknown]";
+ sym.line = 0;
+ sym.needFree = false;
+ }
+ else
+ {
+ sym.file = CopyString( fn );
+ sym.line = lineno;
+ sym.needFree = true;
+ }
+
+ return 1;
+}
+
+static void SymbolAddressErrorCb( void* data, const char* /*msg*/, int /*errnum*/ )
+{
+ auto& sym = *(CallstackSymbolData*)data;
+ sym.file = "[unknown]";
+ sym.line = 0;
+ sym.needFree = false;
+}
+
+CallstackSymbolData DecodeSymbolAddress( uint64_t ptr )
+{
+ CallstackSymbolData sym;
+ backtrace_pcinfo( cb_bts, ptr, SymbolAddressDataCb, SymbolAddressErrorCb, &sym );
+ return sym;
+}
+
+static int CodeDataCb( void* data, uintptr_t pc, uintptr_t lowaddr, const char* fn, int lineno, const char* function )
+{
+ if( !fn ) return 1;
+
+ const auto fnsz = strlen( fn );
+ if( fnsz >= s_tracySkipSubframesMinLen )
+ {
+ auto ptr = s_tracySkipSubframes;
+ do
+ {
+ if( fnsz >= ptr->len && memcmp( fn + fnsz - ptr->len, ptr->str, ptr->len ) == 0 ) return 0;
+ ptr++;
+ }
+ while( ptr->str );
+ }
+
+ auto& sym = *(CallstackSymbolData*)data;
+ sym.file = CopyString( fn );
+ sym.line = lineno;
+ sym.needFree = true;
+ sym.symAddr = lowaddr;
+ return 1;
+}
+
+static void CodeErrorCb( void* /*data*/, const char* /*msg*/, int /*errnum*/ )
+{
+}
+
+CallstackSymbolData DecodeCodeAddress( uint64_t ptr )
+{
+ CallstackSymbolData sym = { "[unknown]", 0, false, 0 };
+ backtrace_pcinfo( cb_bts, ptr, CodeDataCb, CodeErrorCb, &sym );
+ return sym;
+}
+
+static int CallstackDataCb( void* /*data*/, uintptr_t pc, uintptr_t lowaddr, const char* fn, int lineno, const char* function )
+{
+ enum { DemangleBufLen = 64*1024 };
+ char demangled[DemangleBufLen];
+
+ cb_data[cb_num].symLen = 0;
+ cb_data[cb_num].symAddr = (uint64_t)lowaddr;
+
+ if( !fn && !function )
+ {
+ const char* symname = nullptr;
+ auto vptr = (void*)pc;
+ ptrdiff_t symoff = 0;
+
+ Dl_info dlinfo;
+ if( dladdr( vptr, &dlinfo ) )
+ {
+ symname = dlinfo.dli_sname;
+ symoff = (char*)pc - (char*)dlinfo.dli_saddr;
+ if( ___tracy_demangle( symname, demangled, DemangleBufLen ) ) symname = demangled;
+ }
+
+ if( !symname ) symname = "[unknown]";
+
+ if( symoff == 0 )
+ {
+ cb_data[cb_num].name = CopyStringFast( symname );
+ }
+ else
+ {
+ char buf[32];
+ const auto offlen = sprintf( buf, " + %td", symoff );
+ const auto namelen = strlen( symname );
+ auto name = (char*)tracy_malloc_fast( namelen + offlen + 1 );
+ memcpy( name, symname, namelen );
+ memcpy( name + namelen, buf, offlen );
+ name[namelen + offlen] = '\0';
+ cb_data[cb_num].name = name;
+ }
+
+ cb_data[cb_num].file = CopyStringFast( "[unknown]" );
+ cb_data[cb_num].line = 0;
+ }
+ else
+ {
+ if( !fn ) fn = "[unknown]";
+ if( !function )
+ {
+ function = "[unknown]";
+ }
+ else if( ___tracy_demangle( function, demangled, DemangleBufLen ) )
+ {
+ function = demangled;
+ }
+
+ cb_data[cb_num].name = CopyStringFast( function );
+ cb_data[cb_num].file = CopyStringFast( fn );
+ cb_data[cb_num].line = lineno;
+ }
+
+ if( ++cb_num >= MaxCbTrace )
+ {
+ return 1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+static void CallstackErrorCb( void* /*data*/, const char* /*msg*/, int /*errnum*/ )
+{
+ for( int i=0; i<cb_num; i++ )
+ {
+ tracy_free_fast( (void*)cb_data[i].name );
+ tracy_free_fast( (void*)cb_data[i].file );
+ }
+
+ cb_data[0].name = CopyStringFast( "[error]" );
+ cb_data[0].file = CopyStringFast( "[error]" );
+ cb_data[0].line = 0;
+
+ cb_num = 1;
+}
+
+void SymInfoCallback( void* /*data*/, uintptr_t pc, const char* symname, uintptr_t symval, uintptr_t symsize )
+{
+ cb_data[cb_num-1].symLen = (uint32_t)symsize;
+ cb_data[cb_num-1].symAddr = (uint64_t)symval;
+}
+
+void SymInfoError( void* /*data*/, const char* /*msg*/, int /*errnum*/ )
+{
+ cb_data[cb_num-1].symLen = 0;
+ cb_data[cb_num-1].symAddr = 0;
+}
+
+CallstackEntryData DecodeCallstackPtr( uint64_t ptr )
+{
+ InitRpmalloc();
+ if( ptr >> 63 == 0 )
+ {
+ cb_num = 0;
+ backtrace_pcinfo( cb_bts, ptr, CallstackDataCb, CallstackErrorCb, nullptr );
+ assert( cb_num > 0 );
+
+ backtrace_syminfo( cb_bts, ptr, SymInfoCallback, SymInfoError, nullptr );
+
+ const char* symloc = nullptr;
+ Dl_info dlinfo;
+ if( dladdr( (void*)ptr, &dlinfo ) ) symloc = dlinfo.dli_fname;
+
+ return { cb_data, uint8_t( cb_num ), symloc ? symloc : "[unknown]" };
+ }
+#ifdef __linux
+ else if( s_kernelSym )
+ {
+ auto it = std::lower_bound( s_kernelSym, s_kernelSym + s_kernelSymCnt, ptr, []( const KernelSymbol& lhs, const uint64_t& rhs ) { return lhs.addr > rhs; } );
+ if( it != s_kernelSym + s_kernelSymCnt )
+ {
+ cb_data[0].name = CopyStringFast( it->name );
+ cb_data[0].file = CopyStringFast( "<kernel>" );
+ cb_data[0].line = 0;
+ cb_data[0].symLen = 0;
+ cb_data[0].symAddr = it->addr;
+ return { cb_data, 1, it->mod ? it->mod : "<kernel>" };
+ }
+ }
+#endif
+
+ cb_data[0].name = CopyStringFast( "[unknown]" );
+ cb_data[0].file = CopyStringFast( "<kernel>" );
+ cb_data[0].line = 0;
+ cb_data[0].symLen = 0;
+ cb_data[0].symAddr = 0;
+ return { cb_data, 1, "<kernel>" };
+}
+
+#elif TRACY_HAS_CALLSTACK == 5
+
+void InitCallstack()
+{
+}
+
+const char* DecodeCallstackPtrFast( uint64_t ptr )
+{
+ static char ret[1024];
+ auto vptr = (void*)ptr;
+ const char* symname = nullptr;
+ Dl_info dlinfo;
+ if( dladdr( vptr, &dlinfo ) && dlinfo.dli_sname )
+ {
+ symname = dlinfo.dli_sname;
+ }
+ if( symname )
+ {
+ strcpy( ret, symname );
+ }
+ else
+ {
+ *ret = '\0';
+ }
+ return ret;
+}
+
+CallstackSymbolData DecodeSymbolAddress( uint64_t ptr )
+{
+ const char* symloc = nullptr;
+ Dl_info dlinfo;
+ if( dladdr( (void*)ptr, &dlinfo ) ) symloc = dlinfo.dli_fname;
+ if( !symloc ) symloc = "[unknown]";
+ return CallstackSymbolData { symloc, 0, false, 0 };
+}
+
+CallstackSymbolData DecodeCodeAddress( uint64_t ptr )
+{
+ return DecodeSymbolAddress( ptr );
+}
+
+CallstackEntryData DecodeCallstackPtr( uint64_t ptr )
+{
+ static CallstackEntry cb;
+ cb.line = 0;
+
+ enum { DemangleBufLen = 64*1024 };
+ char demangled[DemangleBufLen];
+
+ const char* symname = nullptr;
+ const char* symloc = nullptr;
+ auto vptr = (void*)ptr;
+ ptrdiff_t symoff = 0;
+ void* symaddr = nullptr;
+
+ Dl_info dlinfo;
+ if( dladdr( vptr, &dlinfo ) )
+ {
+ symloc = dlinfo.dli_fname;
+ symname = dlinfo.dli_sname;
+ symoff = (char*)ptr - (char*)dlinfo.dli_saddr;
+ symaddr = dlinfo.dli_saddr;
+ if( ___tracy_demangle( symname, demangled, DemangleBufLen ) ) symname = demangled;
+ }
+
+ if( !symname ) symname = "[unknown]";
+ if( !symloc ) symloc = "[unknown]";
+
+ if( symoff == 0 )
+ {
+ cb.name = CopyString( symname );
+ }
+ else
+ {
+ char buf[32];
+ const auto offlen = sprintf( buf, " + %td", symoff );
+ const auto namelen = strlen( symname );
+ auto name = (char*)tracy_malloc( namelen + offlen + 1 );
+ memcpy( name, symname, namelen );
+ memcpy( name + namelen, buf, offlen );
+ name[namelen + offlen] = '\0';
+ cb.name = name;
+ }
+
+ cb.file = CopyString( "[unknown]" );
+ cb.symLen = 0;
+ cb.symAddr = (uint64_t)symaddr;
+
+ return { &cb, 1, symloc };
+}
+
+#endif
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyCallstack.h b/3rdparty/tracy/tracy/client/TracyCallstack.h
new file mode 100644
index 0000000..2c7ecad
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyCallstack.h
@@ -0,0 +1,35 @@
+#ifndef __TRACYCALLSTACK_H__
+#define __TRACYCALLSTACK_H__
+
+#ifndef TRACY_NO_CALLSTACK
+
+# if !defined _WIN32
+# include <sys/param.h>
+# endif
+
+# if defined _WIN32
+# include "../common/TracyUwp.hpp"
+# ifndef TRACY_UWP
+# define TRACY_HAS_CALLSTACK 1
+# endif
+# elif defined __ANDROID__
+# if !defined __arm__ || __ANDROID_API__ >= 21
+# define TRACY_HAS_CALLSTACK 2
+# else
+# define TRACY_HAS_CALLSTACK 5
+# endif
+# elif defined __linux
+# if defined _GNU_SOURCE && defined __GLIBC__
+# define TRACY_HAS_CALLSTACK 3
+# else
+# define TRACY_HAS_CALLSTACK 2
+# endif
+# elif defined __APPLE__
+# define TRACY_HAS_CALLSTACK 4
+# elif defined BSD
+# define TRACY_HAS_CALLSTACK 6
+# endif
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyCallstack.hpp b/3rdparty/tracy/tracy/client/TracyCallstack.hpp
new file mode 100644
index 0000000..217d69e
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyCallstack.hpp
@@ -0,0 +1,125 @@
+#ifndef __TRACYCALLSTACK_HPP__
+#define __TRACYCALLSTACK_HPP__
+
+#include "../common/TracyApi.h"
+#include "TracyCallstack.h"
+
+#if TRACY_HAS_CALLSTACK == 2 || TRACY_HAS_CALLSTACK == 5
+# include <unwind.h>
+#elif TRACY_HAS_CALLSTACK >= 3
+# include <execinfo.h>
+#endif
+
+
+#ifdef TRACY_HAS_CALLSTACK
+
+#include <assert.h>
+#include <stdint.h>
+
+#include "../common/TracyAlloc.hpp"
+#include "../common/TracyForceInline.hpp"
+
+namespace tracy
+{
+
+struct CallstackSymbolData
+{
+ const char* file;
+ uint32_t line;
+ bool needFree;
+ uint64_t symAddr;
+};
+
+struct CallstackEntry
+{
+ const char* name;
+ const char* file;
+ uint32_t line;
+ uint32_t symLen;
+ uint64_t symAddr;
+};
+
+struct CallstackEntryData
+{
+ const CallstackEntry* data;
+ uint8_t size;
+ const char* imageName;
+};
+
+CallstackSymbolData DecodeSymbolAddress( uint64_t ptr );
+CallstackSymbolData DecodeCodeAddress( uint64_t ptr );
+const char* DecodeCallstackPtrFast( uint64_t ptr );
+CallstackEntryData DecodeCallstackPtr( uint64_t ptr );
+void InitCallstack();
+const char* GetKernelModulePath( uint64_t addr );
+
+#if TRACY_HAS_CALLSTACK == 1
+
+extern "C"
+{
+ typedef unsigned long (__stdcall *___tracy_t_RtlWalkFrameChain)( void**, unsigned long, unsigned long );
+ TRACY_API extern ___tracy_t_RtlWalkFrameChain ___tracy_RtlWalkFrameChain;
+}
+
+static tracy_force_inline void* Callstack( int depth )
+{
+ assert( depth >= 1 && depth < 63 );
+ auto trace = (uintptr_t*)tracy_malloc( ( 1 + depth ) * sizeof( uintptr_t ) );
+ const auto num = ___tracy_RtlWalkFrameChain( (void**)( trace + 1 ), depth, 0 );
+ *trace = num;
+ return trace;
+}
+
+#elif TRACY_HAS_CALLSTACK == 2 || TRACY_HAS_CALLSTACK == 5
+
+struct BacktraceState
+{
+ void** current;
+ void** end;
+};
+
+static _Unwind_Reason_Code tracy_unwind_callback( struct _Unwind_Context* ctx, void* arg )
+{
+ auto state = (BacktraceState*)arg;
+ uintptr_t pc = _Unwind_GetIP( ctx );
+ if( pc )
+ {
+ if( state->current == state->end ) return _URC_END_OF_STACK;
+ *state->current++ = (void*)pc;
+ }
+ return _URC_NO_REASON;
+}
+
+static tracy_force_inline void* Callstack( int depth )
+{
+ assert( depth >= 1 && depth < 63 );
+
+ auto trace = (uintptr_t*)tracy_malloc( ( 1 + depth ) * sizeof( uintptr_t ) );
+ BacktraceState state = { (void**)(trace+1), (void**)(trace+1+depth) };
+ _Unwind_Backtrace( tracy_unwind_callback, &state );
+
+ *trace = (uintptr_t*)state.current - trace + 1;
+
+ return trace;
+}
+
+#elif TRACY_HAS_CALLSTACK == 3 || TRACY_HAS_CALLSTACK == 4 || TRACY_HAS_CALLSTACK == 6
+
+static tracy_force_inline void* Callstack( int depth )
+{
+ assert( depth >= 1 );
+
+ auto trace = (uintptr_t*)tracy_malloc( ( 1 + (size_t)depth ) * sizeof( uintptr_t ) );
+ const auto num = (size_t)backtrace( (void**)(trace+1), depth );
+ *trace = num;
+
+ return trace;
+}
+
+#endif
+
+}
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyDebug.hpp b/3rdparty/tracy/tracy/client/TracyDebug.hpp
new file mode 100644
index 0000000..8723356
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyDebug.hpp
@@ -0,0 +1,11 @@
+#ifndef __TRACYPRINT_HPP__
+#define __TRACYPRINT_HPP__
+
+#ifdef TRACY_VERBOSE
+# include <stdio.h>
+# define TracyDebug(...) fprintf( stderr, __VA_ARGS__ );
+#else
+# define TracyDebug(...)
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyDxt1.cpp b/3rdparty/tracy/tracy/client/TracyDxt1.cpp
new file mode 100644
index 0000000..f1fb1e4
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyDxt1.cpp
@@ -0,0 +1,641 @@
+#include "TracyDxt1.hpp"
+#include "../common/TracyForceInline.hpp"
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __ARM_NEON
+# include <arm_neon.h>
+#endif
+
+#if defined __AVX__ && !defined __SSE4_1__
+# define __SSE4_1__
+#endif
+
+#if defined __SSE4_1__ || defined __AVX2__
+# ifdef _MSC_VER
+# include <intrin.h>
+# else
+# include <x86intrin.h>
+# ifndef _mm256_cvtsi256_si32
+# define _mm256_cvtsi256_si32( v ) ( _mm_cvtsi128_si32( _mm256_castsi256_si128( v ) ) )
+# endif
+# endif
+#endif
+
+namespace tracy
+{
+
+static inline uint16_t to565( uint8_t r, uint8_t g, uint8_t b )
+{
+ return ( ( r & 0xF8 ) << 8 ) | ( ( g & 0xFC ) << 3 ) | ( b >> 3 );
+}
+
+static inline uint16_t to565( uint32_t c )
+{
+ return
+ ( ( c & 0xF80000 ) >> 19 ) |
+ ( ( c & 0x00FC00 ) >> 5 ) |
+ ( ( c & 0x0000F8 ) << 8 );
+}
+
+static const uint16_t DivTable[255*3+1] = {
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xcccc, 0xaaaa, 0x9249, 0x8000, 0x71c7, 0x6666, 0x5d17, 0x5555, 0x4ec4, 0x4924, 0x4444, 0x4000,
+ 0x3c3c, 0x38e3, 0x35e5, 0x3333, 0x30c3, 0x2e8b, 0x2c85, 0x2aaa, 0x28f5, 0x2762, 0x25ed, 0x2492, 0x234f, 0x2222, 0x2108, 0x2000,
+ 0x1f07, 0x1e1e, 0x1d41, 0x1c71, 0x1bac, 0x1af2, 0x1a41, 0x1999, 0x18f9, 0x1861, 0x17d0, 0x1745, 0x16c1, 0x1642, 0x15c9, 0x1555,
+ 0x14e5, 0x147a, 0x1414, 0x13b1, 0x1352, 0x12f6, 0x129e, 0x1249, 0x11f7, 0x11a7, 0x115b, 0x1111, 0x10c9, 0x1084, 0x1041, 0x1000,
+ 0x0fc0, 0x0f83, 0x0f48, 0x0f0f, 0x0ed7, 0x0ea0, 0x0e6c, 0x0e38, 0x0e07, 0x0dd6, 0x0da7, 0x0d79, 0x0d4c, 0x0d20, 0x0cf6, 0x0ccc,
+ 0x0ca4, 0x0c7c, 0x0c56, 0x0c30, 0x0c0c, 0x0be8, 0x0bc5, 0x0ba2, 0x0b81, 0x0b60, 0x0b40, 0x0b21, 0x0b02, 0x0ae4, 0x0ac7, 0x0aaa,
+ 0x0a8e, 0x0a72, 0x0a57, 0x0a3d, 0x0a23, 0x0a0a, 0x09f1, 0x09d8, 0x09c0, 0x09a9, 0x0991, 0x097b, 0x0964, 0x094f, 0x0939, 0x0924,
+ 0x090f, 0x08fb, 0x08e7, 0x08d3, 0x08c0, 0x08ad, 0x089a, 0x0888, 0x0876, 0x0864, 0x0853, 0x0842, 0x0831, 0x0820, 0x0810, 0x0800,
+ 0x07f0, 0x07e0, 0x07d1, 0x07c1, 0x07b3, 0x07a4, 0x0795, 0x0787, 0x0779, 0x076b, 0x075d, 0x0750, 0x0743, 0x0736, 0x0729, 0x071c,
+ 0x070f, 0x0703, 0x06f7, 0x06eb, 0x06df, 0x06d3, 0x06c8, 0x06bc, 0x06b1, 0x06a6, 0x069b, 0x0690, 0x0685, 0x067b, 0x0670, 0x0666,
+ 0x065c, 0x0652, 0x0648, 0x063e, 0x0634, 0x062b, 0x0621, 0x0618, 0x060f, 0x0606, 0x05fd, 0x05f4, 0x05eb, 0x05e2, 0x05d9, 0x05d1,
+ 0x05c9, 0x05c0, 0x05b8, 0x05b0, 0x05a8, 0x05a0, 0x0598, 0x0590, 0x0588, 0x0581, 0x0579, 0x0572, 0x056b, 0x0563, 0x055c, 0x0555,
+ 0x054e, 0x0547, 0x0540, 0x0539, 0x0532, 0x052b, 0x0525, 0x051e, 0x0518, 0x0511, 0x050b, 0x0505, 0x04fe, 0x04f8, 0x04f2, 0x04ec,
+ 0x04e6, 0x04e0, 0x04da, 0x04d4, 0x04ce, 0x04c8, 0x04c3, 0x04bd, 0x04b8, 0x04b2, 0x04ad, 0x04a7, 0x04a2, 0x049c, 0x0497, 0x0492,
+ 0x048d, 0x0487, 0x0482, 0x047d, 0x0478, 0x0473, 0x046e, 0x0469, 0x0465, 0x0460, 0x045b, 0x0456, 0x0452, 0x044d, 0x0448, 0x0444,
+ 0x043f, 0x043b, 0x0436, 0x0432, 0x042d, 0x0429, 0x0425, 0x0421, 0x041c, 0x0418, 0x0414, 0x0410, 0x040c, 0x0408, 0x0404, 0x0400,
+ 0x03fc, 0x03f8, 0x03f4, 0x03f0, 0x03ec, 0x03e8, 0x03e4, 0x03e0, 0x03dd, 0x03d9, 0x03d5, 0x03d2, 0x03ce, 0x03ca, 0x03c7, 0x03c3,
+ 0x03c0, 0x03bc, 0x03b9, 0x03b5, 0x03b2, 0x03ae, 0x03ab, 0x03a8, 0x03a4, 0x03a1, 0x039e, 0x039b, 0x0397, 0x0394, 0x0391, 0x038e,
+ 0x038b, 0x0387, 0x0384, 0x0381, 0x037e, 0x037b, 0x0378, 0x0375, 0x0372, 0x036f, 0x036c, 0x0369, 0x0366, 0x0364, 0x0361, 0x035e,
+ 0x035b, 0x0358, 0x0355, 0x0353, 0x0350, 0x034d, 0x034a, 0x0348, 0x0345, 0x0342, 0x0340, 0x033d, 0x033a, 0x0338, 0x0335, 0x0333,
+ 0x0330, 0x032e, 0x032b, 0x0329, 0x0326, 0x0324, 0x0321, 0x031f, 0x031c, 0x031a, 0x0317, 0x0315, 0x0313, 0x0310, 0x030e, 0x030c,
+ 0x0309, 0x0307, 0x0305, 0x0303, 0x0300, 0x02fe, 0x02fc, 0x02fa, 0x02f7, 0x02f5, 0x02f3, 0x02f1, 0x02ef, 0x02ec, 0x02ea, 0x02e8,
+ 0x02e6, 0x02e4, 0x02e2, 0x02e0, 0x02de, 0x02dc, 0x02da, 0x02d8, 0x02d6, 0x02d4, 0x02d2, 0x02d0, 0x02ce, 0x02cc, 0x02ca, 0x02c8,
+ 0x02c6, 0x02c4, 0x02c2, 0x02c0, 0x02be, 0x02bc, 0x02bb, 0x02b9, 0x02b7, 0x02b5, 0x02b3, 0x02b1, 0x02b0, 0x02ae, 0x02ac, 0x02aa,
+ 0x02a8, 0x02a7, 0x02a5, 0x02a3, 0x02a1, 0x02a0, 0x029e, 0x029c, 0x029b, 0x0299, 0x0297, 0x0295, 0x0294, 0x0292, 0x0291, 0x028f,
+ 0x028d, 0x028c, 0x028a, 0x0288, 0x0287, 0x0285, 0x0284, 0x0282, 0x0280, 0x027f, 0x027d, 0x027c, 0x027a, 0x0279, 0x0277, 0x0276,
+ 0x0274, 0x0273, 0x0271, 0x0270, 0x026e, 0x026d, 0x026b, 0x026a, 0x0268, 0x0267, 0x0265, 0x0264, 0x0263, 0x0261, 0x0260, 0x025e,
+ 0x025d, 0x025c, 0x025a, 0x0259, 0x0257, 0x0256, 0x0255, 0x0253, 0x0252, 0x0251, 0x024f, 0x024e, 0x024d, 0x024b, 0x024a, 0x0249,
+ 0x0247, 0x0246, 0x0245, 0x0243, 0x0242, 0x0241, 0x0240, 0x023e, 0x023d, 0x023c, 0x023b, 0x0239, 0x0238, 0x0237, 0x0236, 0x0234,
+ 0x0233, 0x0232, 0x0231, 0x0230, 0x022e, 0x022d, 0x022c, 0x022b, 0x022a, 0x0229, 0x0227, 0x0226, 0x0225, 0x0224, 0x0223, 0x0222,
+ 0x0220, 0x021f, 0x021e, 0x021d, 0x021c, 0x021b, 0x021a, 0x0219, 0x0218, 0x0216, 0x0215, 0x0214, 0x0213, 0x0212, 0x0211, 0x0210,
+ 0x020f, 0x020e, 0x020d, 0x020c, 0x020b, 0x020a, 0x0209, 0x0208, 0x0207, 0x0206, 0x0205, 0x0204, 0x0203, 0x0202, 0x0201, 0x0200,
+ 0x01ff, 0x01fe, 0x01fd, 0x01fc, 0x01fb, 0x01fa, 0x01f9, 0x01f8, 0x01f7, 0x01f6, 0x01f5, 0x01f4, 0x01f3, 0x01f2, 0x01f1, 0x01f0,
+ 0x01ef, 0x01ee, 0x01ed, 0x01ec, 0x01eb, 0x01ea, 0x01e9, 0x01e9, 0x01e8, 0x01e7, 0x01e6, 0x01e5, 0x01e4, 0x01e3, 0x01e2, 0x01e1,
+ 0x01e0, 0x01e0, 0x01df, 0x01de, 0x01dd, 0x01dc, 0x01db, 0x01da, 0x01da, 0x01d9, 0x01d8, 0x01d7, 0x01d6, 0x01d5, 0x01d4, 0x01d4,
+ 0x01d3, 0x01d2, 0x01d1, 0x01d0, 0x01cf, 0x01cf, 0x01ce, 0x01cd, 0x01cc, 0x01cb, 0x01cb, 0x01ca, 0x01c9, 0x01c8, 0x01c7, 0x01c7,
+ 0x01c6, 0x01c5, 0x01c4, 0x01c3, 0x01c3, 0x01c2, 0x01c1, 0x01c0, 0x01c0, 0x01bf, 0x01be, 0x01bd, 0x01bd, 0x01bc, 0x01bb, 0x01ba,
+ 0x01ba, 0x01b9, 0x01b8, 0x01b7, 0x01b7, 0x01b6, 0x01b5, 0x01b4, 0x01b4, 0x01b3, 0x01b2, 0x01b2, 0x01b1, 0x01b0, 0x01af, 0x01af,
+ 0x01ae, 0x01ad, 0x01ad, 0x01ac, 0x01ab, 0x01aa, 0x01aa, 0x01a9, 0x01a8, 0x01a8, 0x01a7, 0x01a6, 0x01a6, 0x01a5, 0x01a4, 0x01a4,
+ 0x01a3, 0x01a2, 0x01a2, 0x01a1, 0x01a0, 0x01a0, 0x019f, 0x019e, 0x019e, 0x019d, 0x019c, 0x019c, 0x019b, 0x019a, 0x019a, 0x0199,
+ 0x0198, 0x0198, 0x0197, 0x0197, 0x0196, 0x0195, 0x0195, 0x0194, 0x0193, 0x0193, 0x0192, 0x0192, 0x0191, 0x0190, 0x0190, 0x018f,
+ 0x018f, 0x018e, 0x018d, 0x018d, 0x018c, 0x018b, 0x018b, 0x018a, 0x018a, 0x0189, 0x0189, 0x0188, 0x0187, 0x0187, 0x0186, 0x0186,
+ 0x0185, 0x0184, 0x0184, 0x0183, 0x0183, 0x0182, 0x0182, 0x0181, 0x0180, 0x0180, 0x017f, 0x017f, 0x017e, 0x017e, 0x017d, 0x017d,
+ 0x017c, 0x017b, 0x017b, 0x017a, 0x017a, 0x0179, 0x0179, 0x0178, 0x0178, 0x0177, 0x0177, 0x0176, 0x0175, 0x0175, 0x0174, 0x0174,
+ 0x0173, 0x0173, 0x0172, 0x0172, 0x0171, 0x0171, 0x0170, 0x0170, 0x016f, 0x016f, 0x016e, 0x016e, 0x016d, 0x016d, 0x016c, 0x016c,
+ 0x016b, 0x016b, 0x016a, 0x016a, 0x0169, 0x0169, 0x0168, 0x0168, 0x0167, 0x0167, 0x0166, 0x0166, 0x0165, 0x0165, 0x0164, 0x0164,
+ 0x0163, 0x0163, 0x0162, 0x0162, 0x0161, 0x0161, 0x0160, 0x0160, 0x015f, 0x015f, 0x015e, 0x015e, 0x015d, 0x015d, 0x015d, 0x015c,
+ 0x015c, 0x015b, 0x015b, 0x015a, 0x015a, 0x0159, 0x0159, 0x0158, 0x0158, 0x0158, 0x0157, 0x0157, 0x0156, 0x0156
+};
+static const uint16_t DivTableNEON[255*3+1] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x1c71, 0x1af2, 0x1999, 0x1861, 0x1745, 0x1642, 0x1555, 0x147a, 0x13b1, 0x12f6, 0x1249, 0x11a7, 0x1111, 0x1084, 0x1000,
+ 0x0f83, 0x0f0f, 0x0ea0, 0x0e38, 0x0dd6, 0x0d79, 0x0d20, 0x0ccc, 0x0c7c, 0x0c30, 0x0be8, 0x0ba2, 0x0b60, 0x0b21, 0x0ae4, 0x0aaa,
+ 0x0a72, 0x0a3d, 0x0a0a, 0x09d8, 0x09a9, 0x097b, 0x094f, 0x0924, 0x08fb, 0x08d3, 0x08ad, 0x0888, 0x0864, 0x0842, 0x0820, 0x0800,
+ 0x07e0, 0x07c1, 0x07a4, 0x0787, 0x076b, 0x0750, 0x0736, 0x071c, 0x0703, 0x06eb, 0x06d3, 0x06bc, 0x06a6, 0x0690, 0x067b, 0x0666,
+ 0x0652, 0x063e, 0x062b, 0x0618, 0x0606, 0x05f4, 0x05e2, 0x05d1, 0x05c0, 0x05b0, 0x05a0, 0x0590, 0x0581, 0x0572, 0x0563, 0x0555,
+ 0x0547, 0x0539, 0x052b, 0x051e, 0x0511, 0x0505, 0x04f8, 0x04ec, 0x04e0, 0x04d4, 0x04c8, 0x04bd, 0x04b2, 0x04a7, 0x049c, 0x0492,
+ 0x0487, 0x047d, 0x0473, 0x0469, 0x0460, 0x0456, 0x044d, 0x0444, 0x043b, 0x0432, 0x0429, 0x0421, 0x0418, 0x0410, 0x0408, 0x0400,
+ 0x03f8, 0x03f0, 0x03e8, 0x03e0, 0x03d9, 0x03d2, 0x03ca, 0x03c3, 0x03bc, 0x03b5, 0x03ae, 0x03a8, 0x03a1, 0x039b, 0x0394, 0x038e,
+ 0x0387, 0x0381, 0x037b, 0x0375, 0x036f, 0x0369, 0x0364, 0x035e, 0x0358, 0x0353, 0x034d, 0x0348, 0x0342, 0x033d, 0x0338, 0x0333,
+ 0x032e, 0x0329, 0x0324, 0x031f, 0x031a, 0x0315, 0x0310, 0x030c, 0x0307, 0x0303, 0x02fe, 0x02fa, 0x02f5, 0x02f1, 0x02ec, 0x02e8,
+ 0x02e4, 0x02e0, 0x02dc, 0x02d8, 0x02d4, 0x02d0, 0x02cc, 0x02c8, 0x02c4, 0x02c0, 0x02bc, 0x02b9, 0x02b5, 0x02b1, 0x02ae, 0x02aa,
+ 0x02a7, 0x02a3, 0x02a0, 0x029c, 0x0299, 0x0295, 0x0292, 0x028f, 0x028c, 0x0288, 0x0285, 0x0282, 0x027f, 0x027c, 0x0279, 0x0276,
+ 0x0273, 0x0270, 0x026d, 0x026a, 0x0267, 0x0264, 0x0261, 0x025e, 0x025c, 0x0259, 0x0256, 0x0253, 0x0251, 0x024e, 0x024b, 0x0249,
+ 0x0246, 0x0243, 0x0241, 0x023e, 0x023c, 0x0239, 0x0237, 0x0234, 0x0232, 0x0230, 0x022d, 0x022b, 0x0229, 0x0226, 0x0224, 0x0222,
+ 0x021f, 0x021d, 0x021b, 0x0219, 0x0216, 0x0214, 0x0212, 0x0210, 0x020e, 0x020c, 0x020a, 0x0208, 0x0206, 0x0204, 0x0202, 0x0200,
+ 0x01fe, 0x01fc, 0x01fa, 0x01f8, 0x01f6, 0x01f4, 0x01f2, 0x01f0, 0x01ee, 0x01ec, 0x01ea, 0x01e9, 0x01e7, 0x01e5, 0x01e3, 0x01e1,
+ 0x01e0, 0x01de, 0x01dc, 0x01da, 0x01d9, 0x01d7, 0x01d5, 0x01d4, 0x01d2, 0x01d0, 0x01cf, 0x01cd, 0x01cb, 0x01ca, 0x01c8, 0x01c7,
+ 0x01c5, 0x01c3, 0x01c2, 0x01c0, 0x01bf, 0x01bd, 0x01bc, 0x01ba, 0x01b9, 0x01b7, 0x01b6, 0x01b4, 0x01b3, 0x01b2, 0x01b0, 0x01af,
+ 0x01ad, 0x01ac, 0x01aa, 0x01a9, 0x01a8, 0x01a6, 0x01a5, 0x01a4, 0x01a2, 0x01a1, 0x01a0, 0x019e, 0x019d, 0x019c, 0x019a, 0x0199,
+ 0x0198, 0x0197, 0x0195, 0x0194, 0x0193, 0x0192, 0x0190, 0x018f, 0x018e, 0x018d, 0x018b, 0x018a, 0x0189, 0x0188, 0x0187, 0x0186,
+ 0x0184, 0x0183, 0x0182, 0x0181, 0x0180, 0x017f, 0x017e, 0x017d, 0x017b, 0x017a, 0x0179, 0x0178, 0x0177, 0x0176, 0x0175, 0x0174,
+ 0x0173, 0x0172, 0x0171, 0x0170, 0x016f, 0x016e, 0x016d, 0x016c, 0x016b, 0x016a, 0x0169, 0x0168, 0x0167, 0x0166, 0x0165, 0x0164,
+ 0x0163, 0x0162, 0x0161, 0x0160, 0x015f, 0x015e, 0x015d, 0x015c, 0x015b, 0x015a, 0x0159, 0x0158, 0x0158, 0x0157, 0x0156, 0x0155,
+ 0x0154, 0x0153, 0x0152, 0x0151, 0x0150, 0x0150, 0x014f, 0x014e, 0x014d, 0x014c, 0x014b, 0x014a, 0x014a, 0x0149, 0x0148, 0x0147,
+ 0x0146, 0x0146, 0x0145, 0x0144, 0x0143, 0x0142, 0x0142, 0x0141, 0x0140, 0x013f, 0x013e, 0x013e, 0x013d, 0x013c, 0x013b, 0x013b,
+ 0x013a, 0x0139, 0x0138, 0x0138, 0x0137, 0x0136, 0x0135, 0x0135, 0x0134, 0x0133, 0x0132, 0x0132, 0x0131, 0x0130, 0x0130, 0x012f,
+ 0x012e, 0x012e, 0x012d, 0x012c, 0x012b, 0x012b, 0x012a, 0x0129, 0x0129, 0x0128, 0x0127, 0x0127, 0x0126, 0x0125, 0x0125, 0x0124,
+ 0x0123, 0x0123, 0x0122, 0x0121, 0x0121, 0x0120, 0x0120, 0x011f, 0x011e, 0x011e, 0x011d, 0x011c, 0x011c, 0x011b, 0x011b, 0x011a,
+ 0x0119, 0x0119, 0x0118, 0x0118, 0x0117, 0x0116, 0x0116, 0x0115, 0x0115, 0x0114, 0x0113, 0x0113, 0x0112, 0x0112, 0x0111, 0x0111,
+ 0x0110, 0x010f, 0x010f, 0x010e, 0x010e, 0x010d, 0x010d, 0x010c, 0x010c, 0x010b, 0x010a, 0x010a, 0x0109, 0x0109, 0x0108, 0x0108,
+ 0x0107, 0x0107, 0x0106, 0x0106, 0x0105, 0x0105, 0x0104, 0x0104, 0x0103, 0x0103, 0x0102, 0x0102, 0x0101, 0x0101, 0x0100, 0x0100,
+ 0x00ff, 0x00ff, 0x00fe, 0x00fe, 0x00fd, 0x00fd, 0x00fc, 0x00fc, 0x00fb, 0x00fb, 0x00fa, 0x00fa, 0x00f9, 0x00f9, 0x00f8, 0x00f8,
+ 0x00f7, 0x00f7, 0x00f6, 0x00f6, 0x00f5, 0x00f5, 0x00f4, 0x00f4, 0x00f4, 0x00f3, 0x00f3, 0x00f2, 0x00f2, 0x00f1, 0x00f1, 0x00f0,
+ 0x00f0, 0x00f0, 0x00ef, 0x00ef, 0x00ee, 0x00ee, 0x00ed, 0x00ed, 0x00ed, 0x00ec, 0x00ec, 0x00eb, 0x00eb, 0x00ea, 0x00ea, 0x00ea,
+ 0x00e9, 0x00e9, 0x00e8, 0x00e8, 0x00e7, 0x00e7, 0x00e7, 0x00e6, 0x00e6, 0x00e5, 0x00e5, 0x00e5, 0x00e4, 0x00e4, 0x00e3, 0x00e3,
+ 0x00e3, 0x00e2, 0x00e2, 0x00e1, 0x00e1, 0x00e1, 0x00e0, 0x00e0, 0x00e0, 0x00df, 0x00df, 0x00de, 0x00de, 0x00de, 0x00dd, 0x00dd,
+ 0x00dd, 0x00dc, 0x00dc, 0x00db, 0x00db, 0x00db, 0x00da, 0x00da, 0x00da, 0x00d9, 0x00d9, 0x00d9, 0x00d8, 0x00d8, 0x00d7, 0x00d7,
+ 0x00d7, 0x00d6, 0x00d6, 0x00d6, 0x00d5, 0x00d5, 0x00d5, 0x00d4, 0x00d4, 0x00d4, 0x00d3, 0x00d3, 0x00d3, 0x00d2, 0x00d2, 0x00d2,
+ 0x00d1, 0x00d1, 0x00d1, 0x00d0, 0x00d0, 0x00d0, 0x00cf, 0x00cf, 0x00cf, 0x00ce, 0x00ce, 0x00ce, 0x00cd, 0x00cd, 0x00cd, 0x00cc,
+ 0x00cc, 0x00cc, 0x00cb, 0x00cb, 0x00cb, 0x00ca, 0x00ca, 0x00ca, 0x00c9, 0x00c9, 0x00c9, 0x00c9, 0x00c8, 0x00c8, 0x00c8, 0x00c7,
+ 0x00c7, 0x00c7, 0x00c6, 0x00c6, 0x00c6, 0x00c5, 0x00c5, 0x00c5, 0x00c5, 0x00c4, 0x00c4, 0x00c4, 0x00c3, 0x00c3, 0x00c3, 0x00c3,
+ 0x00c2, 0x00c2, 0x00c2, 0x00c1, 0x00c1, 0x00c1, 0x00c1, 0x00c0, 0x00c0, 0x00c0, 0x00bf, 0x00bf, 0x00bf, 0x00bf, 0x00be, 0x00be,
+ 0x00be, 0x00bd, 0x00bd, 0x00bd, 0x00bd, 0x00bc, 0x00bc, 0x00bc, 0x00bc, 0x00bb, 0x00bb, 0x00bb, 0x00ba, 0x00ba, 0x00ba, 0x00ba,
+ 0x00b9, 0x00b9, 0x00b9, 0x00b9, 0x00b8, 0x00b8, 0x00b8, 0x00b8, 0x00b7, 0x00b7, 0x00b7, 0x00b7, 0x00b6, 0x00b6, 0x00b6, 0x00b6,
+ 0x00b5, 0x00b5, 0x00b5, 0x00b5, 0x00b4, 0x00b4, 0x00b4, 0x00b4, 0x00b3, 0x00b3, 0x00b3, 0x00b3, 0x00b2, 0x00b2, 0x00b2, 0x00b2,
+ 0x00b1, 0x00b1, 0x00b1, 0x00b1, 0x00b0, 0x00b0, 0x00b0, 0x00b0, 0x00af, 0x00af, 0x00af, 0x00af, 0x00ae, 0x00ae, 0x00ae, 0x00ae,
+ 0x00ae, 0x00ad, 0x00ad, 0x00ad, 0x00ad, 0x00ac, 0x00ac, 0x00ac, 0x00ac, 0x00ac, 0x00ab, 0x00ab, 0x00ab, 0x00ab,
+};
+
+
+static tracy_force_inline uint64_t ProcessRGB( const uint8_t* src )
+{
+#ifdef __SSE4_1__
+ __m128i px0 = _mm_loadu_si128(((__m128i*)src) + 0);
+ __m128i px1 = _mm_loadu_si128(((__m128i*)src) + 1);
+ __m128i px2 = _mm_loadu_si128(((__m128i*)src) + 2);
+ __m128i px3 = _mm_loadu_si128(((__m128i*)src) + 3);
+
+ __m128i smask = _mm_set1_epi32( 0xF8FCF8 );
+ __m128i sd0 = _mm_and_si128( px0, smask );
+ __m128i sd1 = _mm_and_si128( px1, smask );
+ __m128i sd2 = _mm_and_si128( px2, smask );
+ __m128i sd3 = _mm_and_si128( px3, smask );
+
+ __m128i sc = _mm_shuffle_epi32(sd0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128i sc0 = _mm_cmpeq_epi8(sd0, sc);
+ __m128i sc1 = _mm_cmpeq_epi8(sd1, sc);
+ __m128i sc2 = _mm_cmpeq_epi8(sd2, sc);
+ __m128i sc3 = _mm_cmpeq_epi8(sd3, sc);
+
+ __m128i sm0 = _mm_and_si128(sc0, sc1);
+ __m128i sm1 = _mm_and_si128(sc2, sc3);
+ __m128i sm = _mm_and_si128(sm0, sm1);
+
+ if( _mm_testc_si128(sm, _mm_set1_epi32(-1)) )
+ {
+ return uint64_t( to565( src[0], src[1], src[2] ) ) << 16;
+ }
+
+ __m128i amask = _mm_set1_epi32( 0xFFFFFF );
+ px0 = _mm_and_si128( px0, amask );
+ px1 = _mm_and_si128( px1, amask );
+ px2 = _mm_and_si128( px2, amask );
+ px3 = _mm_and_si128( px3, amask );
+
+ __m128i min0 = _mm_min_epu8( px0, px1 );
+ __m128i min1 = _mm_min_epu8( px2, px3 );
+ __m128i min2 = _mm_min_epu8( min0, min1 );
+
+ __m128i max0 = _mm_max_epu8( px0, px1 );
+ __m128i max1 = _mm_max_epu8( px2, px3 );
+ __m128i max2 = _mm_max_epu8( max0, max1 );
+
+ __m128i min3 = _mm_shuffle_epi32( min2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i max3 = _mm_shuffle_epi32( max2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i min4 = _mm_min_epu8( min2, min3 );
+ __m128i max4 = _mm_max_epu8( max2, max3 );
+
+ __m128i min5 = _mm_shuffle_epi32( min4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i max5 = _mm_shuffle_epi32( max4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i rmin = _mm_min_epu8( min4, min5 );
+ __m128i rmax = _mm_max_epu8( max4, max5 );
+
+ __m128i range1 = _mm_subs_epu8( rmax, rmin );
+ __m128i range2 = _mm_sad_epu8( rmax, rmin );
+
+ uint32_t vrange = _mm_cvtsi128_si32( range2 ) >> 1;
+ __m128i range = _mm_set1_epi16( DivTable[vrange] );
+
+ __m128i inset1 = _mm_srli_epi16( range1, 4 );
+ __m128i inset = _mm_and_si128( inset1, _mm_set1_epi8( 0xF ) );
+ __m128i min = _mm_adds_epu8( rmin, inset );
+ __m128i max = _mm_subs_epu8( rmax, inset );
+
+ __m128i c0 = _mm_subs_epu8( px0, rmin );
+ __m128i c1 = _mm_subs_epu8( px1, rmin );
+ __m128i c2 = _mm_subs_epu8( px2, rmin );
+ __m128i c3 = _mm_subs_epu8( px3, rmin );
+
+ __m128i is0 = _mm_maddubs_epi16( c0, _mm_set1_epi8( 1 ) );
+ __m128i is1 = _mm_maddubs_epi16( c1, _mm_set1_epi8( 1 ) );
+ __m128i is2 = _mm_maddubs_epi16( c2, _mm_set1_epi8( 1 ) );
+ __m128i is3 = _mm_maddubs_epi16( c3, _mm_set1_epi8( 1 ) );
+
+ __m128i s0 = _mm_hadd_epi16( is0, is1 );
+ __m128i s1 = _mm_hadd_epi16( is2, is3 );
+
+ __m128i m0 = _mm_mulhi_epu16( s0, range );
+ __m128i m1 = _mm_mulhi_epu16( s1, range );
+
+ __m128i p0 = _mm_packus_epi16( m0, m1 );
+
+ __m128i p1 = _mm_or_si128( _mm_srai_epi32( p0, 6 ), _mm_srai_epi32( p0, 12 ) );
+ __m128i p2 = _mm_or_si128( _mm_srai_epi32( p0, 18 ), p0 );
+ __m128i p3 = _mm_or_si128( p1, p2 );
+ __m128i p =_mm_shuffle_epi8( p3, _mm_set1_epi32( 0x0C080400 ) );
+
+ uint32_t vmin = _mm_cvtsi128_si32( min );
+ uint32_t vmax = _mm_cvtsi128_si32( max );
+ uint32_t vp = _mm_cvtsi128_si32( p );
+
+ return uint64_t( ( uint64_t( to565( vmin ) ) << 16 ) | to565( vmax ) | ( uint64_t( vp ) << 32 ) );
+#elif defined __ARM_NEON
+# ifdef __aarch64__
+ uint8x16x4_t px = vld4q_u8( src );
+
+ uint8x16_t lr = px.val[0];
+ uint8x16_t lg = px.val[1];
+ uint8x16_t lb = px.val[2];
+
+ uint8_t rmaxr = vmaxvq_u8( lr );
+ uint8_t rmaxg = vmaxvq_u8( lg );
+ uint8_t rmaxb = vmaxvq_u8( lb );
+
+ uint8_t rminr = vminvq_u8( lr );
+ uint8_t rming = vminvq_u8( lg );
+ uint8_t rminb = vminvq_u8( lb );
+
+ int rr = rmaxr - rminr;
+ int rg = rmaxg - rming;
+ int rb = rmaxb - rminb;
+
+ int vrange1 = rr + rg + rb;
+ uint16_t vrange2 = DivTableNEON[vrange1];
+
+ uint8_t insetr = rr >> 4;
+ uint8_t insetg = rg >> 4;
+ uint8_t insetb = rb >> 4;
+
+ uint8_t minr = rminr + insetr;
+ uint8_t ming = rming + insetg;
+ uint8_t minb = rminb + insetb;
+
+ uint8_t maxr = rmaxr - insetr;
+ uint8_t maxg = rmaxg - insetg;
+ uint8_t maxb = rmaxb - insetb;
+
+ uint8x16_t cr = vsubq_u8( lr, vdupq_n_u8( rminr ) );
+ uint8x16_t cg = vsubq_u8( lg, vdupq_n_u8( rming ) );
+ uint8x16_t cb = vsubq_u8( lb, vdupq_n_u8( rminb ) );
+
+ uint16x8_t is0l = vaddl_u8( vget_low_u8( cr ), vget_low_u8( cg ) );
+ uint16x8_t is0h = vaddl_u8( vget_high_u8( cr ), vget_high_u8( cg ) );
+ uint16x8_t is1l = vaddw_u8( is0l, vget_low_u8( cb ) );
+ uint16x8_t is1h = vaddw_u8( is0h, vget_high_u8( cb ) );
+
+ int16x8_t range = vdupq_n_s16( vrange2 );
+ uint16x8_t m0 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( is1l ), range ) );
+ uint16x8_t m1 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( is1h ), range ) );
+
+ uint8x8_t p00 = vmovn_u16( m0 );
+ uint8x8_t p01 = vmovn_u16( m1 );
+ uint8x16_t p0 = vcombine_u8( p00, p01 );
+
+ uint32x4_t p1 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 6 ), vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 12 ) );
+ uint32x4_t p2 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 18 ), vreinterpretq_u32_u8( p0 ) );
+ uint32x4_t p3 = vaddq_u32( p1, p2 );
+
+ uint16x4x2_t p4 = vuzp_u16( vget_low_u16( vreinterpretq_u16_u32( p3 ) ), vget_high_u16( vreinterpretq_u16_u32( p3 ) ) );
+ uint8x8x2_t p = vuzp_u8( vreinterpret_u8_u16( p4.val[0] ), vreinterpret_u8_u16( p4.val[0] ) );
+
+ uint32_t vp;
+ vst1_lane_u32( &vp, vreinterpret_u32_u8( p.val[0] ), 0 );
+
+ return uint64_t( ( uint64_t( to565( minr, ming, minb ) ) << 16 ) | to565( maxr, maxg, maxb ) | ( uint64_t( vp ) << 32 ) );
+# else
+ uint32x4_t px0 = vld1q_u32( (uint32_t*)src );
+ uint32x4_t px1 = vld1q_u32( (uint32_t*)src + 4 );
+ uint32x4_t px2 = vld1q_u32( (uint32_t*)src + 8 );
+ uint32x4_t px3 = vld1q_u32( (uint32_t*)src + 12 );
+
+ uint32x4_t smask = vdupq_n_u32( 0xF8FCF8 );
+ uint32x4_t sd0 = vandq_u32( smask, px0 );
+ uint32x4_t sd1 = vandq_u32( smask, px1 );
+ uint32x4_t sd2 = vandq_u32( smask, px2 );
+ uint32x4_t sd3 = vandq_u32( smask, px3 );
+
+ uint32x4_t sc = vdupq_n_u32( sd0[0] );
+
+ uint32x4_t sc0 = vceqq_u32( sd0, sc );
+ uint32x4_t sc1 = vceqq_u32( sd1, sc );
+ uint32x4_t sc2 = vceqq_u32( sd2, sc );
+ uint32x4_t sc3 = vceqq_u32( sd3, sc );
+
+ uint32x4_t sm0 = vandq_u32( sc0, sc1 );
+ uint32x4_t sm1 = vandq_u32( sc2, sc3 );
+ int64x2_t sm = vreinterpretq_s64_u32( vandq_u32( sm0, sm1 ) );
+
+ if( sm[0] == -1 && sm[1] == -1 )
+ {
+ return uint64_t( to565( src[0], src[1], src[2] ) ) << 16;
+ }
+
+ uint32x4_t mask = vdupq_n_u32( 0xFFFFFF );
+ uint8x16_t l0 = vreinterpretq_u8_u32( vandq_u32( mask, px0 ) );
+ uint8x16_t l1 = vreinterpretq_u8_u32( vandq_u32( mask, px1 ) );
+ uint8x16_t l2 = vreinterpretq_u8_u32( vandq_u32( mask, px2 ) );
+ uint8x16_t l3 = vreinterpretq_u8_u32( vandq_u32( mask, px3 ) );
+
+ uint8x16_t min0 = vminq_u8( l0, l1 );
+ uint8x16_t min1 = vminq_u8( l2, l3 );
+ uint8x16_t min2 = vminq_u8( min0, min1 );
+
+ uint8x16_t max0 = vmaxq_u8( l0, l1 );
+ uint8x16_t max1 = vmaxq_u8( l2, l3 );
+ uint8x16_t max2 = vmaxq_u8( max0, max1 );
+
+ uint8x16_t min3 = vreinterpretq_u8_u32( vrev64q_u32( vreinterpretq_u32_u8( min2 ) ) );
+ uint8x16_t max3 = vreinterpretq_u8_u32( vrev64q_u32( vreinterpretq_u32_u8( max2 ) ) );
+
+ uint8x16_t min4 = vminq_u8( min2, min3 );
+ uint8x16_t max4 = vmaxq_u8( max2, max3 );
+
+ uint8x16_t min5 = vcombine_u8( vget_high_u8( min4 ), vget_low_u8( min4 ) );
+ uint8x16_t max5 = vcombine_u8( vget_high_u8( max4 ), vget_low_u8( max4 ) );
+
+ uint8x16_t rmin = vminq_u8( min4, min5 );
+ uint8x16_t rmax = vmaxq_u8( max4, max5 );
+
+ uint8x16_t range1 = vsubq_u8( rmax, rmin );
+ uint8x8_t range2 = vget_low_u8( range1 );
+ uint8x8x2_t range3 = vzip_u8( range2, vdup_n_u8( 0 ) );
+ uint16x4_t range4 = vreinterpret_u16_u8( range3.val[0] );
+
+ uint16_t vrange1;
+ uint16x4_t range5 = vpadd_u16( range4, range4 );
+ uint16x4_t range6 = vpadd_u16( range5, range5 );
+ vst1_lane_u16( &vrange1, range6, 0 );
+
+ uint32_t vrange2 = ( 2 << 16 ) / uint32_t( vrange1 + 1 );
+ uint16x8_t range = vdupq_n_u16( vrange2 );
+
+ uint8x16_t inset = vshrq_n_u8( range1, 4 );
+ uint8x16_t min = vaddq_u8( rmin, inset );
+ uint8x16_t max = vsubq_u8( rmax, inset );
+
+ uint8x16_t c0 = vsubq_u8( l0, rmin );
+ uint8x16_t c1 = vsubq_u8( l1, rmin );
+ uint8x16_t c2 = vsubq_u8( l2, rmin );
+ uint8x16_t c3 = vsubq_u8( l3, rmin );
+
+ uint16x8_t is0 = vpaddlq_u8( c0 );
+ uint16x8_t is1 = vpaddlq_u8( c1 );
+ uint16x8_t is2 = vpaddlq_u8( c2 );
+ uint16x8_t is3 = vpaddlq_u8( c3 );
+
+ uint16x4_t is4 = vpadd_u16( vget_low_u16( is0 ), vget_high_u16( is0 ) );
+ uint16x4_t is5 = vpadd_u16( vget_low_u16( is1 ), vget_high_u16( is1 ) );
+ uint16x4_t is6 = vpadd_u16( vget_low_u16( is2 ), vget_high_u16( is2 ) );
+ uint16x4_t is7 = vpadd_u16( vget_low_u16( is3 ), vget_high_u16( is3 ) );
+
+ uint16x8_t s0 = vcombine_u16( is4, is5 );
+ uint16x8_t s1 = vcombine_u16( is6, is7 );
+
+ uint16x8_t m0 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( s0 ), vreinterpretq_s16_u16( range ) ) );
+ uint16x8_t m1 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( s1 ), vreinterpretq_s16_u16( range ) ) );
+
+ uint8x8_t p00 = vmovn_u16( m0 );
+ uint8x8_t p01 = vmovn_u16( m1 );
+ uint8x16_t p0 = vcombine_u8( p00, p01 );
+
+ uint32x4_t p1 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 6 ), vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 12 ) );
+ uint32x4_t p2 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 18 ), vreinterpretq_u32_u8( p0 ) );
+ uint32x4_t p3 = vaddq_u32( p1, p2 );
+
+ uint16x4x2_t p4 = vuzp_u16( vget_low_u16( vreinterpretq_u16_u32( p3 ) ), vget_high_u16( vreinterpretq_u16_u32( p3 ) ) );
+ uint8x8x2_t p = vuzp_u8( vreinterpret_u8_u16( p4.val[0] ), vreinterpret_u8_u16( p4.val[0] ) );
+
+ uint32_t vmin, vmax, vp;
+ vst1q_lane_u32( &vmin, vreinterpretq_u32_u8( min ), 0 );
+ vst1q_lane_u32( &vmax, vreinterpretq_u32_u8( max ), 0 );
+ vst1_lane_u32( &vp, vreinterpret_u32_u8( p.val[0] ), 0 );
+
+ return uint64_t( ( uint64_t( to565( vmin ) ) << 16 ) | to565( vmax ) | ( uint64_t( vp ) << 32 ) );
+# endif
+#else
+ uint32_t ref;
+ memcpy( &ref, src, 4 );
+ uint32_t refMask = ref & 0xF8FCF8;
+ auto stmp = src + 4;
+ for( int i=1; i<16; i++ )
+ {
+ uint32_t px;
+ memcpy( &px, stmp, 4 );
+ if( ( px & 0xF8FCF8 ) != refMask ) break;
+ stmp += 4;
+ }
+ if( stmp == src + 64 )
+ {
+ return uint64_t( to565( ref ) ) << 16;
+ }
+
+ uint8_t min[3] = { src[0], src[1], src[2] };
+ uint8_t max[3] = { src[0], src[1], src[2] };
+ auto tmp = src + 4;
+ for( int i=1; i<16; i++ )
+ {
+ for( int j=0; j<3; j++ )
+ {
+ if( tmp[j] < min[j] ) min[j] = tmp[j];
+ else if( tmp[j] > max[j] ) max[j] = tmp[j];
+ }
+ tmp += 4;
+ }
+
+ const uint32_t range = DivTable[max[0] - min[0] + max[1] - min[1] + max[2] - min[2]];
+ const uint32_t rmin = min[0] + min[1] + min[2];
+ for( int i=0; i<3; i++ )
+ {
+ const uint8_t inset = ( max[i] - min[i] ) >> 4;
+ min[i] += inset;
+ max[i] -= inset;
+ }
+
+ uint32_t data = 0;
+ for( int i=0; i<16; i++ )
+ {
+ const uint32_t c = src[0] + src[1] + src[2] - rmin;
+ const uint8_t idx = ( c * range ) >> 16;
+ data |= idx << (i*2);
+ src += 4;
+ }
+
+ return uint64_t( ( uint64_t( to565( min[0], min[1], min[2] ) ) << 16 ) | to565( max[0], max[1], max[2] ) | ( uint64_t( data ) << 32 ) );
+#endif
+}
+
+#ifdef __AVX2__
+static tracy_force_inline void ProcessRGB_AVX( const uint8_t* src, char*& dst )
+{
+ __m256i px0 = _mm256_loadu_si256(((__m256i*)src) + 0);
+ __m256i px1 = _mm256_loadu_si256(((__m256i*)src) + 1);
+ __m256i px2 = _mm256_loadu_si256(((__m256i*)src) + 2);
+ __m256i px3 = _mm256_loadu_si256(((__m256i*)src) + 3);
+
+ __m256i smask = _mm256_set1_epi32( 0xF8FCF8 );
+ __m256i sd0 = _mm256_and_si256( px0, smask );
+ __m256i sd1 = _mm256_and_si256( px1, smask );
+ __m256i sd2 = _mm256_and_si256( px2, smask );
+ __m256i sd3 = _mm256_and_si256( px3, smask );
+
+ __m256i sc = _mm256_shuffle_epi32(sd0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m256i sc0 = _mm256_cmpeq_epi8( sd0, sc );
+ __m256i sc1 = _mm256_cmpeq_epi8( sd1, sc );
+ __m256i sc2 = _mm256_cmpeq_epi8( sd2, sc );
+ __m256i sc3 = _mm256_cmpeq_epi8( sd3, sc );
+
+ __m256i sm0 = _mm256_and_si256( sc0, sc1 );
+ __m256i sm1 = _mm256_and_si256( sc2, sc3 );
+ __m256i sm = _mm256_and_si256( sm0, sm1 );
+
+ const int64_t solid0 = 1 - _mm_testc_si128( _mm256_castsi256_si128( sm ), _mm_set1_epi32( -1 ) );
+ const int64_t solid1 = 1 - _mm_testc_si128( _mm256_extracti128_si256( sm, 1 ), _mm_set1_epi32( -1 ) );
+
+ if( solid0 + solid1 == 0 )
+ {
+ const auto c0 = uint64_t( to565( src[0], src[1], src[2] ) ) << 16;
+ const auto c1 = uint64_t( to565( src[16], src[17], src[18] ) ) << 16;
+ memcpy( dst, &c0, 8 );
+ memcpy( dst+8, &c1, 8 );
+ dst += 16;
+ return;
+ }
+
+ __m256i amask = _mm256_set1_epi32( 0xFFFFFF );
+ px0 = _mm256_and_si256( px0, amask );
+ px1 = _mm256_and_si256( px1, amask );
+ px2 = _mm256_and_si256( px2, amask );
+ px3 = _mm256_and_si256( px3, amask );
+
+ __m256i min0 = _mm256_min_epu8( px0, px1 );
+ __m256i min1 = _mm256_min_epu8( px2, px3 );
+ __m256i min2 = _mm256_min_epu8( min0, min1 );
+
+ __m256i max0 = _mm256_max_epu8( px0, px1 );
+ __m256i max1 = _mm256_max_epu8( px2, px3 );
+ __m256i max2 = _mm256_max_epu8( max0, max1 );
+
+ __m256i min3 = _mm256_shuffle_epi32( min2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m256i max3 = _mm256_shuffle_epi32( max2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m256i min4 = _mm256_min_epu8( min2, min3 );
+ __m256i max4 = _mm256_max_epu8( max2, max3 );
+
+ __m256i min5 = _mm256_shuffle_epi32( min4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m256i max5 = _mm256_shuffle_epi32( max4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m256i rmin = _mm256_min_epu8( min4, min5 );
+ __m256i rmax = _mm256_max_epu8( max4, max5 );
+
+ __m256i range1 = _mm256_subs_epu8( rmax, rmin );
+ __m256i range2 = _mm256_sad_epu8( rmax, rmin );
+
+ uint16_t vrange0 = DivTable[_mm256_cvtsi256_si32( range2 ) >> 1];
+ uint16_t vrange1 = DivTable[_mm256_extract_epi16( range2, 8 ) >> 1];
+ __m256i range00 = _mm256_set1_epi16( vrange0 );
+ __m256i range = _mm256_inserti128_si256( range00, _mm_set1_epi16( vrange1 ), 1 );
+
+ __m256i inset1 = _mm256_srli_epi16( range1, 4 );
+ __m256i inset = _mm256_and_si256( inset1, _mm256_set1_epi8( 0xF ) );
+ __m256i min = _mm256_adds_epu8( rmin, inset );
+ __m256i max = _mm256_subs_epu8( rmax, inset );
+
+ __m256i c0 = _mm256_subs_epu8( px0, rmin );
+ __m256i c1 = _mm256_subs_epu8( px1, rmin );
+ __m256i c2 = _mm256_subs_epu8( px2, rmin );
+ __m256i c3 = _mm256_subs_epu8( px3, rmin );
+
+ __m256i is0 = _mm256_maddubs_epi16( c0, _mm256_set1_epi8( 1 ) );
+ __m256i is1 = _mm256_maddubs_epi16( c1, _mm256_set1_epi8( 1 ) );
+ __m256i is2 = _mm256_maddubs_epi16( c2, _mm256_set1_epi8( 1 ) );
+ __m256i is3 = _mm256_maddubs_epi16( c3, _mm256_set1_epi8( 1 ) );
+
+ __m256i s0 = _mm256_hadd_epi16( is0, is1 );
+ __m256i s1 = _mm256_hadd_epi16( is2, is3 );
+
+ __m256i m0 = _mm256_mulhi_epu16( s0, range );
+ __m256i m1 = _mm256_mulhi_epu16( s1, range );
+
+ __m256i p0 = _mm256_packus_epi16( m0, m1 );
+
+ __m256i p1 = _mm256_or_si256( _mm256_srai_epi32( p0, 6 ), _mm256_srai_epi32( p0, 12 ) );
+ __m256i p2 = _mm256_or_si256( _mm256_srai_epi32( p0, 18 ), p0 );
+ __m256i p3 = _mm256_or_si256( p1, p2 );
+ __m256i p =_mm256_shuffle_epi8( p3, _mm256_set1_epi32( 0x0C080400 ) );
+
+ __m256i mm0 = _mm256_unpacklo_epi8( _mm256_setzero_si256(), min );
+ __m256i mm1 = _mm256_unpacklo_epi8( _mm256_setzero_si256(), max );
+ __m256i mm2 = _mm256_unpacklo_epi64( mm1, mm0 );
+ __m256i mmr = _mm256_slli_epi64( _mm256_srli_epi64( mm2, 11 ), 11 );
+ __m256i mmg = _mm256_slli_epi64( _mm256_srli_epi64( mm2, 26 ), 5 );
+ __m256i mmb = _mm256_srli_epi64( _mm256_slli_epi64( mm2, 16 ), 59 );
+ __m256i mm3 = _mm256_or_si256( mmr, mmg );
+ __m256i mm4 = _mm256_or_si256( mm3, mmb );
+ __m256i mm5 = _mm256_shuffle_epi8( mm4, _mm256_set1_epi32( 0x09080100 ) );
+
+ __m256i d0 = _mm256_unpacklo_epi32( mm5, p );
+ __m256i d1 = _mm256_permute4x64_epi64( d0, _MM_SHUFFLE( 3, 2, 2, 0 ) );
+ __m128i d2 = _mm256_castsi256_si128( d1 );
+
+ __m128i mask = _mm_set_epi64x( 0xFFFF0000 | -solid1, 0xFFFF0000 | -solid0 );
+ __m128i d3 = _mm_and_si128( d2, mask );
+ _mm_storeu_si128( (__m128i*)dst, d3 );
+ dst += 16;
+}
+#endif
+
+void CompressImageDxt1( const char* src, char* dst, int w, int h )
+{
+ assert( (w % 4) == 0 && (h % 4) == 0 );
+
+#ifdef __AVX2__
+ if( w%8 == 0 )
+ {
+ uint32_t buf[8*4];
+ int i = 0;
+
+ auto blocks = w * h / 32;
+ do
+ {
+ auto tmp = (char*)buf;
+ memcpy( tmp, src, 8*4 );
+ memcpy( tmp + 8*4, src + w * 4, 8*4 );
+ memcpy( tmp + 16*4, src + w * 8, 8*4 );
+ memcpy( tmp + 24*4, src + w * 12, 8*4 );
+ src += 8*4;
+ if( ++i == w/8 )
+ {
+ src += w * 3 * 4;
+ i = 0;
+ }
+
+ ProcessRGB_AVX( (uint8_t*)buf, dst );
+ }
+ while( --blocks );
+ }
+ else
+#endif
+ {
+ uint32_t buf[4*4];
+ int i = 0;
+
+ auto ptr = dst;
+ auto blocks = w * h / 16;
+ do
+ {
+ auto tmp = (char*)buf;
+ memcpy( tmp, src, 4*4 );
+ memcpy( tmp + 4*4, src + w * 4, 4*4 );
+ memcpy( tmp + 8*4, src + w * 8, 4*4 );
+ memcpy( tmp + 12*4, src + w * 12, 4*4 );
+ src += 4*4;
+ if( ++i == w/4 )
+ {
+ src += w * 3 * 4;
+ i = 0;
+ }
+
+ const auto c = ProcessRGB( (uint8_t*)buf );
+ memcpy( ptr, &c, sizeof( uint64_t ) );
+ ptr += sizeof( uint64_t );
+ }
+ while( --blocks );
+ }
+}
+
+}
diff --git a/3rdparty/tracy/tracy/client/TracyDxt1.hpp b/3rdparty/tracy/tracy/client/TracyDxt1.hpp
new file mode 100644
index 0000000..c231354
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyDxt1.hpp
@@ -0,0 +1,11 @@
+#ifndef __TRACYDXT1_HPP__
+#define __TRACYDXT1_HPP__
+
+namespace tracy
+{
+
+void CompressImageDxt1( const char* src, char* dst, int w, int h );
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyFastVector.hpp b/3rdparty/tracy/tracy/client/TracyFastVector.hpp
new file mode 100644
index 0000000..38accc9
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyFastVector.hpp
@@ -0,0 +1,118 @@
+#ifndef __TRACYFASTVECTOR_HPP__
+#define __TRACYFASTVECTOR_HPP__
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "../common/TracyAlloc.hpp"
+#include "../common/TracyForceInline.hpp"
+
+namespace tracy
+{
+
+template<typename T>
+class FastVector
+{
+public:
+ using iterator = T*;
+ using const_iterator = const T*;
+
+ FastVector( size_t capacity )
+ : m_ptr( (T*)tracy_malloc( sizeof( T ) * capacity ) )
+ , m_write( m_ptr )
+ , m_end( m_ptr + capacity )
+ {
+ assert( capacity != 0 );
+ }
+
+ FastVector( const FastVector& ) = delete;
+ FastVector( FastVector&& ) = delete;
+
+ ~FastVector()
+ {
+ tracy_free( m_ptr );
+ }
+
+ FastVector& operator=( const FastVector& ) = delete;
+ FastVector& operator=( FastVector&& ) = delete;
+
+ bool empty() const { return m_ptr == m_write; }
+ size_t size() const { return m_write - m_ptr; }
+
+ T* data() { return m_ptr; }
+ const T* data() const { return m_ptr; };
+
+ T* begin() { return m_ptr; }
+ const T* begin() const { return m_ptr; }
+ T* end() { return m_write; }
+ const T* end() const { return m_write; }
+
+ T& front() { assert( !empty() ); return m_ptr[0]; }
+ const T& front() const { assert( !empty() ); return m_ptr[0]; }
+
+ T& back() { assert( !empty() ); return m_write[-1]; }
+ const T& back() const { assert( !empty() ); return m_write[-1]; }
+
+ T& operator[]( size_t idx ) { return m_ptr[idx]; }
+ const T& operator[]( size_t idx ) const { return m_ptr[idx]; }
+
+ T* push_next()
+ {
+ if( m_write == m_end ) AllocMore();
+ return m_write++;
+ }
+
+ T* prepare_next()
+ {
+ if( m_write == m_end ) AllocMore();
+ return m_write;
+ }
+
+ void commit_next()
+ {
+ m_write++;
+ }
+
+ void clear()
+ {
+ m_write = m_ptr;
+ }
+
+ void swap( FastVector& vec )
+ {
+ const auto ptr1 = m_ptr;
+ const auto ptr2 = vec.m_ptr;
+ const auto write1 = m_write;
+ const auto write2 = vec.m_write;
+ const auto end1 = m_end;
+ const auto end2 = vec.m_end;
+
+ m_ptr = ptr2;
+ vec.m_ptr = ptr1;
+ m_write = write2;
+ vec.m_write = write1;
+ m_end = end2;
+ vec.m_end = end1;
+ }
+
+private:
+ tracy_no_inline void AllocMore()
+ {
+ const auto cap = size_t( m_end - m_ptr ) * 2;
+ const auto size = size_t( m_write - m_ptr );
+ T* ptr = (T*)tracy_malloc( sizeof( T ) * cap );
+ memcpy( ptr, m_ptr, size * sizeof( T ) );
+ tracy_free_fast( m_ptr );
+ m_ptr = ptr;
+ m_write = m_ptr + size;
+ m_end = m_ptr + cap;
+ }
+
+ T* m_ptr;
+ T* m_write;
+ T* m_end;
+};
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyLock.hpp b/3rdparty/tracy/tracy/client/TracyLock.hpp
new file mode 100644
index 0000000..e513cdc
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyLock.hpp
@@ -0,0 +1,548 @@
+#ifndef __TRACYLOCK_HPP__
+#define __TRACYLOCK_HPP__
+
+#include <atomic>
+#include <limits>
+
+#include "../common/TracySystem.hpp"
+#include "../common/TracyAlign.hpp"
+#include "TracyProfiler.hpp"
+
+namespace tracy
+{
+
+class LockableCtx
+{
+public:
+ tracy_force_inline LockableCtx( const SourceLocationData* srcloc )
+ : m_id( GetLockCounter().fetch_add( 1, std::memory_order_relaxed ) )
+#ifdef TRACY_ON_DEMAND
+ , m_lockCount( 0 )
+ , m_active( false )
+#endif
+ {
+ assert( m_id != std::numeric_limits<uint32_t>::max() );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockAnnounce );
+ MemWrite( &item->lockAnnounce.id, m_id );
+ MemWrite( &item->lockAnnounce.time, Profiler::GetTime() );
+ MemWrite( &item->lockAnnounce.lckloc, (uint64_t)srcloc );
+ MemWrite( &item->lockAnnounce.type, LockType::Lockable );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ LockableCtx( const LockableCtx& ) = delete;
+ LockableCtx& operator=( const LockableCtx& ) = delete;
+
+ tracy_force_inline ~LockableCtx()
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockTerminate );
+ MemWrite( &item->lockTerminate.id, m_id );
+ MemWrite( &item->lockTerminate.time, Profiler::GetTime() );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline bool BeforeLock()
+ {
+#ifdef TRACY_ON_DEMAND
+ bool queue = false;
+ const auto locks = m_lockCount.fetch_add( 1, std::memory_order_relaxed );
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( locks == 0 || active )
+ {
+ const bool connected = GetProfiler().IsConnected();
+ if( active != connected ) m_active.store( connected, std::memory_order_relaxed );
+ if( connected ) queue = true;
+ }
+ if( !queue ) return false;
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockWait );
+ MemWrite( &item->lockWait.thread, GetThreadHandle() );
+ MemWrite( &item->lockWait.id, m_id );
+ MemWrite( &item->lockWait.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ return true;
+ }
+
+ tracy_force_inline void AfterLock()
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockObtain );
+ MemWrite( &item->lockObtain.thread, GetThreadHandle() );
+ MemWrite( &item->lockObtain.id, m_id );
+ MemWrite( &item->lockObtain.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void AfterUnlock()
+ {
+#ifdef TRACY_ON_DEMAND
+ m_lockCount.fetch_sub( 1, std::memory_order_relaxed );
+ if( !m_active.load( std::memory_order_relaxed ) ) return;
+ if( !GetProfiler().IsConnected() )
+ {
+ m_active.store( false, std::memory_order_relaxed );
+ return;
+ }
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockRelease );
+ MemWrite( &item->lockRelease.thread, GetThreadHandle() );
+ MemWrite( &item->lockRelease.id, m_id );
+ MemWrite( &item->lockRelease.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void AfterTryLock( bool acquired )
+ {
+#ifdef TRACY_ON_DEMAND
+ if( !acquired ) return;
+
+ bool queue = false;
+ const auto locks = m_lockCount.fetch_add( 1, std::memory_order_relaxed );
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( locks == 0 || active )
+ {
+ const bool connected = GetProfiler().IsConnected();
+ if( active != connected ) m_active.store( connected, std::memory_order_relaxed );
+ if( connected ) queue = true;
+ }
+ if( !queue ) return;
+#endif
+
+ if( acquired )
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockObtain );
+ MemWrite( &item->lockObtain.thread, GetThreadHandle() );
+ MemWrite( &item->lockObtain.id, m_id );
+ MemWrite( &item->lockObtain.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+ }
+
+ tracy_force_inline void Mark( const SourceLocationData* srcloc )
+ {
+#ifdef TRACY_ON_DEMAND
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( !active ) return;
+ const auto connected = GetProfiler().IsConnected();
+ if( !connected )
+ {
+ if( active ) m_active.store( false, std::memory_order_relaxed );
+ return;
+ }
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockMark );
+ MemWrite( &item->lockMark.thread, GetThreadHandle() );
+ MemWrite( &item->lockMark.id, m_id );
+ MemWrite( &item->lockMark.srcloc, (uint64_t)srcloc );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void CustomName( const char* name, size_t size )
+ {
+ assert( size < std::numeric_limits<uint16_t>::max() );
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, name, size );
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockName );
+ MemWrite( &item->lockNameFat.id, m_id );
+ MemWrite( &item->lockNameFat.name, (uint64_t)ptr );
+ MemWrite( &item->lockNameFat.size, (uint16_t)size );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+private:
+ uint32_t m_id;
+
+#ifdef TRACY_ON_DEMAND
+ std::atomic<uint32_t> m_lockCount;
+ std::atomic<bool> m_active;
+#endif
+};
+
+template<class T>
+class Lockable
+{
+public:
+ tracy_force_inline Lockable( const SourceLocationData* srcloc )
+ : m_ctx( srcloc )
+ {
+ }
+
+ Lockable( const Lockable& ) = delete;
+ Lockable& operator=( const Lockable& ) = delete;
+
+ tracy_force_inline void lock()
+ {
+ const auto runAfter = m_ctx.BeforeLock();
+ m_lockable.lock();
+ if( runAfter ) m_ctx.AfterLock();
+ }
+
+ tracy_force_inline void unlock()
+ {
+ m_lockable.unlock();
+ m_ctx.AfterUnlock();
+ }
+
+ tracy_force_inline bool try_lock()
+ {
+ const auto acquired = m_lockable.try_lock();
+ m_ctx.AfterTryLock( acquired );
+ return acquired;
+ }
+
+ tracy_force_inline void Mark( const SourceLocationData* srcloc )
+ {
+ m_ctx.Mark( srcloc );
+ }
+
+ tracy_force_inline void CustomName( const char* name, size_t size )
+ {
+ m_ctx.CustomName( name, size );
+ }
+
+private:
+ T m_lockable;
+ LockableCtx m_ctx;
+};
+
+
+class SharedLockableCtx
+{
+public:
+ tracy_force_inline SharedLockableCtx( const SourceLocationData* srcloc )
+ : m_id( GetLockCounter().fetch_add( 1, std::memory_order_relaxed ) )
+#ifdef TRACY_ON_DEMAND
+ , m_lockCount( 0 )
+ , m_active( false )
+#endif
+ {
+ assert( m_id != std::numeric_limits<uint32_t>::max() );
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockAnnounce );
+ MemWrite( &item->lockAnnounce.id, m_id );
+ MemWrite( &item->lockAnnounce.time, Profiler::GetTime() );
+ MemWrite( &item->lockAnnounce.lckloc, (uint64_t)srcloc );
+ MemWrite( &item->lockAnnounce.type, LockType::SharedLockable );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ SharedLockableCtx( const SharedLockableCtx& ) = delete;
+ SharedLockableCtx& operator=( const SharedLockableCtx& ) = delete;
+
+ tracy_force_inline ~SharedLockableCtx()
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockTerminate );
+ MemWrite( &item->lockTerminate.id, m_id );
+ MemWrite( &item->lockTerminate.time, Profiler::GetTime() );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline bool BeforeLock()
+ {
+#ifdef TRACY_ON_DEMAND
+ bool queue = false;
+ const auto locks = m_lockCount.fetch_add( 1, std::memory_order_relaxed );
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( locks == 0 || active )
+ {
+ const bool connected = GetProfiler().IsConnected();
+ if( active != connected ) m_active.store( connected, std::memory_order_relaxed );
+ if( connected ) queue = true;
+ }
+ if( !queue ) return false;
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockWait );
+ MemWrite( &item->lockWait.thread, GetThreadHandle() );
+ MemWrite( &item->lockWait.id, m_id );
+ MemWrite( &item->lockWait.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ return true;
+ }
+
+ tracy_force_inline void AfterLock()
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockObtain );
+ MemWrite( &item->lockObtain.thread, GetThreadHandle() );
+ MemWrite( &item->lockObtain.id, m_id );
+ MemWrite( &item->lockObtain.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void AfterUnlock()
+ {
+#ifdef TRACY_ON_DEMAND
+ m_lockCount.fetch_sub( 1, std::memory_order_relaxed );
+ if( !m_active.load( std::memory_order_relaxed ) ) return;
+ if( !GetProfiler().IsConnected() )
+ {
+ m_active.store( false, std::memory_order_relaxed );
+ return;
+ }
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockRelease );
+ MemWrite( &item->lockRelease.thread, GetThreadHandle() );
+ MemWrite( &item->lockRelease.id, m_id );
+ MemWrite( &item->lockRelease.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void AfterTryLock( bool acquired )
+ {
+#ifdef TRACY_ON_DEMAND
+ if( !acquired ) return;
+
+ bool queue = false;
+ const auto locks = m_lockCount.fetch_add( 1, std::memory_order_relaxed );
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( locks == 0 || active )
+ {
+ const bool connected = GetProfiler().IsConnected();
+ if( active != connected ) m_active.store( connected, std::memory_order_relaxed );
+ if( connected ) queue = true;
+ }
+ if( !queue ) return;
+#endif
+
+ if( acquired )
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockObtain );
+ MemWrite( &item->lockObtain.thread, GetThreadHandle() );
+ MemWrite( &item->lockObtain.id, m_id );
+ MemWrite( &item->lockObtain.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+ }
+
+ tracy_force_inline bool BeforeLockShared()
+ {
+#ifdef TRACY_ON_DEMAND
+ bool queue = false;
+ const auto locks = m_lockCount.fetch_add( 1, std::memory_order_relaxed );
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( locks == 0 || active )
+ {
+ const bool connected = GetProfiler().IsConnected();
+ if( active != connected ) m_active.store( connected, std::memory_order_relaxed );
+ if( connected ) queue = true;
+ }
+ if( !queue ) return false;
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockSharedWait );
+ MemWrite( &item->lockWait.thread, GetThreadHandle() );
+ MemWrite( &item->lockWait.id, m_id );
+ MemWrite( &item->lockWait.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ return true;
+ }
+
+ tracy_force_inline void AfterLockShared()
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockSharedObtain );
+ MemWrite( &item->lockObtain.thread, GetThreadHandle() );
+ MemWrite( &item->lockObtain.id, m_id );
+ MemWrite( &item->lockObtain.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void AfterUnlockShared()
+ {
+#ifdef TRACY_ON_DEMAND
+ m_lockCount.fetch_sub( 1, std::memory_order_relaxed );
+ if( !m_active.load( std::memory_order_relaxed ) ) return;
+ if( !GetProfiler().IsConnected() )
+ {
+ m_active.store( false, std::memory_order_relaxed );
+ return;
+ }
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockSharedRelease );
+ MemWrite( &item->lockRelease.thread, GetThreadHandle() );
+ MemWrite( &item->lockRelease.id, m_id );
+ MemWrite( &item->lockRelease.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void AfterTryLockShared( bool acquired )
+ {
+#ifdef TRACY_ON_DEMAND
+ if( !acquired ) return;
+
+ bool queue = false;
+ const auto locks = m_lockCount.fetch_add( 1, std::memory_order_relaxed );
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( locks == 0 || active )
+ {
+ const bool connected = GetProfiler().IsConnected();
+ if( active != connected ) m_active.store( connected, std::memory_order_relaxed );
+ if( connected ) queue = true;
+ }
+ if( !queue ) return;
+#endif
+
+ if( acquired )
+ {
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockSharedObtain );
+ MemWrite( &item->lockObtain.thread, GetThreadHandle() );
+ MemWrite( &item->lockObtain.id, m_id );
+ MemWrite( &item->lockObtain.time, Profiler::GetTime() );
+ Profiler::QueueSerialFinish();
+ }
+ }
+
+ tracy_force_inline void Mark( const SourceLocationData* srcloc )
+ {
+#ifdef TRACY_ON_DEMAND
+ const auto active = m_active.load( std::memory_order_relaxed );
+ if( !active ) return;
+ const auto connected = GetProfiler().IsConnected();
+ if( !connected )
+ {
+ if( active ) m_active.store( false, std::memory_order_relaxed );
+ return;
+ }
+#endif
+
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockMark );
+ MemWrite( &item->lockMark.thread, GetThreadHandle() );
+ MemWrite( &item->lockMark.id, m_id );
+ MemWrite( &item->lockMark.srcloc, (uint64_t)srcloc );
+ Profiler::QueueSerialFinish();
+ }
+
+ tracy_force_inline void CustomName( const char* name, size_t size )
+ {
+ assert( size < std::numeric_limits<uint16_t>::max() );
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, name, size );
+ auto item = Profiler::QueueSerial();
+ MemWrite( &item->hdr.type, QueueType::LockName );
+ MemWrite( &item->lockNameFat.id, m_id );
+ MemWrite( &item->lockNameFat.name, (uint64_t)ptr );
+ MemWrite( &item->lockNameFat.size, (uint16_t)size );
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+ Profiler::QueueSerialFinish();
+ }
+
+private:
+ uint32_t m_id;
+
+#ifdef TRACY_ON_DEMAND
+ std::atomic<uint32_t> m_lockCount;
+ std::atomic<bool> m_active;
+#endif
+};
+
+template<class T>
+class SharedLockable
+{
+public:
+ tracy_force_inline SharedLockable( const SourceLocationData* srcloc )
+ : m_ctx( srcloc )
+ {
+ }
+
+ SharedLockable( const SharedLockable& ) = delete;
+ SharedLockable& operator=( const SharedLockable& ) = delete;
+
+ tracy_force_inline void lock()
+ {
+ const auto runAfter = m_ctx.BeforeLock();
+ m_lockable.lock();
+ if( runAfter ) m_ctx.AfterLock();
+ }
+
+ tracy_force_inline void unlock()
+ {
+ m_lockable.unlock();
+ m_ctx.AfterUnlock();
+ }
+
+ tracy_force_inline bool try_lock()
+ {
+ const auto acquired = m_lockable.try_lock();
+ m_ctx.AfterTryLock( acquired );
+ return acquired;
+ }
+
+ tracy_force_inline void lock_shared()
+ {
+ const auto runAfter = m_ctx.BeforeLockShared();
+ m_lockable.lock_shared();
+ if( runAfter ) m_ctx.AfterLockShared();
+ }
+
+ tracy_force_inline void unlock_shared()
+ {
+ m_lockable.unlock_shared();
+ m_ctx.AfterUnlockShared();
+ }
+
+ tracy_force_inline bool try_lock_shared()
+ {
+ const auto acquired = m_lockable.try_lock_shared();
+ m_ctx.AfterTryLockShared( acquired );
+ return acquired;
+ }
+
+ tracy_force_inline void Mark( const SourceLocationData* srcloc )
+ {
+ m_ctx.Mark( srcloc );
+ }
+
+ tracy_force_inline void CustomName( const char* name, size_t size )
+ {
+ m_ctx.CustomName( name, size );
+ }
+
+private:
+ T m_lockable;
+ SharedLockableCtx m_ctx;
+};
+
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyProfiler.cpp b/3rdparty/tracy/tracy/client/TracyProfiler.cpp
new file mode 100644
index 0000000..a9f674b
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyProfiler.cpp
@@ -0,0 +1,4238 @@
+#ifdef TRACY_ENABLE
+
+#ifdef _WIN32
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <winsock2.h>
+# include <windows.h>
+# include <tlhelp32.h>
+# include <inttypes.h>
+# include <intrin.h>
+# include "../common/TracyUwp.hpp"
+#else
+# include <sys/time.h>
+# include <sys/param.h>
+#endif
+
+#ifdef _GNU_SOURCE
+# include <errno.h>
+#endif
+
+#ifdef __linux__
+# include <dirent.h>
+# include <signal.h>
+# include <pthread.h>
+# include <sys/types.h>
+# include <sys/syscall.h>
+#endif
+
+#if defined __APPLE__ || defined BSD
+# include <sys/types.h>
+# include <sys/sysctl.h>
+#endif
+
+#if defined __APPLE__
+# include "TargetConditionals.h"
+# include <mach-o/dyld.h>
+#endif
+
+#ifdef __ANDROID__
+# include <sys/mman.h>
+# include <sys/system_properties.h>
+# include <stdio.h>
+# include <stdint.h>
+# include <algorithm>
+# include <vector>
+#endif
+
+#include <algorithm>
+#include <assert.h>
+#include <atomic>
+#include <chrono>
+#include <limits>
+#include <new>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <thread>
+
+#include "../common/TracyAlign.hpp"
+#include "../common/TracySocket.hpp"
+#include "../common/TracySystem.hpp"
+#include "../common/TracyYield.hpp"
+#include "../common/tracy_lz4.hpp"
+#include "tracy_rpmalloc.hpp"
+#include "TracyCallstack.hpp"
+#include "TracyDxt1.hpp"
+#include "TracyScoped.hpp"
+#include "TracyProfiler.hpp"
+#include "TracyThread.hpp"
+#include "TracyArmCpuTable.hpp"
+#include "TracySysTrace.hpp"
+#include "../TracyC.h"
+
+#ifdef TRACY_PORT
+# ifndef TRACY_DATA_PORT
+# define TRACY_DATA_PORT TRACY_PORT
+# endif
+# ifndef TRACY_BROADCAST_PORT
+# define TRACY_BROADCAST_PORT TRACY_PORT
+# endif
+#endif
+
+#ifdef __APPLE__
+# define TRACY_DELAYED_INIT
+#else
+# ifdef __GNUC__
+# define init_order( val ) __attribute__ ((init_priority(val)))
+# else
+# define init_order(x)
+# endif
+#endif
+
+#if defined _WIN32
+# include <lmcons.h>
+extern "C" typedef LONG (WINAPI *t_RtlGetVersion)( PRTL_OSVERSIONINFOW );
+extern "C" typedef BOOL (WINAPI *t_GetLogicalProcessorInformationEx)( LOGICAL_PROCESSOR_RELATIONSHIP, PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, PDWORD );
+#else
+# include <unistd.h>
+# include <limits.h>
+#endif
+#if defined __linux__
+# include <sys/sysinfo.h>
+# include <sys/utsname.h>
+#endif
+
+#if !defined _WIN32 && ( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+# include <cpuid.h>
+#endif
+
+#if !( ( defined _WIN32 && _WIN32_WINNT >= _WIN32_WINNT_VISTA ) || defined __linux__ )
+# include <mutex>
+#endif
+
+namespace tracy
+{
+
+#ifdef __ANDROID__
+// Implementation helpers of EnsureReadable(address).
+// This is so far only needed on Android, where it is common for libraries to be mapped
+// with only executable, not readable, permissions. Typical example (line from /proc/self/maps):
+/*
+746b63b000-746b6dc000 --xp 00042000 07:48 35 /apex/com.android.runtime/lib64/bionic/libc.so
+*/
+// See https://github.com/wolfpld/tracy/issues/125.
+// To work around this, we parse /proc/self/maps and we use mprotect to set read permissions
+// on any mappings that contain symbols addresses hit by HandleSymbolCodeQuery.
+
+namespace {
+// Holds some information about a single memory mapping.
+struct MappingInfo {
+ // Start of address range. Inclusive.
+ uintptr_t start_address;
+ // End of address range. Exclusive, so the mapping is the half-open interval
+ // [start, end) and its length in bytes is `end - start`. As in /proc/self/maps.
+ uintptr_t end_address;
+ // Read/Write/Executable permissions.
+ bool perm_r, perm_w, perm_x;
+};
+} // anonymous namespace
+
+ // Internal implementation helper for LookUpMapping(address).
+ //
+ // Parses /proc/self/maps returning a vector<MappingInfo>.
+ // /proc/self/maps is assumed to be sorted by ascending address, so the resulting
+ // vector is sorted by ascending address too.
+static std::vector<MappingInfo> ParseMappings()
+{
+ std::vector<MappingInfo> result;
+ FILE* file = fopen( "/proc/self/maps", "r" );
+ if( !file ) return result;
+ char line[1024];
+ while( fgets( line, sizeof( line ), file ) )
+ {
+ uintptr_t start_addr;
+ uintptr_t end_addr;
+ if( sscanf( line, "%lx-%lx", &start_addr, &end_addr ) != 2 ) continue;
+ char* first_space = strchr( line, ' ' );
+ if( !first_space ) continue;
+ char* perm = first_space + 1;
+ char* second_space = strchr( perm, ' ' );
+ if( !second_space || second_space - perm != 4 ) continue;
+ result.emplace_back();
+ auto& mapping = result.back();
+ mapping.start_address = start_addr;
+ mapping.end_address = end_addr;
+ mapping.perm_r = perm[0] == 'r';
+ mapping.perm_w = perm[1] == 'w';
+ mapping.perm_x = perm[2] == 'x';
+ }
+ fclose( file );
+ return result;
+}
+
+// Internal implementation helper for LookUpMapping(address).
+//
+// Takes as input an `address` and a known vector `mappings`, assumed to be
+// sorted by increasing addresses, as /proc/self/maps seems to be.
+// Returns a pointer to the MappingInfo describing the mapping that this
+// address belongs to, or nullptr if the address isn't in `mappings`.
+static MappingInfo* LookUpMapping(std::vector<MappingInfo>& mappings, uintptr_t address)
+{
+ // Comparison function for std::lower_bound. Returns true if all addresses in `m1`
+ // are lower than `addr`.
+ auto Compare = []( const MappingInfo& m1, uintptr_t addr ) {
+ // '<=' because the address ranges are half-open intervals, [start, end).
+ return m1.end_address <= addr;
+ };
+ auto iter = std::lower_bound( mappings.begin(), mappings.end(), address, Compare );
+ if( iter == mappings.end() || iter->start_address > address) {
+ return nullptr;
+ }
+ return &*iter;
+}
+
+// Internal implementation helper for EnsureReadable(address).
+//
+// Takes as input an `address` and returns a pointer to a MappingInfo
+// describing the mapping that this address belongs to, or nullptr if
+// the address isn't in any known mapping.
+//
+// This function is stateful and not reentrant (assumes to be called from
+// only one thread). It holds a vector of mappings parsed from /proc/self/maps.
+//
+// Attempts to react to mappings changes by re-parsing /proc/self/maps.
+static MappingInfo* LookUpMapping(uintptr_t address)
+{
+    // Static state managed by this function. Not constant: we mutate that state as
+ // we turn some mappings readable. Initially parsed once here, updated as needed below.
+ static std::vector<MappingInfo> s_mappings = ParseMappings();
+ MappingInfo* mapping = LookUpMapping( s_mappings, address );
+ if( mapping ) return mapping;
+
+ // This address isn't in any known mapping. Try parsing again, maybe
+ // mappings changed.
+ s_mappings = ParseMappings();
+ return LookUpMapping( s_mappings, address );
+}
+
+// Internal implementation helper for EnsureReadable(address).
+//
+// Attempts to make the specified `mapping` readable if it isn't already.
+// Returns true if and only if the mapping is readable.
+static bool EnsureReadable( MappingInfo& mapping )
+{
+ if( mapping.perm_r )
+ {
+ // The mapping is already readable.
+ return true;
+ }
+ int prot = PROT_READ;
+ if( mapping.perm_w ) prot |= PROT_WRITE;
+ if( mapping.perm_x ) prot |= PROT_EXEC;
+ if( mprotect( reinterpret_cast<void*>( mapping.start_address ),
+ mapping.end_address - mapping.start_address, prot ) == -1 )
+ {
+ // Failed to make the mapping readable. Shouldn't happen, hasn't
+ // been observed yet. If it happened in practice, we should consider
+ // adding a bool to MappingInfo to track this to avoid retrying mprotect
+        // every time on such mappings.
+ return false;
+ }
+ // The mapping is now readable. Update `mapping` so the next call will be fast.
+ mapping.perm_r = true;
+ return true;
+}
+
+// Attempts to set the read permission on the entire mapping containing the
+// specified address. Returns true if and only if the mapping is now readable.
+static bool EnsureReadable( uintptr_t address )
+{
+ MappingInfo* mapping = LookUpMapping(address);
+ return mapping && EnsureReadable( *mapping );
+}
+
+#endif // defined __ANDROID__
+
+#ifndef TRACY_DELAYED_INIT
+
+struct InitTimeWrapper
+{
+ int64_t val;
+};
+
+struct ProducerWrapper
+{
+ tracy::moodycamel::ConcurrentQueue<QueueItem>::ExplicitProducer* ptr;
+};
+
+struct ThreadHandleWrapper
+{
+ uint32_t val;
+};
+#endif
+
+
+#if defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+static inline void CpuId( uint32_t* regs, uint32_t leaf )
+{
+ memset(regs, 0, sizeof(uint32_t) * 4);
+#if defined _WIN32
+ __cpuidex( (int*)regs, leaf, 0 );
+#else
+ __get_cpuid( leaf, regs, regs+1, regs+2, regs+3 );
+#endif
+}
+
+static void InitFailure( const char* msg )
+{
+#if defined _WIN32
+ bool hasConsole = false;
+ bool reopen = false;
+ const auto attached = AttachConsole( ATTACH_PARENT_PROCESS );
+ if( attached )
+ {
+ hasConsole = true;
+ reopen = true;
+ }
+ else
+ {
+ const auto err = GetLastError();
+ if( err == ERROR_ACCESS_DENIED )
+ {
+ hasConsole = true;
+ }
+ }
+ if( hasConsole )
+ {
+ fprintf( stderr, "Tracy Profiler initialization failure: %s\n", msg );
+ if( reopen )
+ {
+ freopen( "CONOUT$", "w", stderr );
+ fprintf( stderr, "Tracy Profiler initialization failure: %s\n", msg );
+ }
+ }
+ else
+ {
+# ifndef TRACY_UWP
+ MessageBoxA( nullptr, msg, "Tracy Profiler initialization failure", MB_ICONSTOP );
+# endif
+ }
+#else
+ fprintf( stderr, "Tracy Profiler initialization failure: %s\n", msg );
+#endif
+ exit( 1 );
+}
+
+static bool CheckHardwareSupportsInvariantTSC()
+{
+ const char* noCheck = GetEnvVar( "TRACY_NO_INVARIANT_CHECK" );
+ if( noCheck && noCheck[0] == '1' ) return true;
+
+ uint32_t regs[4];
+ CpuId( regs, 1 );
+ if( !( regs[3] & ( 1 << 4 ) ) )
+ {
+#if !defined TRACY_TIMER_QPC && !defined TRACY_TIMER_FALLBACK
+ InitFailure( "CPU doesn't support RDTSC instruction." );
+#endif
+ return false;
+ }
+ CpuId( regs, 0x80000007 );
+ if( regs[3] & ( 1 << 8 ) ) return true;
+
+ return false;
+}
+
+#if defined TRACY_TIMER_FALLBACK && defined TRACY_HW_TIMER
+bool HardwareSupportsInvariantTSC()
+{
+ static bool cachedResult = CheckHardwareSupportsInvariantTSC();
+ return cachedResult;
+}
+#endif
+
+static int64_t SetupHwTimer()
+{
+#if !defined TRACY_TIMER_QPC && !defined TRACY_TIMER_FALLBACK
+ if( !CheckHardwareSupportsInvariantTSC() )
+ {
+#if defined _WIN32
+ InitFailure( "CPU doesn't support invariant TSC.\nDefine TRACY_NO_INVARIANT_CHECK=1 to ignore this error, *if you know what you are doing*.\nAlternatively you may rebuild the application with the TRACY_TIMER_QPC or TRACY_TIMER_FALLBACK define to use lower resolution timer." );
+#else
+ InitFailure( "CPU doesn't support invariant TSC.\nDefine TRACY_NO_INVARIANT_CHECK=1 to ignore this error, *if you know what you are doing*.\nAlternatively you may rebuild the application with the TRACY_TIMER_FALLBACK define to use lower resolution timer." );
+#endif
+ }
+#endif
+
+ return Profiler::GetTime();
+}
+#else
+static int64_t SetupHwTimer()
+{
+ return Profiler::GetTime();
+}
+#endif
+
+static const char* GetProcessName()
+{
+ const char* processName = "unknown";
+#ifdef _WIN32
+ static char buf[_MAX_PATH];
+ GetModuleFileNameA( nullptr, buf, _MAX_PATH );
+ const char* ptr = buf;
+ while( *ptr != '\0' ) ptr++;
+ while( ptr > buf && *ptr != '\\' && *ptr != '/' ) ptr--;
+ if( ptr > buf ) ptr++;
+ processName = ptr;
+#elif defined __ANDROID__
+# if __ANDROID_API__ >= 21
+ auto buf = getprogname();
+ if( buf ) processName = buf;
+# endif
+#elif defined __linux__ && defined _GNU_SOURCE
+ if( program_invocation_short_name ) processName = program_invocation_short_name;
+#elif defined __APPLE__ || defined BSD
+ auto buf = getprogname();
+ if( buf ) processName = buf;
+#endif
+ return processName;
+}
+
+static const char* GetProcessExecutablePath()
+{
+#ifdef _WIN32
+ static char buf[_MAX_PATH];
+ GetModuleFileNameA( nullptr, buf, _MAX_PATH );
+ return buf;
+#elif defined __ANDROID__
+ return nullptr;
+#elif defined __linux__ && defined _GNU_SOURCE
+ return program_invocation_name;
+#elif defined __APPLE__
+ static char buf[1024];
+ uint32_t size = 1024;
+ _NSGetExecutablePath( buf, &size );
+ return buf;
+#elif defined __DragonFly__
+ static char buf[1024];
+ readlink( "/proc/curproc/file", buf, 1024 );
+ return buf;
+#elif defined __FreeBSD__
+ static char buf[1024];
+ int mib[4];
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PATHNAME;
+ mib[3] = -1;
+ size_t cb = 1024;
+ sysctl( mib, 4, buf, &cb, nullptr, 0 );
+ return buf;
+#elif defined __NetBSD__
+ static char buf[1024];
+ readlink( "/proc/curproc/exe", buf, 1024 );
+ return buf;
+#else
+ return nullptr;
+#endif
+}
+
+#if defined __linux__ && defined __ARM_ARCH
+static uint32_t GetHex( char*& ptr, int skip )
+{
+ uint32_t ret;
+ ptr += skip;
+ char* end;
+ if( ptr[0] == '0' && ptr[1] == 'x' )
+ {
+ ptr += 2;
+ ret = strtol( ptr, &end, 16 );
+ }
+ else
+ {
+ ret = strtol( ptr, &end, 10 );
+ }
+ ptr = end;
+ return ret;
+}
+#endif
+
+static const char* GetHostInfo()
+{
+ static char buf[1024];
+ auto ptr = buf;
+#if defined _WIN32
+# ifdef TRACY_UWP
+ auto GetVersion = &::GetVersionEx;
+# else
+ auto GetVersion = (t_RtlGetVersion)GetProcAddress( GetModuleHandleA( "ntdll.dll" ), "RtlGetVersion" );
+# endif
+ if( !GetVersion )
+ {
+# ifdef __MINGW32__
+ ptr += sprintf( ptr, "OS: Windows (MingW)\n" );
+# else
+ ptr += sprintf( ptr, "OS: Windows\n" );
+# endif
+ }
+ else
+ {
+ RTL_OSVERSIONINFOW ver = { sizeof( RTL_OSVERSIONINFOW ) };
+ GetVersion( &ver );
+
+# ifdef __MINGW32__
+ ptr += sprintf( ptr, "OS: Windows %i.%i.%i (MingW)\n", (int)ver.dwMajorVersion, (int)ver.dwMinorVersion, (int)ver.dwBuildNumber );
+# else
+ ptr += sprintf( ptr, "OS: Windows %i.%i.%i\n", ver.dwMajorVersion, ver.dwMinorVersion, ver.dwBuildNumber );
+# endif
+ }
+#elif defined __linux__
+ struct utsname utsName;
+ uname( &utsName );
+# if defined __ANDROID__
+ ptr += sprintf( ptr, "OS: Linux %s (Android)\n", utsName.release );
+# else
+ ptr += sprintf( ptr, "OS: Linux %s\n", utsName.release );
+# endif
+#elif defined __APPLE__
+# if TARGET_OS_IPHONE == 1
+ ptr += sprintf( ptr, "OS: Darwin (iOS)\n" );
+# elif TARGET_OS_MAC == 1
+ ptr += sprintf( ptr, "OS: Darwin (OSX)\n" );
+# else
+ ptr += sprintf( ptr, "OS: Darwin (unknown)\n" );
+# endif
+#elif defined __DragonFly__
+ ptr += sprintf( ptr, "OS: BSD (DragonFly)\n" );
+#elif defined __FreeBSD__
+ ptr += sprintf( ptr, "OS: BSD (FreeBSD)\n" );
+#elif defined __NetBSD__
+ ptr += sprintf( ptr, "OS: BSD (NetBSD)\n" );
+#elif defined __OpenBSD__
+ ptr += sprintf( ptr, "OS: BSD (OpenBSD)\n" );
+#else
+ ptr += sprintf( ptr, "OS: unknown\n" );
+#endif
+
+#if defined _MSC_VER
+# if defined __clang__
+ ptr += sprintf( ptr, "Compiler: MSVC clang-cl %i.%i.%i\n", __clang_major__, __clang_minor__, __clang_patchlevel__ );
+# else
+ ptr += sprintf( ptr, "Compiler: MSVC %i\n", _MSC_VER );
+# endif
+#elif defined __clang__
+ ptr += sprintf( ptr, "Compiler: clang %i.%i.%i\n", __clang_major__, __clang_minor__, __clang_patchlevel__ );
+#elif defined __GNUC__
+ ptr += sprintf( ptr, "Compiler: gcc %i.%i\n", __GNUC__, __GNUC_MINOR__ );
+#else
+ ptr += sprintf( ptr, "Compiler: unknown\n" );
+#endif
+
+#if defined _WIN32
+ InitWinSock();
+
+ char hostname[512];
+ gethostname( hostname, 512 );
+
+# ifdef TRACY_UWP
+ const char* user = "";
+# else
+ DWORD userSz = UNLEN+1;
+ char user[UNLEN+1];
+ GetUserNameA( user, &userSz );
+# endif
+
+ ptr += sprintf( ptr, "User: %s@%s\n", user, hostname );
+#else
+ char hostname[_POSIX_HOST_NAME_MAX]{};
+ char user[_POSIX_LOGIN_NAME_MAX]{};
+
+ gethostname( hostname, _POSIX_HOST_NAME_MAX );
+# if defined __ANDROID__
+ const auto login = getlogin();
+ if( login )
+ {
+ strcpy( user, login );
+ }
+ else
+ {
+ memcpy( user, "(?)", 4 );
+ }
+# else
+ getlogin_r( user, _POSIX_LOGIN_NAME_MAX );
+# endif
+
+ ptr += sprintf( ptr, "User: %s@%s\n", user, hostname );
+#endif
+
+#if defined __i386 || defined _M_IX86
+ ptr += sprintf( ptr, "Arch: x86\n" );
+#elif defined __x86_64__ || defined _M_X64
+ ptr += sprintf( ptr, "Arch: x64\n" );
+#elif defined __aarch64__
+ ptr += sprintf( ptr, "Arch: ARM64\n" );
+#elif defined __ARM_ARCH
+ ptr += sprintf( ptr, "Arch: ARM\n" );
+#else
+ ptr += sprintf( ptr, "Arch: unknown\n" );
+#endif
+
+#if defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+ uint32_t regs[4];
+ char cpuModel[4*4*3];
+ auto modelPtr = cpuModel;
+ for( uint32_t i=0x80000002; i<0x80000005; ++i )
+ {
+ CpuId( regs, i );
+ memcpy( modelPtr, regs, sizeof( regs ) ); modelPtr += sizeof( regs );
+ }
+
+ ptr += sprintf( ptr, "CPU: %s\n", cpuModel );
+#elif defined __linux__ && defined __ARM_ARCH
+ bool cpuFound = false;
+ FILE* fcpuinfo = fopen( "/proc/cpuinfo", "rb" );
+ if( fcpuinfo )
+ {
+ enum { BufSize = 4*1024 };
+ char buf[BufSize];
+ const auto sz = fread( buf, 1, BufSize, fcpuinfo );
+ fclose( fcpuinfo );
+ const auto end = buf + sz;
+ auto cptr = buf;
+
+ uint32_t impl = 0;
+ uint32_t var = 0;
+ uint32_t part = 0;
+ uint32_t rev = 0;
+
+ while( end - cptr > 20 )
+ {
+ while( end - cptr > 20 && memcmp( cptr, "CPU ", 4 ) != 0 )
+ {
+ cptr += 4;
+ while( end - cptr > 20 && *cptr != '\n' ) cptr++;
+ cptr++;
+ }
+ if( end - cptr <= 20 ) break;
+ cptr += 4;
+ if( memcmp( cptr, "implementer\t: ", 14 ) == 0 )
+ {
+ if( impl != 0 ) break;
+ impl = GetHex( cptr, 14 );
+ }
+ else if( memcmp( cptr, "variant\t: ", 10 ) == 0 ) var = GetHex( cptr, 10 );
+ else if( memcmp( cptr, "part\t: ", 7 ) == 0 ) part = GetHex( cptr, 7 );
+ else if( memcmp( cptr, "revision\t: ", 11 ) == 0 ) rev = GetHex( cptr, 11 );
+ while( *cptr != '\n' && *cptr != '\0' ) cptr++;
+ cptr++;
+ }
+
+ if( impl != 0 || var != 0 || part != 0 || rev != 0 )
+ {
+ cpuFound = true;
+ ptr += sprintf( ptr, "CPU: %s%s r%ip%i\n", DecodeArmImplementer( impl ), DecodeArmPart( impl, part ), var, rev );
+ }
+ }
+ if( !cpuFound )
+ {
+ ptr += sprintf( ptr, "CPU: unknown\n" );
+ }
+#elif defined __APPLE__ && TARGET_OS_IPHONE == 1
+ {
+ size_t sz;
+ sysctlbyname( "hw.machine", nullptr, &sz, nullptr, 0 );
+ auto str = (char*)tracy_malloc( sz );
+ sysctlbyname( "hw.machine", str, &sz, nullptr, 0 );
+ ptr += sprintf( ptr, "Device: %s\n", DecodeIosDevice( str ) );
+ tracy_free( str );
+ }
+#else
+ ptr += sprintf( ptr, "CPU: unknown\n" );
+#endif
+#ifdef __ANDROID__
+ char deviceModel[PROP_VALUE_MAX+1];
+ char deviceManufacturer[PROP_VALUE_MAX+1];
+ __system_property_get( "ro.product.model", deviceModel );
+ __system_property_get( "ro.product.manufacturer", deviceManufacturer );
+ ptr += sprintf( ptr, "Device: %s %s\n", deviceManufacturer, deviceModel );
+#endif
+
+ ptr += sprintf( ptr, "CPU cores: %i\n", std::thread::hardware_concurrency() );
+
+#if defined _WIN32
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof( statex );
+ GlobalMemoryStatusEx( &statex );
+# ifdef _MSC_VER
+ ptr += sprintf( ptr, "RAM: %I64u MB\n", statex.ullTotalPhys / 1024 / 1024 );
+# else
+ ptr += sprintf( ptr, "RAM: %llu MB\n", statex.ullTotalPhys / 1024 / 1024 );
+# endif
+#elif defined __linux__
+ struct sysinfo sysInfo;
+ sysinfo( &sysInfo );
+ ptr += sprintf( ptr, "RAM: %lu MB\n", sysInfo.totalram / 1024 / 1024 );
+#elif defined __APPLE__
+ size_t memSize;
+ size_t sz = sizeof( memSize );
+ sysctlbyname( "hw.memsize", &memSize, &sz, nullptr, 0 );
+ ptr += sprintf( ptr, "RAM: %zu MB\n", memSize / 1024 / 1024 );
+#elif defined BSD
+ size_t memSize;
+ size_t sz = sizeof( memSize );
+ sysctlbyname( "hw.physmem", &memSize, &sz, nullptr, 0 );
+ ptr += sprintf( ptr, "RAM: %zu MB\n", memSize / 1024 / 1024 );
+#else
+ ptr += sprintf( ptr, "RAM: unknown\n" );
+#endif
+
+ return buf;
+}
+
+static uint64_t GetPid()
+{
+#if defined _WIN32
+ return uint64_t( GetCurrentProcessId() );
+#else
+ return uint64_t( getpid() );
+#endif
+}
+
+void Profiler::AckServerQuery()
+{
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::AckServerQueryNoop );
+ NeedDataSize( QueueDataSize[(int)QueueType::AckServerQueryNoop] );
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::AckServerQueryNoop] );
+}
+
+void Profiler::AckSourceCodeNotAvailable()
+{
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::AckSourceCodeNotAvailable );
+ NeedDataSize( QueueDataSize[(int)QueueType::AckSourceCodeNotAvailable] );
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::AckSourceCodeNotAvailable] );
+}
+
+void Profiler::AckSymbolCodeNotAvailable()
+{
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::AckSymbolCodeNotAvailable );
+ NeedDataSize( QueueDataSize[(int)QueueType::AckSymbolCodeNotAvailable] );
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::AckSymbolCodeNotAvailable] );
+}
+
+static BroadcastMessage& GetBroadcastMessage( const char* procname, size_t pnsz, int& len, int port )
+{
+ static BroadcastMessage msg;
+
+ msg.broadcastVersion = BroadcastVersion;
+ msg.protocolVersion = ProtocolVersion;
+ msg.listenPort = port;
+
+ memcpy( msg.programName, procname, pnsz );
+ memset( msg.programName + pnsz, 0, WelcomeMessageProgramNameSize - pnsz );
+
+ len = int( offsetof( BroadcastMessage, programName ) + pnsz + 1 );
+ return msg;
+}
+
+#if defined _WIN32 && !defined TRACY_UWP
+static DWORD s_profilerThreadId = 0;
+static char s_crashText[1024];
+
+LONG WINAPI CrashFilter( PEXCEPTION_POINTERS pExp )
+{
+ if( !GetProfiler().IsConnected() ) return EXCEPTION_CONTINUE_SEARCH;
+
+ const unsigned ec = pExp->ExceptionRecord->ExceptionCode;
+ auto msgPtr = s_crashText;
+ switch( ec )
+ {
+ case EXCEPTION_ACCESS_VIOLATION:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_ACCESS_VIOLATION (0x%x). ", ec );
+ switch( pExp->ExceptionRecord->ExceptionInformation[0] )
+ {
+ case 0:
+ msgPtr += sprintf( msgPtr, "Read violation at address 0x%" PRIxPTR ".", pExp->ExceptionRecord->ExceptionInformation[1] );
+ break;
+ case 1:
+ msgPtr += sprintf( msgPtr, "Write violation at address 0x%" PRIxPTR ".", pExp->ExceptionRecord->ExceptionInformation[1] );
+ break;
+ case 8:
+ msgPtr += sprintf( msgPtr, "DEP violation at address 0x%" PRIxPTR ".", pExp->ExceptionRecord->ExceptionInformation[1] );
+ break;
+ default:
+ break;
+ }
+ break;
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_ARRAY_BOUNDS_EXCEEDED (0x%x). ", ec );
+ break;
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_DATATYPE_MISALIGNMENT (0x%x). ", ec );
+ break;
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_FLT_DIVIDE_BY_ZERO (0x%x). ", ec );
+ break;
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_ILLEGAL_INSTRUCTION (0x%x). ", ec );
+ break;
+ case EXCEPTION_IN_PAGE_ERROR:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_IN_PAGE_ERROR (0x%x). ", ec );
+ break;
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_INT_DIVIDE_BY_ZERO (0x%x). ", ec );
+ break;
+ case EXCEPTION_PRIV_INSTRUCTION:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_PRIV_INSTRUCTION (0x%x). ", ec );
+ break;
+ case EXCEPTION_STACK_OVERFLOW:
+ msgPtr += sprintf( msgPtr, "Exception EXCEPTION_STACK_OVERFLOW (0x%x). ", ec );
+ break;
+ default:
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ {
+ GetProfiler().SendCallstack( 60, "KiUserExceptionDispatcher" );
+
+ TracyQueuePrepare( QueueType::CrashReport );
+ item->crashReport.time = Profiler::GetTime();
+ item->crashReport.text = (uint64_t)s_crashText;
+ TracyQueueCommit( crashReportThread );
+ }
+
+ HANDLE h = CreateToolhelp32Snapshot( TH32CS_SNAPTHREAD, 0 );
+ if( h == INVALID_HANDLE_VALUE ) return EXCEPTION_CONTINUE_SEARCH;
+
+ THREADENTRY32 te = { sizeof( te ) };
+ if( !Thread32First( h, &te ) )
+ {
+ CloseHandle( h );
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ const auto pid = GetCurrentProcessId();
+ const auto tid = GetCurrentThreadId();
+
+ do
+ {
+ if( te.th32OwnerProcessID == pid && te.th32ThreadID != tid && te.th32ThreadID != s_profilerThreadId )
+ {
+ HANDLE th = OpenThread( THREAD_SUSPEND_RESUME, FALSE, te.th32ThreadID );
+ if( th != INVALID_HANDLE_VALUE )
+ {
+ SuspendThread( th );
+ CloseHandle( th );
+ }
+ }
+ }
+ while( Thread32Next( h, &te ) );
+ CloseHandle( h );
+
+ {
+ TracyLfqPrepare( QueueType::Crash );
+ TracyLfqCommit;
+ }
+
+ std::this_thread::sleep_for( std::chrono::milliseconds( 500 ) );
+ GetProfiler().RequestShutdown();
+ while( !GetProfiler().HasShutdownFinished() ) { std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) ); };
+
+ TerminateProcess( GetCurrentProcess(), 1 );
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+#endif
+
+#ifdef __linux__
+# ifndef TRACY_CRASH_SIGNAL
+# define TRACY_CRASH_SIGNAL SIGPWR
+# endif
+
+static long s_profilerTid = 0;
+static char s_crashText[1024];
+static std::atomic<bool> s_alreadyCrashed( false );
+
+static void ThreadFreezer( int /*signal*/ )
+{
+ for(;;) sleep( 1000 );
+}
+
+static inline void HexPrint( char*& ptr, uint64_t val )
+{
+ if( val == 0 )
+ {
+ *ptr++ = '0';
+ return;
+ }
+
+ static const char HexTable[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+ char buf[16];
+ auto bptr = buf;
+
+ do
+ {
+ *bptr++ = HexTable[val%16];
+ val /= 16;
+ }
+ while( val > 0 );
+
+ do
+ {
+ *ptr++ = *--bptr;
+ }
+ while( bptr != buf );
+}
+
+static void CrashHandler( int signal, siginfo_t* info, void* /*ucontext*/ )
+{
+ bool expected = false;
+ if( !s_alreadyCrashed.compare_exchange_strong( expected, true ) ) ThreadFreezer( signal );
+
+ struct sigaction act = {};
+ act.sa_handler = SIG_DFL;
+ sigaction( SIGABRT, &act, nullptr );
+
+ auto msgPtr = s_crashText;
+ switch( signal )
+ {
+ case SIGILL:
+ strcpy( msgPtr, "Illegal Instruction.\n" );
+ while( *msgPtr ) msgPtr++;
+ switch( info->si_code )
+ {
+ case ILL_ILLOPC:
+ strcpy( msgPtr, "Illegal opcode.\n" );
+ break;
+ case ILL_ILLOPN:
+ strcpy( msgPtr, "Illegal operand.\n" );
+ break;
+ case ILL_ILLADR:
+ strcpy( msgPtr, "Illegal addressing mode.\n" );
+ break;
+ case ILL_ILLTRP:
+ strcpy( msgPtr, "Illegal trap.\n" );
+ break;
+ case ILL_PRVOPC:
+ strcpy( msgPtr, "Privileged opcode.\n" );
+ break;
+ case ILL_PRVREG:
+ strcpy( msgPtr, "Privileged register.\n" );
+ break;
+ case ILL_COPROC:
+ strcpy( msgPtr, "Coprocessor error.\n" );
+ break;
+ case ILL_BADSTK:
+ strcpy( msgPtr, "Internal stack error.\n" );
+ break;
+ default:
+ break;
+ }
+ break;
+ case SIGFPE:
+ strcpy( msgPtr, "Floating-point exception.\n" );
+ while( *msgPtr ) msgPtr++;
+ switch( info->si_code )
+ {
+ case FPE_INTDIV:
+ strcpy( msgPtr, "Integer divide by zero.\n" );
+ break;
+ case FPE_INTOVF:
+ strcpy( msgPtr, "Integer overflow.\n" );
+ break;
+ case FPE_FLTDIV:
+ strcpy( msgPtr, "Floating-point divide by zero.\n" );
+ break;
+ case FPE_FLTOVF:
+ strcpy( msgPtr, "Floating-point overflow.\n" );
+ break;
+ case FPE_FLTUND:
+ strcpy( msgPtr, "Floating-point underflow.\n" );
+ break;
+ case FPE_FLTRES:
+ strcpy( msgPtr, "Floating-point inexact result.\n" );
+ break;
+ case FPE_FLTINV:
+ strcpy( msgPtr, "Floating-point invalid operation.\n" );
+ break;
+ case FPE_FLTSUB:
+ strcpy( msgPtr, "Subscript out of range.\n" );
+ break;
+ default:
+ break;
+ }
+ break;
+ case SIGSEGV:
+ strcpy( msgPtr, "Invalid memory reference.\n" );
+ while( *msgPtr ) msgPtr++;
+ switch( info->si_code )
+ {
+ case SEGV_MAPERR:
+ strcpy( msgPtr, "Address not mapped to object.\n" );
+ break;
+ case SEGV_ACCERR:
+ strcpy( msgPtr, "Invalid permissions for mapped object.\n" );
+ break;
+# ifdef SEGV_BNDERR
+ case SEGV_BNDERR:
+ strcpy( msgPtr, "Failed address bound checks.\n" );
+ break;
+# endif
+# ifdef SEGV_PKUERR
+ case SEGV_PKUERR:
+ strcpy( msgPtr, "Access was denied by memory protection keys.\n" );
+ break;
+# endif
+ default:
+ break;
+ }
+ break;
+ case SIGPIPE:
+ strcpy( msgPtr, "Broken pipe.\n" );
+ while( *msgPtr ) msgPtr++;
+ break;
+ case SIGBUS:
+ strcpy( msgPtr, "Bus error.\n" );
+ while( *msgPtr ) msgPtr++;
+ switch( info->si_code )
+ {
+ case BUS_ADRALN:
+ strcpy( msgPtr, "Invalid address alignment.\n" );
+ break;
+ case BUS_ADRERR:
+ strcpy( msgPtr, "Nonexistent physical address.\n" );
+ break;
+ case BUS_OBJERR:
+ strcpy( msgPtr, "Object-specific hardware error.\n" );
+ break;
+# ifdef BUS_MCEERR_AR
+ case BUS_MCEERR_AR:
+ strcpy( msgPtr, "Hardware memory error consumed on a machine check; action required.\n" );
+ break;
+# endif
+# ifdef BUS_MCEERR_AO
+ case BUS_MCEERR_AO:
+ strcpy( msgPtr, "Hardware memory error detected in process but not consumed; action optional.\n" );
+ break;
+# endif
+ default:
+ break;
+ }
+ break;
+ case SIGABRT:
+ strcpy( msgPtr, "Abort signal from abort().\n" );
+ break;
+ default:
+ abort();
+ }
+ while( *msgPtr ) msgPtr++;
+
+ if( signal != SIGPIPE )
+ {
+ strcpy( msgPtr, "Fault address: 0x" );
+ while( *msgPtr ) msgPtr++;
+ HexPrint( msgPtr, uint64_t( info->si_addr ) );
+ *msgPtr++ = '\n';
+ }
+
+ {
+ GetProfiler().SendCallstack( 60, "__kernel_rt_sigreturn" );
+
+ TracyQueuePrepare( QueueType::CrashReport );
+ item->crashReport.time = Profiler::GetTime();
+ item->crashReport.text = (uint64_t)s_crashText;
+ TracyQueueCommit( crashReportThread );
+ }
+
+ DIR* dp = opendir( "/proc/self/task" );
+ if( !dp ) abort();
+
+ const auto selfTid = syscall( SYS_gettid );
+
+ struct dirent* ep;
+ while( ( ep = readdir( dp ) ) != nullptr )
+ {
+ if( ep->d_name[0] == '.' ) continue;
+ int tid = atoi( ep->d_name );
+ if( tid != selfTid && tid != s_profilerTid )
+ {
+ syscall( SYS_tkill, tid, TRACY_CRASH_SIGNAL );
+ }
+ }
+ closedir( dp );
+
+ {
+ TracyLfqPrepare( QueueType::Crash );
+ TracyLfqCommit;
+ }
+
+ std::this_thread::sleep_for( std::chrono::milliseconds( 500 ) );
+ GetProfiler().RequestShutdown();
+ while( !GetProfiler().HasShutdownFinished() ) { std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) ); };
+
+ abort();
+}
+#endif
+
+
+enum { QueuePrealloc = 256 * 1024 };
+
+static Profiler* s_instance = nullptr;
+static Thread* s_thread;
+#ifndef TRACY_NO_FRAME_IMAGE
+static Thread* s_compressThread;
+#endif
+#ifdef TRACY_HAS_CALLSTACK
+static Thread* s_symbolThread;
+std::atomic<bool> s_symbolThreadGone { false };
+#endif
+#ifdef TRACY_HAS_SYSTEM_TRACING
+static Thread* s_sysTraceThread = nullptr;
+#endif
+
+TRACY_API int64_t GetFrequencyQpc()
+{
+#if defined _WIN32
+ LARGE_INTEGER t;
+ QueryPerformanceFrequency( &t );
+ return t.QuadPart;
+#else
+ return 0;
+#endif
+}
+
+#ifdef TRACY_DELAYED_INIT
+struct ThreadNameData;
+TRACY_API moodycamel::ConcurrentQueue<QueueItem>& GetQueue();
+
+struct ProfilerData
+{
+ int64_t initTime = SetupHwTimer();
+ moodycamel::ConcurrentQueue<QueueItem> queue;
+ Profiler profiler;
+ std::atomic<uint32_t> lockCounter { 0 };
+ std::atomic<uint8_t> gpuCtxCounter { 0 };
+ std::atomic<ThreadNameData*> threadNameData { nullptr };
+};
+
+struct ProducerWrapper
+{
+ ProducerWrapper( ProfilerData& data ) : detail( data.queue ), ptr( data.queue.get_explicit_producer( detail ) ) {}
+ moodycamel::ProducerToken detail;
+ tracy::moodycamel::ConcurrentQueue<QueueItem>::ExplicitProducer* ptr;
+};
+
+struct ProfilerThreadData
+{
+ ProfilerThreadData( ProfilerData& data ) : token( data ), gpuCtx( { nullptr } ) {}
+ ProducerWrapper token;
+ GpuCtxWrapper gpuCtx;
+# ifdef TRACY_ON_DEMAND
+ LuaZoneState luaZoneState;
+# endif
+};
+
+std::atomic<int> RpInitDone { 0 };
+std::atomic<int> RpInitLock { 0 };
+thread_local bool RpThreadInitDone = false;
+thread_local bool RpThreadShutdown = false;
+
+# ifdef TRACY_MANUAL_LIFETIME
+ProfilerData* s_profilerData = nullptr;
+static ProfilerThreadData& GetProfilerThreadData();
+TRACY_API void StartupProfiler()
+{
+ s_profilerData = (ProfilerData*)tracy_malloc( sizeof( ProfilerData ) );
+ new (s_profilerData) ProfilerData();
+ s_profilerData->profiler.SpawnWorkerThreads();
+ GetProfilerThreadData().token = ProducerWrapper( *s_profilerData );
+}
+static ProfilerData& GetProfilerData()
+{
+ assert( s_profilerData );
+ return *s_profilerData;
+}
+// Manually tears the profiler down (TRACY_MANUAL_LIFETIME): destroys and
+// frees the global ProfilerData, finalizes rpmalloc, and resets the init
+// flags so a subsequent StartupProfiler() can run again.
+TRACY_API void ShutdownProfiler()
+{
+    s_profilerData->~ProfilerData();
+    tracy_free( s_profilerData );
+    s_profilerData = nullptr;
+    rpmalloc_finalize();
+    RpThreadInitDone = false;
+    RpInitDone.store( 0, std::memory_order_release );
+}
+# else
+static std::atomic<int> profilerDataLock { 0 };
+static std::atomic<ProfilerData*> profilerData { nullptr };
+
+// Lazily creates the global ProfilerData on first use. A spin lock
+// (profilerDataLock: 0 = free, 1 = held) ensures exactly one thread
+// performs the construction; the pointer is re-checked under the lock.
+static ProfilerData& GetProfilerData()
+{
+    auto ptr = profilerData.load( std::memory_order_acquire );
+    if( !ptr )
+    {
+        int expected = 0;
+        // Spin until the lock transitions 0 -> 1 for this thread.
+        // NOTE(review): the CAS success order is release; acquire is the
+        // conventional ordering when taking a lock — confirm intentional.
+        while( !profilerDataLock.compare_exchange_weak( expected, 1, std::memory_order_release, std::memory_order_relaxed ) ) { expected = 0; YieldThread(); }
+        // Another thread may have constructed it while we waited.
+        ptr = profilerData.load( std::memory_order_acquire );
+        if( !ptr )
+        {
+            ptr = (ProfilerData*)tracy_malloc( sizeof( ProfilerData ) );
+            new (ptr) ProfilerData();
+            profilerData.store( ptr, std::memory_order_release );
+        }
+        profilerDataLock.store( 0, std::memory_order_release );
+    }
+    return *ptr;
+}
+# endif
+
+// GCC prior to 8.4 had a bug with function-inline thread_local variables. Versions of glibc beginning with
+// 2.18 may attempt to work around this issue, which manifests as a crash while running static destructors
+// if this function is compiled into a shared object. Unfortunately, centos7 ships with glibc 2.17. If running
+// on old GCC, use the old-fashioned way as a workaround
+// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85400
+#if defined(__GNUC__) && ((__GNUC__ < 8) || ((__GNUC__ == 8) && (__GNUC_MINOR__ < 4)))
+// pthread-key-based replacement for a function-local thread_local, used on
+// old GCC (see the bug note above). Each thread's ProfilerThreadData is
+// allocated on first access and destroyed via the key's destructor callback
+// when the thread exits.
+struct ProfilerThreadDataKey
+{
+public:
+    ProfilerThreadDataKey()
+    {
+        int val = pthread_key_create(&m_key, sDestructor);
+        static_cast<void>(val); // unused
+        assert(val == 0);
+    }
+    ~ProfilerThreadDataKey()
+    {
+        int val = pthread_key_delete(m_key);
+        static_cast<void>(val); // unused
+        assert(val == 0);
+    }
+    // Returns the calling thread's ProfilerThreadData, creating it on
+    // first use with tracy_malloc + placement new.
+    ProfilerThreadData& get()
+    {
+        void* p = pthread_getspecific(m_key);
+        if (!p)
+        {
+            p = (ProfilerThreadData*)tracy_malloc( sizeof( ProfilerThreadData ) );
+            new (p) ProfilerThreadData(GetProfilerData());
+            pthread_setspecific(m_key, p);
+        }
+        return *static_cast<ProfilerThreadData*>(p);
+    }
+private:
+    pthread_key_t m_key;
+
+    // Invoked by pthreads at thread exit for each non-null key value.
+    static void sDestructor(void* p)
+    {
+        ((ProfilerThreadData*)p)->~ProfilerThreadData();
+        tracy_free(p);
+    }
+};
+
+// Old-GCC variant: per-thread data is keyed off a single static pthread key.
+static ProfilerThreadData& GetProfilerThreadData()
+{
+    static ProfilerThreadDataKey key;
+    return key.get();
+}
+#else
+// Standard variant: function-local thread_local, constructed on each
+// thread's first call.
+static ProfilerThreadData& GetProfilerThreadData()
+{
+    thread_local ProfilerThreadData data( GetProfilerData() );
+    return data;
+}
+#endif
+
+// Accessors for the delayed-init globals; these route through the lazily
+// constructed ProfilerData / per-thread ProfilerThreadData above.
+TRACY_API moodycamel::ConcurrentQueue<QueueItem>::ExplicitProducer* GetToken() { return GetProfilerThreadData().token.ptr; }
+TRACY_API Profiler& GetProfiler() { return GetProfilerData().profiler; }
+TRACY_API moodycamel::ConcurrentQueue<QueueItem>& GetQueue() { return GetProfilerData().queue; }
+TRACY_API int64_t GetInitTime() { return GetProfilerData().initTime; }
+TRACY_API std::atomic<uint32_t>& GetLockCounter() { return GetProfilerData().lockCounter; }
+TRACY_API std::atomic<uint8_t>& GetGpuCtxCounter() { return GetProfilerData().gpuCtxCounter; }
+TRACY_API GpuCtxWrapper& GetGpuCtx() { return GetProfilerThreadData().gpuCtx; }
+TRACY_API uint32_t GetThreadHandle() { return detail::GetThreadHandleImpl(); }
+std::atomic<ThreadNameData*>& GetThreadNameData() { return GetProfilerData().threadNameData; }
+
+# ifdef TRACY_ON_DEMAND
+TRACY_API LuaZoneState& GetLuaZoneState() { return GetProfilerThreadData().luaZoneState; }
+# endif
+
+# ifndef TRACY_MANUAL_LIFETIME
+namespace
+{
+ const auto& __profiler_init = GetProfiler();
+}
+# endif
+
+#else
+
+// MSVC static initialization order solution. gcc/clang uses init_order() to avoid all this.
+
+// 1a. But s_queue is needed for initialization of variables in point 2.
+extern moodycamel::ConcurrentQueue<QueueItem> s_queue;
+
+// 2. If these variables would be in the .CRT$XCB section, they would be initialized only in main thread.
+thread_local moodycamel::ProducerToken init_order(107) s_token_detail( s_queue );
+thread_local ProducerWrapper init_order(108) s_token { s_queue.get_explicit_producer( s_token_detail ) };
+thread_local ThreadHandleWrapper init_order(104) s_threadHandle { detail::GetThreadHandleImpl() };
+
+# ifdef _MSC_VER
+// 1. Initialize these static variables before all other variables.
+// Everything below is placed in .CRT$XCB so it runs before normal static
+// initializers; the init_order() attribute does the same job on gcc/clang.
+#  pragma warning( disable : 4075 )
+#  pragma init_seg( ".CRT$XCB" )
+# endif
+
+static InitTimeWrapper init_order(101) s_initTime { SetupHwTimer() };
+std::atomic<int> init_order(102) RpInitDone( 0 );
+std::atomic<int> init_order(102) RpInitLock( 0 );
+thread_local bool RpThreadInitDone = false;
+thread_local bool RpThreadShutdown = false;
+moodycamel::ConcurrentQueue<QueueItem> init_order(103) s_queue( QueuePrealloc );
+std::atomic<uint32_t> init_order(104) s_lockCounter( 0 );
+std::atomic<uint8_t> init_order(104) s_gpuCtxCounter( 0 );
+
+thread_local GpuCtxWrapper init_order(104) s_gpuCtx { nullptr };
+
+struct ThreadNameData;
+static std::atomic<ThreadNameData*> init_order(104) s_threadNameDataInstance( nullptr );
+std::atomic<ThreadNameData*>& s_threadNameData = s_threadNameDataInstance;
+
+# ifdef TRACY_ON_DEMAND
+thread_local LuaZoneState init_order(104) s_luaZoneState { 0, false };
+# endif
+
+// The Profiler itself is constructed after the queue and counters above.
+static Profiler init_order(105) s_profiler;
+
+// Accessors for the statically initialized globals (non-delayed-init path).
+TRACY_API moodycamel::ConcurrentQueue<QueueItem>::ExplicitProducer* GetToken() { return s_token.ptr; }
+TRACY_API Profiler& GetProfiler() { return s_profiler; }
+TRACY_API moodycamel::ConcurrentQueue<QueueItem>& GetQueue() { return s_queue; }
+TRACY_API int64_t GetInitTime() { return s_initTime.val; }
+TRACY_API std::atomic<uint32_t>& GetLockCounter() { return s_lockCounter; }
+TRACY_API std::atomic<uint8_t>& GetGpuCtxCounter() { return s_gpuCtxCounter; }
+TRACY_API GpuCtxWrapper& GetGpuCtx() { return s_gpuCtx; }
+TRACY_API uint32_t GetThreadHandle() { return s_threadHandle.val; }
+
+std::atomic<ThreadNameData*>& GetThreadNameData() { return s_threadNameData; }
+
+# ifdef TRACY_ON_DEMAND
+TRACY_API LuaZoneState& GetLuaZoneState() { return s_luaZoneState; }
+# endif
+#endif
+
+// True once the Profiler constructor has run and until its destructor clears
+// s_instance; the allocator check reflects the per-thread rpmalloc shutdown flag.
+TRACY_API bool ProfilerAvailable() { return s_instance != nullptr; }
+TRACY_API bool ProfilerAllocatorAvailable() { return !RpThreadShutdown; }
+
+// Constructs the singleton Profiler: records the main thread and epoch,
+// allocates the LZ4 stream and transfer buffers, calibrates timers, reads
+// the TRACY_NO_EXIT / TRACY_PORT environment overrides, and (unless manual
+// lifetime is requested) spawns the worker threads.
+Profiler::Profiler()
+    : m_timeBegin( 0 )
+    , m_mainThread( detail::GetThreadHandleImpl() )
+    , m_epoch( std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch() ).count() )
+    , m_shutdown( false )
+    , m_shutdownManual( false )
+    , m_shutdownFinished( false )
+    , m_sock( nullptr )
+    , m_broadcast( nullptr )
+    , m_noExit( false )
+    , m_userPort( 0 )
+    , m_zoneId( 1 )
+    , m_samplingPeriod( 0 )
+    , m_stream( LZ4_createStream() )
+    , m_buffer( (char*)tracy_malloc( TargetFrameSize*3 ) )
+    , m_bufferOffset( 0 )
+    , m_bufferStart( 0 )
+    , m_lz4Buf( (char*)tracy_malloc( LZ4Size + sizeof( lz4sz_t ) ) )
+    , m_serialQueue( 1024*1024 )
+    , m_serialDequeue( 1024*1024 )
+#ifndef TRACY_NO_FRAME_IMAGE
+    , m_fiQueue( 16 )
+    , m_fiDequeue( 16 )
+#endif
+    , m_symbolQueue( 8*1024 )
+    , m_frameCount( 0 )
+    , m_isConnected( false )
+#ifdef TRACY_ON_DEMAND
+    , m_connectionId( 0 )
+    , m_deferredQueue( 64*1024 )
+#endif
+    , m_paramCallback( nullptr )
+    , m_queryData( nullptr )
+    , m_crashHandlerInstalled( false )
+{
+    // Singleton: exactly one Profiler may exist per process.
+    assert( !s_instance );
+    s_instance = this;
+
+#ifndef TRACY_DELAYED_INIT
+#  ifdef _MSC_VER
+    // 3. But these variables need to be initialized in main thread within the .CRT$XCB section. Do it here.
+    s_token_detail = moodycamel::ProducerToken( s_queue );
+    s_token = ProducerWrapper { s_queue.get_explicit_producer( s_token_detail ) };
+    s_threadHandle = ThreadHandleWrapper { m_mainThread };
+#  endif
+#endif
+
+    CalibrateTimer();
+    CalibrateDelay();
+    ReportTopology();
+
+#ifndef TRACY_NO_EXIT
+    // TRACY_NO_EXIT=1 keeps the process alive until a server has collected the data.
+    const char* noExitEnv = GetEnvVar( "TRACY_NO_EXIT" );
+    if( noExitEnv && noExitEnv[0] == '1' )
+    {
+        m_noExit = true;
+    }
+#endif
+
+    // Optional override of the data port via environment.
+    const char* userPort = GetEnvVar( "TRACY_PORT" );
+    if( userPort )
+    {
+        m_userPort = atoi( userPort );
+    }
+
+#if !defined(TRACY_DELAYED_INIT) || !defined(TRACY_MANUAL_LIFETIME)
+    SpawnWorkerThreads();
+#endif
+}
+
+// Starts all profiler worker threads (system tracing, main worker, frame
+// image compression, symbol resolution), installs crash/exception handlers,
+// and finally publishes m_timeBegin — which the workers spin on before
+// doing any real work.
+void Profiler::SpawnWorkerThreads()
+{
+#ifdef TRACY_HAS_SYSTEM_TRACING
+    // Only spawn the system trace thread if system tracing could be started.
+    if( SysTraceStart( m_samplingPeriod ) )
+    {
+        s_sysTraceThread = (Thread*)tracy_malloc( sizeof( Thread ) );
+        new(s_sysTraceThread) Thread( SysTraceWorker, nullptr );
+        std::this_thread::sleep_for( std::chrono::milliseconds( 1 ) );
+    }
+#endif
+
+    s_thread = (Thread*)tracy_malloc( sizeof( Thread ) );
+    new(s_thread) Thread( LaunchWorker, this );
+
+#ifndef TRACY_NO_FRAME_IMAGE
+    s_compressThread = (Thread*)tracy_malloc( sizeof( Thread ) );
+    new(s_compressThread) Thread( LaunchCompressWorker, this );
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+    s_symbolThread = (Thread*)tracy_malloc( sizeof( Thread ) );
+    new(s_symbolThread) Thread( LaunchSymbolWorker, this );
+#endif
+
+#if defined _WIN32 && !defined TRACY_UWP
+    // Crash reporting: route unhandled exceptions through CrashFilter.
+    s_profilerThreadId = GetThreadId( s_thread->Handle() );
+    m_exceptionHandler = AddVectoredExceptionHandler( 1, CrashFilter );
+#endif
+
+#ifdef __linux__
+    // Install signal handlers, saving the previous dispositions so the
+    // destructor can restore them.
+    struct sigaction threadFreezer = {};
+    threadFreezer.sa_handler = ThreadFreezer;
+    sigaction( TRACY_CRASH_SIGNAL, &threadFreezer, &m_prevSignal.pwr );
+
+    struct sigaction crashHandler = {};
+    crashHandler.sa_sigaction = CrashHandler;
+    crashHandler.sa_flags = SA_SIGINFO;
+    sigaction( SIGILL, &crashHandler, &m_prevSignal.ill );
+    sigaction( SIGFPE, &crashHandler, &m_prevSignal.fpe );
+    sigaction( SIGSEGV, &crashHandler, &m_prevSignal.segv );
+    sigaction( SIGPIPE, &crashHandler, &m_prevSignal.pipe );
+    sigaction( SIGBUS, &crashHandler, &m_prevSignal.bus );
+    sigaction( SIGABRT, &crashHandler, &m_prevSignal.abrt );
+#endif
+
+    m_crashHandlerInstalled = true;
+
+#ifdef TRACY_HAS_CALLSTACK
+    InitCallstack();
+#endif
+
+    // Workers wait for this to become non-zero before proceeding.
+    m_timeBegin.store( GetTime(), std::memory_order_relaxed );
+}
+
+// Shuts the profiler down: signals the workers via m_shutdown, restores
+// crash/signal handlers, joins and frees every worker thread in reverse
+// spawn order, then releases buffers, the LZ4 stream, and any open sockets.
+Profiler::~Profiler()
+{
+    m_shutdown.store( true, std::memory_order_relaxed );
+
+#if defined _WIN32 && !defined TRACY_UWP
+    if( m_crashHandlerInstalled ) RemoveVectoredExceptionHandler( m_exceptionHandler );
+#endif
+
+#ifdef __linux__
+    // Restore the signal dispositions saved in SpawnWorkerThreads().
+    if( m_crashHandlerInstalled )
+    {
+        sigaction( TRACY_CRASH_SIGNAL, &m_prevSignal.pwr, nullptr );
+        sigaction( SIGILL, &m_prevSignal.ill, nullptr );
+        sigaction( SIGFPE, &m_prevSignal.fpe, nullptr );
+        sigaction( SIGSEGV, &m_prevSignal.segv, nullptr );
+        sigaction( SIGPIPE, &m_prevSignal.pipe, nullptr );
+        sigaction( SIGBUS, &m_prevSignal.bus, nullptr );
+        sigaction( SIGABRT, &m_prevSignal.abrt, nullptr );
+    }
+#endif
+
+#ifdef TRACY_HAS_SYSTEM_TRACING
+    if( s_sysTraceThread )
+    {
+        SysTraceStop();
+        s_sysTraceThread->~Thread();
+        tracy_free( s_sysTraceThread );
+    }
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+    s_symbolThread->~Thread();
+    tracy_free( s_symbolThread );
+    // The worker thread waits on this flag before taking over symbol resolution.
+    s_symbolThreadGone.store( true, std::memory_order_release );
+#endif
+
+#ifndef TRACY_NO_FRAME_IMAGE
+    s_compressThread->~Thread();
+    tracy_free( s_compressThread );
+#endif
+
+    s_thread->~Thread();
+    tracy_free( s_thread );
+
+    tracy_free( m_lz4Buf );
+    tracy_free( m_buffer );
+    LZ4_freeStream( (LZ4_stream_t*)m_stream );
+
+    if( m_sock )
+    {
+        m_sock->~Socket();
+        tracy_free( m_sock );
+    }
+
+    if( m_broadcast )
+    {
+        m_broadcast->~UdpBroadcast();
+        tracy_free( m_broadcast );
+    }
+
+    assert( s_instance );
+    s_instance = nullptr;
+}
+
+// True once the destructor (or a manual shutdown) has raised the shutdown
+// flag; polled by the worker loops.
+bool Profiler::ShouldExit()
+{
+    return s_instance->m_shutdown.load( std::memory_order_relaxed );
+}
+
+// Main profiler worker thread. Selects the data/broadcast ports, builds the
+// welcome message, listens for server connections, performs the handshake,
+// then pumps queue items to the server until disconnect or shutdown. On
+// shutdown it drains the remaining queues and sends a Terminate notice.
+void Profiler::Worker()
+{
+#ifdef __linux__
+    s_profilerTid = syscall( SYS_gettid );
+#endif
+
+    ThreadExitHandler threadExitHandler;
+
+    SetThreadName( "Tracy Profiler" );
+
+    // Port selection: explicit TRACY_PORT env / TRACY_DATA_PORT macro wins;
+    // otherwise start at 8086 and search upward for a free port.
+#ifdef TRACY_DATA_PORT
+    const bool dataPortSearch = false;
+    auto dataPort = m_userPort != 0 ? m_userPort : TRACY_DATA_PORT;
+#else
+    const bool dataPortSearch = m_userPort == 0;
+    auto dataPort = m_userPort != 0 ? m_userPort : 8086;
+#endif
+#ifdef TRACY_BROADCAST_PORT
+    const auto broadcastPort = TRACY_BROADCAST_PORT;
+#else
+    const auto broadcastPort = 8086;
+#endif
+
+    // Wait until SpawnWorkerThreads() publishes the start timestamp.
+    while( m_timeBegin.load( std::memory_order_relaxed ) == 0 ) std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
+
+    rpmalloc_thread_initialize();
+
+    // Executable modification time, used in the welcome message.
+    m_exectime = 0;
+    const auto execname = GetProcessExecutablePath();
+    if( execname )
+    {
+        struct stat st;
+        if( stat( execname, &st ) == 0 )
+        {
+            m_exectime = (uint64_t)st.st_mtime;
+        }
+    }
+
+    const auto procname = GetProcessName();
+    const auto pnsz = std::min<size_t>( strlen( procname ), WelcomeMessageProgramNameSize - 1 );
+
+    const auto hostinfo = GetHostInfo();
+    const auto hisz = std::min<size_t>( strlen( hostinfo ), WelcomeMessageHostInfoSize - 1 );
+
+    const uint64_t pid = GetPid();
+
+    // Capability flags advertised to the server.
+    uint8_t flags = 0;
+
+#ifdef TRACY_ON_DEMAND
+    flags |= WelcomeFlag::OnDemand;
+#endif
+#ifdef __APPLE__
+    flags |= WelcomeFlag::IsApple;
+#endif
+#ifndef TRACY_NO_CODE_TRANSFER
+    flags |= WelcomeFlag::CodeTransfer;
+#endif
+#ifdef _WIN32
+    flags |= WelcomeFlag::CombineSamples;
+#  ifndef TRACY_NO_CONTEXT_SWITCH
+    flags |= WelcomeFlag::IdentifySamples;
+#  endif
+#endif
+
+    // CPU architecture detection for the welcome message.
+#if defined __i386 || defined _M_IX86
+    uint8_t cpuArch = CpuArchX86;
+#elif defined __x86_64__ || defined _M_X64
+    uint8_t cpuArch = CpuArchX64;
+#elif defined __aarch64__
+    uint8_t cpuArch = CpuArchArm64;
+#elif defined __ARM_ARCH
+    uint8_t cpuArch = CpuArchArm32;
+#else
+    uint8_t cpuArch = CpuArchUnknown;
+#endif
+
+#if defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+    // CPUID leaf 0: vendor string (EBX, EDX, ECX order); leaf 1: model id.
+    uint32_t regs[4];
+    char manufacturer[12];
+    CpuId( regs, 0 );
+    memcpy( manufacturer, regs+1, 4 );
+    memcpy( manufacturer+4, regs+3, 4 );
+    memcpy( manufacturer+8, regs+2, 4 );
+
+    CpuId( regs, 1 );
+    uint32_t cpuId = ( regs[0] & 0xFFF ) | ( ( regs[0] & 0xFFF0000 ) >> 4 );
+#else
+    const char manufacturer[12] = {};
+    uint32_t cpuId = 0;
+#endif
+
+    // Assemble the welcome message sent to every connecting server.
+    WelcomeMessage welcome;
+    MemWrite( &welcome.timerMul, m_timerMul );
+    MemWrite( &welcome.initBegin, GetInitTime() );
+    MemWrite( &welcome.initEnd, m_timeBegin.load( std::memory_order_relaxed ) );
+    MemWrite( &welcome.delay, m_delay );
+    MemWrite( &welcome.resolution, m_resolution );
+    MemWrite( &welcome.epoch, m_epoch );
+    MemWrite( &welcome.exectime, m_exectime );
+    MemWrite( &welcome.pid, pid );
+    MemWrite( &welcome.samplingPeriod, m_samplingPeriod );
+    MemWrite( &welcome.flags, flags );
+    MemWrite( &welcome.cpuArch, cpuArch );
+    memcpy( welcome.cpuManufacturer, manufacturer, 12 );
+    MemWrite( &welcome.cpuId, cpuId );
+    memcpy( welcome.programName, procname, pnsz );
+    memset( welcome.programName + pnsz, 0, WelcomeMessageProgramNameSize - pnsz );
+    memcpy( welcome.hostInfo, hostinfo, hisz );
+    memset( welcome.hostInfo + hisz, 0, WelcomeMessageHostInfoSize - hisz );
+
+    moodycamel::ConsumerToken token( GetQueue() );
+
+    ListenSocket listen;
+    bool isListening = false;
+    if( !dataPortSearch )
+    {
+        isListening = listen.Listen( dataPort, 4 );
+    }
+    else
+    {
+        // Try up to 20 consecutive ports starting at the default.
+        for( uint32_t i=0; i<20; i++ )
+        {
+            if( listen.Listen( dataPort+i, 4 ) )
+            {
+                dataPort += i;
+                isListening = true;
+                break;
+            }
+        }
+    }
+    if( !isListening )
+    {
+        // Can't accept connections: just drain queues until shutdown.
+        for(;;)
+        {
+            if( ShouldExit() )
+            {
+                m_shutdownFinished.store( true, std::memory_order_relaxed );
+                return;
+            }
+
+            ClearQueues( token );
+            std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
+        }
+    }
+
+#ifndef TRACY_NO_BROADCAST
+    // UDP presence broadcast so servers can discover this client.
+    m_broadcast = (UdpBroadcast*)tracy_malloc( sizeof( UdpBroadcast ) );
+    new(m_broadcast) UdpBroadcast();
+#  ifdef TRACY_ONLY_LOCALHOST
+    const char* addr = "127.255.255.255";
+#  else
+    const char* addr = "255.255.255.255";
+#  endif
+    if( !m_broadcast->Open( addr, broadcastPort ) )
+    {
+        m_broadcast->~UdpBroadcast();
+        tracy_free( m_broadcast );
+        m_broadcast = nullptr;
+    }
+#endif
+
+    int broadcastLen = 0;
+    auto& broadcastMsg = GetBroadcastMessage( procname, pnsz, broadcastLen, dataPort );
+    uint64_t lastBroadcast = 0;
+
+    // Connections loop.
+    // Each iteration of the loop handles whole connection. Multiple iterations will only
+    // happen in the on-demand mode or when handshake fails.
+    for(;;)
+    {
+        // Wait for incoming connection
+        for(;;)
+        {
+#ifndef TRACY_NO_EXIT
+            if( !m_noExit && ShouldExit() )
+            {
+                // Announce departure (activeTime == -1) before leaving.
+                if( m_broadcast )
+                {
+                    broadcastMsg.activeTime = -1;
+                    m_broadcast->Send( broadcastPort, &broadcastMsg, broadcastLen );
+                }
+                m_shutdownFinished.store( true, std::memory_order_relaxed );
+                return;
+            }
+#endif
+            m_sock = listen.Accept();
+            if( m_sock ) break;
+#ifndef TRACY_ON_DEMAND
+            ProcessSysTime();
+#endif
+
+            // Re-broadcast presence every 3 seconds while unconnected.
+            if( m_broadcast )
+            {
+                const auto t = std::chrono::high_resolution_clock::now().time_since_epoch().count();
+                if( t - lastBroadcast > 3000000000 ) // 3s
+                {
+                    lastBroadcast = t;
+                    const auto ts = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch() ).count();
+                    broadcastMsg.activeTime = int32_t( ts - m_epoch );
+                    assert( broadcastMsg.activeTime >= 0 );
+                    m_broadcast->Send( broadcastPort, &broadcastMsg, broadcastLen );
+                }
+            }
+        }
+
+        // Connected: mark ourselves inactive in the broadcast.
+        if( m_broadcast )
+        {
+            lastBroadcast = 0;
+            broadcastMsg.activeTime = -1;
+            m_broadcast->Send( broadcastPort, &broadcastMsg, broadcastLen );
+        }
+
+        // Handshake
+        {
+            // Magic bytes first, then the protocol version; any mismatch or
+            // timeout drops the connection and waits for the next one.
+            char shibboleth[HandshakeShibbolethSize];
+            auto res = m_sock->ReadRaw( shibboleth, HandshakeShibbolethSize, 2000 );
+            if( !res || memcmp( shibboleth, HandshakeShibboleth, HandshakeShibbolethSize ) != 0 )
+            {
+                m_sock->~Socket();
+                tracy_free( m_sock );
+                m_sock = nullptr;
+                continue;
+            }
+
+            uint32_t protocolVersion;
+            res = m_sock->ReadRaw( &protocolVersion, sizeof( protocolVersion ), 2000 );
+            if( !res )
+            {
+                m_sock->~Socket();
+                tracy_free( m_sock );
+                m_sock = nullptr;
+                continue;
+            }
+
+            if( protocolVersion != ProtocolVersion )
+            {
+                HandshakeStatus status = HandshakeProtocolMismatch;
+                m_sock->Send( &status, sizeof( status ) );
+                m_sock->~Socket();
+                tracy_free( m_sock );
+                m_sock = nullptr;
+                continue;
+            }
+        }
+
+#ifdef TRACY_ON_DEMAND
+        // Fresh connection: drop stale events and bump the connection id.
+        const auto currentTime = GetTime();
+        ClearQueues( token );
+        m_connectionId.fetch_add( 1, std::memory_order_release );
+#endif
+        m_isConnected.store( true, std::memory_order_release );
+
+        HandshakeStatus handshake = HandshakeWelcome;
+        m_sock->Send( &handshake, sizeof( handshake ) );
+
+        LZ4_resetStream( (LZ4_stream_t*)m_stream );
+        m_sock->Send( &welcome, sizeof( welcome ) );
+
+        // Reset per-connection delta-encoding reference times.
+        m_threadCtx = 0;
+        m_refTimeSerial = 0;
+        m_refTimeCtx = 0;
+        m_refTimeGpu = 0;
+
+#ifdef TRACY_ON_DEMAND
+        // Replay deferred items (app info, lock/GPU context names) that must
+        // survive across connections.
+        OnDemandPayloadMessage onDemand;
+        onDemand.frames = m_frameCount.load( std::memory_order_relaxed );
+        onDemand.currentTime = currentTime;
+
+        m_sock->Send( &onDemand, sizeof( onDemand ) );
+
+        m_deferredLock.lock();
+        for( auto& item : m_deferredQueue )
+        {
+            uint64_t ptr;
+            uint16_t size;
+            const auto idx = MemRead<uint8_t>( &item.hdr.idx );
+            switch( (QueueType)idx )
+            {
+            case QueueType::MessageAppInfo:
+                ptr = MemRead<uint64_t>( &item.messageFat.text );
+                size = MemRead<uint16_t>( &item.messageFat.size );
+                SendSingleString( (const char*)ptr, size );
+                break;
+            case QueueType::LockName:
+                ptr = MemRead<uint64_t>( &item.lockNameFat.name );
+                size = MemRead<uint16_t>( &item.lockNameFat.size );
+                SendSingleString( (const char*)ptr, size );
+                break;
+            case QueueType::GpuContextName:
+                ptr = MemRead<uint64_t>( &item.gpuContextNameFat.ptr );
+                size = MemRead<uint16_t>( &item.gpuContextNameFat.size );
+                SendSingleString( (const char*)ptr, size );
+                break;
+            default:
+                break;
+            }
+            AppendData( &item, QueueDataSize[idx] );
+        }
+        m_deferredLock.unlock();
+#endif
+
+        // Main communications loop
+        int keepAlive = 0;
+        for(;;)
+        {
+            ProcessSysTime();
+            const auto status = Dequeue( token );
+            const auto serialStatus = DequeueSerial();
+            if( status == DequeueStatus::ConnectionLost || serialStatus == DequeueStatus::ConnectionLost )
+            {
+                break;
+            }
+            else if( status == DequeueStatus::QueueEmpty && serialStatus == DequeueStatus::QueueEmpty )
+            {
+                if( ShouldExit() ) break;
+                if( m_bufferOffset != m_bufferStart )
+                {
+                    if( !CommitData() ) break;
+                }
+                // After ~500 idle polls (10 ms each) send a keep-alive so the
+                // server knows we are still here.
+                if( keepAlive == 500 )
+                {
+                    QueueItem ka;
+                    ka.hdr.type = QueueType::KeepAlive;
+                    AppendData( &ka, QueueDataSize[ka.hdr.idx] );
+                    if( !CommitData() ) break;
+
+                    keepAlive = 0;
+                }
+                else if( !m_sock->HasData() )
+                {
+                    keepAlive++;
+                    std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
+                }
+            }
+            else
+            {
+                keepAlive = 0;
+            }
+
+            // Service any queries the server sent while we were busy.
+            bool connActive = true;
+            while( m_sock->HasData() )
+            {
+                connActive = HandleServerQuery();
+                if( !connActive ) break;
+            }
+            if( !connActive ) break;
+        }
+        if( ShouldExit() ) break;
+
+        m_isConnected.store( false, std::memory_order_release );
+#ifdef TRACY_ON_DEMAND
+        m_bufferOffset = 0;
+        m_bufferStart = 0;
+#endif
+
+        m_sock->~Socket();
+        tracy_free( m_sock );
+        m_sock = nullptr;
+
+#ifndef TRACY_ON_DEMAND
+        // Client is no longer available here. Accept incoming connections, but reject handshake.
+        for(;;)
+        {
+            if( ShouldExit() )
+            {
+                m_shutdownFinished.store( true, std::memory_order_relaxed );
+                return;
+            }
+
+            ClearQueues( token );
+
+            m_sock = listen.Accept();
+            if( m_sock )
+            {
+                char shibboleth[HandshakeShibbolethSize];
+                auto res = m_sock->ReadRaw( shibboleth, HandshakeShibbolethSize, 1000 );
+                if( !res || memcmp( shibboleth, HandshakeShibboleth, HandshakeShibbolethSize ) != 0 )
+                {
+                    m_sock->~Socket();
+                    tracy_free( m_sock );
+                    m_sock = nullptr;
+                    continue;
+                }
+
+                uint32_t protocolVersion;
+                res = m_sock->ReadRaw( &protocolVersion, sizeof( protocolVersion ), 1000 );
+                if( !res )
+                {
+                    m_sock->~Socket();
+                    tracy_free( m_sock );
+                    m_sock = nullptr;
+                    continue;
+                }
+
+                HandshakeStatus status = HandshakeNotAvailable;
+                m_sock->Send( &status, sizeof( status ) );
+                m_sock->~Socket();
+                tracy_free( m_sock );
+            }
+        }
+#endif
+    }
+    // End of connections loop
+
+    // Wait for symbols thread to terminate. Symbol resolution will continue in this thread.
+#ifdef TRACY_HAS_CALLSTACK
+    while( s_symbolThreadGone.load() == false ) { YieldThread(); }
+#endif
+
+    // Client is exiting. Send items remaining in queues.
+    for(;;)
+    {
+        const auto status = Dequeue( token );
+        const auto serialStatus = DequeueSerial();
+        if( status == DequeueStatus::ConnectionLost || serialStatus == DequeueStatus::ConnectionLost )
+        {
+            m_shutdownFinished.store( true, std::memory_order_relaxed );
+            return;
+        }
+        else if( status == DequeueStatus::QueueEmpty && serialStatus == DequeueStatus::QueueEmpty )
+        {
+            if( m_bufferOffset != m_bufferStart ) CommitData();
+            break;
+        }
+
+        while( m_sock->HasData() )
+        {
+            if( !HandleServerQuery() )
+            {
+                m_shutdownFinished.store( true, std::memory_order_relaxed );
+                return;
+            }
+        }
+
+#ifdef TRACY_HAS_CALLSTACK
+        // Symbol thread is gone; resolve queued symbols inline.
+        for(;;)
+        {
+            auto si = m_symbolQueue.front();
+            if( !si ) break;
+            HandleSymbolQueueItem( *si );
+            m_symbolQueue.pop();
+        }
+#endif
+    }
+
+    // Send client termination notice to the server
+    QueueItem terminate;
+    MemWrite( &terminate.hdr.type, QueueType::Terminate );
+    if( !SendData( (const char*)&terminate, 1 ) )
+    {
+        m_shutdownFinished.store( true, std::memory_order_relaxed );
+        return;
+    }
+    // Handle remaining server queries
+    for(;;)
+    {
+        while( m_sock->HasData() )
+        {
+            if( !HandleServerQuery() )
+            {
+                m_shutdownFinished.store( true, std::memory_order_relaxed );
+                return;
+            }
+        }
+#ifdef TRACY_HAS_CALLSTACK
+        for(;;)
+        {
+            auto si = m_symbolQueue.front();
+            if( !si ) break;
+            HandleSymbolQueueItem( *si );
+            m_symbolQueue.pop();
+        }
+#endif
+        const auto status = Dequeue( token );
+        const auto serialStatus = DequeueSerial();
+        if( status == DequeueStatus::ConnectionLost || serialStatus == DequeueStatus::ConnectionLost )
+        {
+            m_shutdownFinished.store( true, std::memory_order_relaxed );
+            return;
+        }
+        if( m_bufferOffset != m_bufferStart )
+        {
+            if( !CommitData() )
+            {
+                m_shutdownFinished.store( true, std::memory_order_relaxed );
+                return;
+            }
+        }
+    }
+}
+
+#ifndef TRACY_NO_FRAME_IMAGE
+// Frame image compression thread: drains m_fiQueue in batches, compresses
+// each captured frame with DXT1 (halving the pixel data to w*h/2 bytes),
+// frees the raw image, and enqueues a FrameImage item for the worker.
+void Profiler::CompressWorker()
+{
+    ThreadExitHandler threadExitHandler;
+    SetThreadName( "Tracy DXT1" );
+    // Wait for SpawnWorkerThreads() to publish the start timestamp.
+    while( m_timeBegin.load( std::memory_order_relaxed ) == 0 ) std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
+    rpmalloc_thread_initialize();
+
+    for(;;)
+    {
+        // Sample the exit flag before draining, so anything enqueued prior
+        // to shutdown still gets processed in this iteration.
+        const auto shouldExit = ShouldExit();
+
+        {
+            // Spin for the lock, but give up during manual shutdown to
+            // avoid deadlocking against a stopped producer.
+            bool lockHeld = true;
+            while( !m_fiLock.try_lock() )
+            {
+                if( m_shutdownManual.load( std::memory_order_relaxed ) )
+                {
+                    lockHeld = false;
+                    break;
+                }
+            }
+            if( !m_fiQueue.empty() ) m_fiQueue.swap( m_fiDequeue );
+            if( lockHeld )
+            {
+                m_fiLock.unlock();
+            }
+        }
+
+        const auto sz = m_fiDequeue.size();
+        if( sz > 0 )
+        {
+            auto fi = m_fiDequeue.data();
+            auto end = fi + sz;
+            while( fi != end )
+            {
+                const auto w = fi->w;
+                const auto h = fi->h;
+                // DXT1 output: 4 bits per pixel.
+                // NOTE(review): 'etc1buf' holds DXT1 data despite its name.
+                const auto csz = size_t( w * h / 2 );
+                auto etc1buf = (char*)tracy_malloc( csz );
+                CompressImageDxt1( (const char*)fi->image, etc1buf, w, h );
+                tracy_free( fi->image );
+
+                TracyLfqPrepare( QueueType::FrameImage );
+                MemWrite( &item->frameImageFat.image, (uint64_t)etc1buf );
+                MemWrite( &item->frameImageFat.frame, fi->frame );
+                MemWrite( &item->frameImageFat.w, w );
+                MemWrite( &item->frameImageFat.h, h );
+                uint8_t flip = fi->flip;
+                MemWrite( &item->frameImageFat.flip, flip );
+                TracyLfqCommit;
+
+                fi++;
+            }
+            m_fiDequeue.clear();
+        }
+        else
+        {
+            std::this_thread::sleep_for( std::chrono::milliseconds( 20 ) );
+        }
+
+        if( shouldExit )
+        {
+            return;
+        }
+    }
+}
+#endif
+
+// Releases any heap payloads a queue item owns (fat strings, callstacks,
+// source locations, frame images) when the item is discarded rather than
+// sent. Items at or above QueueType::Terminate carry no payloads.
+static void FreeAssociatedMemory( const QueueItem& item )
+{
+    if( item.hdr.idx >= (int)QueueType::Terminate ) return;
+
+    uint64_t ptr;
+    switch( item.hdr.type )
+    {
+    case QueueType::ZoneText:
+    case QueueType::ZoneName:
+        ptr = MemRead<uint64_t>( &item.zoneTextFat.text );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::MessageColor:
+    case QueueType::MessageColorCallstack:
+        ptr = MemRead<uint64_t>( &item.messageColorFat.text );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::Message:
+    case QueueType::MessageCallstack:
+#ifndef TRACY_ON_DEMAND
+    // In on-demand builds MessageAppInfo is deferred, so its text must
+    // survive (handled in the TRACY_ON_DEMAND section below).
+    case QueueType::MessageAppInfo:
+#endif
+        ptr = MemRead<uint64_t>( &item.messageFat.text );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::ZoneBeginAllocSrcLoc:
+    case QueueType::ZoneBeginAllocSrcLocCallstack:
+        ptr = MemRead<uint64_t>( &item.zoneBegin.srcloc );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::GpuZoneBeginAllocSrcLoc:
+    case QueueType::GpuZoneBeginAllocSrcLocCallstack:
+    case QueueType::GpuZoneBeginAllocSrcLocSerial:
+    case QueueType::GpuZoneBeginAllocSrcLocCallstackSerial:
+        ptr = MemRead<uint64_t>( &item.gpuZoneBegin.srcloc );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::CallstackSerial:
+    case QueueType::Callstack:
+        ptr = MemRead<uint64_t>( &item.callstackFat.ptr );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::CallstackAlloc:
+        // Two separate payloads: native callstack and allocated callstack.
+        ptr = MemRead<uint64_t>( &item.callstackAllocFat.nativePtr );
+        tracy_free( (void*)ptr );
+        ptr = MemRead<uint64_t>( &item.callstackAllocFat.ptr );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::CallstackSample:
+    case QueueType::CallstackSampleContextSwitch:
+        ptr = MemRead<uint64_t>( &item.callstackSampleFat.ptr );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::FrameImage:
+        ptr = MemRead<uint64_t>( &item.frameImageFat.image );
+        tracy_free( (void*)ptr );
+        break;
+#ifdef TRACY_HAS_CALLSTACK
+    case QueueType::CallstackFrameSize:
+    {
+        // Frame entries and their name/file strings were allocated by the
+        // symbol thread; free them with the fast (thread-cached) path.
+        InitRpmalloc();
+        auto size = MemRead<uint8_t>( &item.callstackFrameSizeFat.size );
+        auto data = (const CallstackEntry*)MemRead<uint64_t>( &item.callstackFrameSizeFat.data );
+        for( uint8_t i=0; i<size; i++ )
+        {
+            const auto& frame = data[i];
+            tracy_free_fast( (void*)frame.name );
+            tracy_free_fast( (void*)frame.file );
+        }
+        tracy_free_fast( (void*)data );
+        break;
+    }
+    case QueueType::SymbolInformation:
+    {
+        uint8_t needFree = MemRead<uint8_t>( &item.symbolInformationFat.needFree );
+        if( needFree )
+        {
+            ptr = MemRead<uint64_t>( &item.symbolInformationFat.fileString );
+            tracy_free( (void*)ptr );
+        }
+        break;
+    }
+    case QueueType::CodeInformation:
+    {
+        uint8_t needFree = MemRead<uint8_t>( &item.codeInformationFat.needFree );
+        if( needFree )
+        {
+            ptr = MemRead<uint64_t>( &item.codeInformationFat.fileString );
+            tracy_free( (void*)ptr );
+        }
+        break;
+    }
+    case QueueType::SymbolCodeMetadata:
+        ptr = MemRead<uint64_t>( &item.symbolCodeMetadata.ptr );
+        tracy_free( (void*)ptr );
+        break;
+#endif
+#ifndef TRACY_ON_DEMAND
+    case QueueType::LockName:
+        ptr = MemRead<uint64_t>( &item.lockNameFat.name );
+        tracy_free( (void*)ptr );
+        break;
+    case QueueType::GpuContextName:
+        ptr = MemRead<uint64_t>( &item.gpuContextNameFat.ptr );
+        tracy_free( (void*)ptr );
+        break;
+#endif
+#ifdef TRACY_ON_DEMAND
+    case QueueType::MessageAppInfo:
+    case QueueType::GpuContextName:
+        // Don't free memory associated with deferred messages.
+        break;
+#endif
+#ifdef TRACY_HAS_SYSTEM_TRACING
+    case QueueType::ExternalNameMetadata:
+        ptr = MemRead<uint64_t>( &item.externalNameMetadata.name );
+        tracy_free( (void*)ptr );
+        ptr = MemRead<uint64_t>( &item.externalNameMetadata.threadName );
+        tracy_free_fast( (void*)ptr );
+        break;
+#endif
+    default:
+        break;
+    }
+}
+
+// Discards every pending item from the main concurrent queue (freeing any
+// owned payloads), then clears the serial queues as well. Used when no
+// server is connected to consume the data.
+void Profiler::ClearQueues( moodycamel::ConsumerToken& token )
+{
+    for(;;)
+    {
+        const auto sz = GetQueue().try_dequeue_bulk_single( token, [](const uint64_t&){}, []( QueueItem* item, size_t sz ) { assert( sz > 0 ); while( sz-- > 0 ) FreeAssociatedMemory( *item++ ); } );
+        if( sz == 0 ) break;
+    }
+
+    ClearSerial();
+}
+
+// Discards the serial queue and its already-dequeued buffer, freeing any
+// payloads. Spins for the serial lock but bails out during manual shutdown
+// so a stopped producer cannot deadlock us.
+void Profiler::ClearSerial()
+{
+    bool lockHeld = true;
+    while( !m_serialLock.try_lock() )
+    {
+        if( m_shutdownManual.load( std::memory_order_relaxed ) )
+        {
+            lockHeld = false;
+            break;
+        }
+    }
+    for( auto& v : m_serialQueue ) FreeAssociatedMemory( v );
+    m_serialQueue.clear();
+    if( lockHeld )
+    {
+        m_serialLock.unlock();
+    }
+
+    // m_serialDequeue is only touched by this thread; no lock needed.
+    for( auto& v : m_serialDequeue ) FreeAssociatedMemory( v );
+    m_serialDequeue.clear();
+}
+
+Profiler::DequeueStatus Profiler::Dequeue( moodycamel::ConsumerToken& token )
+{
+ bool connectionLost = false;
+ const auto sz = GetQueue().try_dequeue_bulk_single( token,
+ [this, &connectionLost] ( const uint32_t& threadId )
+ {
+ if( ThreadCtxCheck( threadId ) == ThreadCtxStatus::ConnectionLost ) connectionLost = true;
+ },
+ [this, &connectionLost] ( QueueItem* item, size_t sz )
+ {
+ if( connectionLost ) return;
+ InitRpmalloc();
+ assert( sz > 0 );
+ int64_t refThread = m_refTimeThread;
+ int64_t refCtx = m_refTimeCtx;
+ int64_t refGpu = m_refTimeGpu;
+ while( sz-- > 0 )
+ {
+ uint64_t ptr;
+ uint16_t size;
+ auto idx = MemRead<uint8_t>( &item->hdr.idx );
+ if( idx < (int)QueueType::Terminate )
+ {
+ switch( (QueueType)idx )
+ {
+ case QueueType::ZoneText:
+ case QueueType::ZoneName:
+ ptr = MemRead<uint64_t>( &item->zoneTextFat.text );
+ size = MemRead<uint16_t>( &item->zoneTextFat.size );
+ SendSingleString( (const char*)ptr, size );
+ tracy_free_fast( (void*)ptr );
+ break;
+ case QueueType::Message:
+ case QueueType::MessageCallstack:
+ ptr = MemRead<uint64_t>( &item->messageFat.text );
+ size = MemRead<uint16_t>( &item->messageFat.size );
+ SendSingleString( (const char*)ptr, size );
+ tracy_free_fast( (void*)ptr );
+ break;
+ case QueueType::MessageColor:
+ case QueueType::MessageColorCallstack:
+ ptr = MemRead<uint64_t>( &item->messageColorFat.text );
+ size = MemRead<uint16_t>( &item->messageColorFat.size );
+ SendSingleString( (const char*)ptr, size );
+ tracy_free_fast( (void*)ptr );
+ break;
+ case QueueType::MessageAppInfo:
+ ptr = MemRead<uint64_t>( &item->messageFat.text );
+ size = MemRead<uint16_t>( &item->messageFat.size );
+ SendSingleString( (const char*)ptr, size );
+#ifndef TRACY_ON_DEMAND
+ tracy_free_fast( (void*)ptr );
+#endif
+ break;
+ case QueueType::ZoneBeginAllocSrcLoc:
+ case QueueType::ZoneBeginAllocSrcLocCallstack:
+ {
+ int64_t t = MemRead<int64_t>( &item->zoneBegin.time );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->zoneBegin.time, dt );
+ ptr = MemRead<uint64_t>( &item->zoneBegin.srcloc );
+ SendSourceLocationPayload( ptr );
+ tracy_free_fast( (void*)ptr );
+ break;
+ }
+ case QueueType::Callstack:
+ ptr = MemRead<uint64_t>( &item->callstackFat.ptr );
+ SendCallstackPayload( ptr );
+ tracy_free_fast( (void*)ptr );
+ break;
+ case QueueType::CallstackAlloc:
+ ptr = MemRead<uint64_t>( &item->callstackAllocFat.nativePtr );
+ if( ptr != 0 )
+ {
+ CutCallstack( (void*)ptr, "lua_pcall" );
+ SendCallstackPayload( ptr );
+ tracy_free_fast( (void*)ptr );
+ }
+ ptr = MemRead<uint64_t>( &item->callstackAllocFat.ptr );
+ SendCallstackAlloc( ptr );
+ tracy_free_fast( (void*)ptr );
+ break;
+ case QueueType::CallstackSample:
+ case QueueType::CallstackSampleContextSwitch:
+ {
+ ptr = MemRead<uint64_t>( &item->callstackSampleFat.ptr );
+ SendCallstackPayload64( ptr );
+ tracy_free_fast( (void*)ptr );
+ int64_t t = MemRead<int64_t>( &item->callstackSampleFat.time );
+ int64_t dt = t - refCtx;
+ refCtx = t;
+ MemWrite( &item->callstackSampleFat.time, dt );
+ break;
+ }
+ case QueueType::FrameImage:
+ {
+ ptr = MemRead<uint64_t>( &item->frameImageFat.image );
+ const auto w = MemRead<uint16_t>( &item->frameImageFat.w );
+ const auto h = MemRead<uint16_t>( &item->frameImageFat.h );
+ const auto csz = size_t( w * h / 2 );
+ SendLongString( ptr, (const char*)ptr, csz, QueueType::FrameImageData );
+ tracy_free_fast( (void*)ptr );
+ break;
+ }
+ case QueueType::ZoneBegin:
+ case QueueType::ZoneBeginCallstack:
+ {
+ int64_t t = MemRead<int64_t>( &item->zoneBegin.time );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->zoneBegin.time, dt );
+ break;
+ }
+ case QueueType::ZoneEnd:
+ {
+ int64_t t = MemRead<int64_t>( &item->zoneEnd.time );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->zoneEnd.time, dt );
+ break;
+ }
+ case QueueType::GpuZoneBegin:
+ case QueueType::GpuZoneBeginCallstack:
+ {
+ int64_t t = MemRead<int64_t>( &item->gpuZoneBegin.cpuTime );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->gpuZoneBegin.cpuTime, dt );
+ break;
+ }
+ case QueueType::GpuZoneBeginAllocSrcLoc:
+ case QueueType::GpuZoneBeginAllocSrcLocCallstack:
+ {
+ int64_t t = MemRead<int64_t>( &item->gpuZoneBegin.cpuTime );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->gpuZoneBegin.cpuTime, dt );
+ ptr = MemRead<uint64_t>( &item->gpuZoneBegin.srcloc );
+ SendSourceLocationPayload( ptr );
+ tracy_free_fast( (void*)ptr );
+ break;
+ }
+ case QueueType::GpuZoneEnd:
+ {
+ int64_t t = MemRead<int64_t>( &item->gpuZoneEnd.cpuTime );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->gpuZoneEnd.cpuTime, dt );
+ break;
+ }
+ case QueueType::GpuContextName:
+ ptr = MemRead<uint64_t>( &item->gpuContextNameFat.ptr );
+ size = MemRead<uint16_t>( &item->gpuContextNameFat.size );
+ SendSingleString( (const char*)ptr, size );
+#ifndef TRACY_ON_DEMAND
+ tracy_free_fast( (void*)ptr );
+#endif
+ break;
+ case QueueType::PlotData:
+ {
+ int64_t t = MemRead<int64_t>( &item->plotData.time );
+ int64_t dt = t - refThread;
+ refThread = t;
+ MemWrite( &item->plotData.time, dt );
+ break;
+ }
+ case QueueType::ContextSwitch:
+ {
+ int64_t t = MemRead<int64_t>( &item->contextSwitch.time );
+ int64_t dt = t - refCtx;
+ refCtx = t;
+ MemWrite( &item->contextSwitch.time, dt );
+ break;
+ }
+ case QueueType::ThreadWakeup:
+ {
+ int64_t t = MemRead<int64_t>( &item->threadWakeup.time );
+ int64_t dt = t - refCtx;
+ refCtx = t;
+ MemWrite( &item->threadWakeup.time, dt );
+ break;
+ }
+ case QueueType::GpuTime:
+ {
+ int64_t t = MemRead<int64_t>( &item->gpuTime.gpuTime );
+ int64_t dt = t - refGpu;
+ refGpu = t;
+ MemWrite( &item->gpuTime.gpuTime, dt );
+ break;
+ }
+#ifdef TRACY_HAS_CALLSTACK
+ case QueueType::CallstackFrameSize:
+ {
+ auto data = (const CallstackEntry*)MemRead<uint64_t>( &item->callstackFrameSizeFat.data );
+ auto datasz = MemRead<uint8_t>( &item->callstackFrameSizeFat.size );
+ auto imageName = (const char*)MemRead<uint64_t>( &item->callstackFrameSizeFat.imageName );
+ SendSingleString( imageName );
+ AppendData( item++, QueueDataSize[idx] );
+
+ for( uint8_t i=0; i<datasz; i++ )
+ {
+ const auto& frame = data[i];
+
+ SendSingleString( frame.name );
+ SendSecondString( frame.file );
+
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::CallstackFrame );
+ MemWrite( &item.callstackFrame.line, frame.line );
+ MemWrite( &item.callstackFrame.symAddr, frame.symAddr );
+ MemWrite( &item.callstackFrame.symLen, frame.symLen );
+
+ AppendData( &item, QueueDataSize[(int)QueueType::CallstackFrame] );
+
+ tracy_free_fast( (void*)frame.name );
+ tracy_free_fast( (void*)frame.file );
+ }
+ tracy_free_fast( (void*)data );
+ continue;
+ }
+ case QueueType::SymbolInformation:
+ {
+ auto fileString = (const char*)MemRead<uint64_t>( &item->symbolInformationFat.fileString );
+ auto needFree = MemRead<uint8_t>( &item->symbolInformationFat.needFree );
+ SendSingleString( fileString );
+ if( needFree ) tracy_free_fast( (void*)fileString );
+ break;
+ }
+ case QueueType::CodeInformation:
+ {
+ auto fileString = (const char*)MemRead<uint64_t>( &item->codeInformationFat.fileString );
+ auto needFree = MemRead<uint8_t>( &item->codeInformationFat.needFree );
+ SendSingleString( fileString );
+ if( needFree ) tracy_free_fast( (void*)fileString );
+ break;
+ }
+ case QueueType::SymbolCodeMetadata:
+ {
+ auto symbol = MemRead<uint64_t>( &item->symbolCodeMetadata.symbol );
+ auto ptr = (const char*)MemRead<uint64_t>( &item->symbolCodeMetadata.ptr );
+ auto size = MemRead<uint32_t>( &item->symbolCodeMetadata.size );
+ SendLongString( symbol, ptr, size, QueueType::SymbolCode );
+ tracy_free_fast( (void*)ptr );
+ ++item;
+ continue;
+ }
+#endif
+#ifdef TRACY_HAS_SYSTEM_TRACING
+ case QueueType::ExternalNameMetadata:
+ {
+ auto thread = MemRead<uint64_t>( &item->externalNameMetadata.thread );
+ auto name = (const char*)MemRead<uint64_t>( &item->externalNameMetadata.name );
+ auto threadName = (const char*)MemRead<uint64_t>( &item->externalNameMetadata.threadName );
+ SendString( thread, threadName, QueueType::ExternalThreadName );
+ SendString( thread, name, QueueType::ExternalName );
+ tracy_free_fast( (void*)threadName );
+ tracy_free_fast( (void*)name );
+ ++item;
+ continue;
+ }
+#endif
+ default:
+ assert( false );
+ break;
+ }
+ }
+ if( !AppendData( item++, QueueDataSize[idx] ) )
+ {
+ connectionLost = true;
+ m_refTimeThread = refThread;
+ m_refTimeCtx = refCtx;
+ m_refTimeGpu = refGpu;
+ return;
+ }
+ }
+ m_refTimeThread = refThread;
+ m_refTimeCtx = refCtx;
+ m_refTimeGpu = refGpu;
+ }
+ );
+ if( connectionLost ) return DequeueStatus::ConnectionLost;
+ return sz > 0 ? DequeueStatus::DataDequeued : DequeueStatus::QueueEmpty;
+}
+
// Drain the shared event queue, forwarding only context-switch data whose
// timestamp does not exceed `timeStop`. Every dequeued item has its fat
// payload released; ContextSwitch and ThreadWakeup events up to the limit
// are delta-encoded against m_refTimeCtx and appended to the output buffer,
// everything else is discarded. `timeStop` doubles as a status channel: it
// becomes -1 once an event newer than the limit is seen (aborting the
// batch) and -2 when AppendData fails (connection lost).
Profiler::DequeueStatus Profiler::DequeueContextSwitches( tracy::moodycamel::ConsumerToken& token, int64_t& timeStop )
{
    const auto sz = GetQueue().try_dequeue_bulk_single( token, [] ( const uint64_t& ) {},
        [this, &timeStop] ( QueueItem* item, size_t sz )
        {
            assert( sz > 0 );
            int64_t refCtx = m_refTimeCtx;
            while( sz-- > 0 )
            {
                // Payload memory is released even for items that are dropped.
                FreeAssociatedMemory( *item );
                if( timeStop < 0 ) return;
                const auto idx = MemRead<uint8_t>( &item->hdr.idx );
                if( idx == (uint8_t)QueueType::ContextSwitch )
                {
                    const auto csTime = MemRead<int64_t>( &item->contextSwitch.time );
                    if( csTime > timeStop )
                    {
                        // Reached data newer than the requested cutoff.
                        timeStop = -1;
                        m_refTimeCtx = refCtx;
                        return;
                    }
                    // Delta-encode the timestamp against the previous event.
                    int64_t dt = csTime - refCtx;
                    refCtx = csTime;
                    MemWrite( &item->contextSwitch.time, dt );
                    if( !AppendData( item, QueueDataSize[(int)QueueType::ContextSwitch] ) )
                    {
                        timeStop = -2;  // connection lost
                        m_refTimeCtx = refCtx;
                        return;
                    }
                }
                else if( idx == (uint8_t)QueueType::ThreadWakeup )
                {
                    const auto csTime = MemRead<int64_t>( &item->threadWakeup.time );
                    if( csTime > timeStop )
                    {
                        timeStop = -1;
                        m_refTimeCtx = refCtx;
                        return;
                    }
                    int64_t dt = csTime - refCtx;
                    refCtx = csTime;
                    MemWrite( &item->threadWakeup.time, dt );
                    if( !AppendData( item, QueueDataSize[(int)QueueType::ThreadWakeup] ) )
                    {
                        timeStop = -2;  // connection lost
                        m_refTimeCtx = refCtx;
                        return;
                    }
                }
                item++;
            }
            m_refTimeCtx = refCtx;
        }
    );

    if( timeStop == -2 ) return DequeueStatus::ConnectionLost;
    return ( timeStop == -1 || sz > 0 ) ? DequeueStatus::DataDequeued : DequeueStatus::QueueEmpty;
}
+
// Serial-queue thread-context guard: reads the thread id from the named
// field of the current queue item and routes it through ThreadCtxCheck().
// On a context change the local thread time reference is reset to match
// the member reset done by ThreadCtxCheck(); on a lost connection the
// ENCLOSING function returns DequeueStatus::ConnectionLost (note: this
// macro deliberately expands to statements containing `return`).
#define ThreadCtxCheckSerial( _name ) \
    uint32_t thread = MemRead<uint32_t>( &item->_name.thread ); \
    switch( ThreadCtxCheck( thread ) ) \
    { \
        case ThreadCtxStatus::Same: break; \
        case ThreadCtxStatus::Changed: assert( m_refTimeThread == 0 ); refThread = 0; break; \
        case ThreadCtxStatus::ConnectionLost: return DequeueStatus::ConnectionLost; \
        default: assert( false ); break; \
    }
+
// Drain the serial event queue (strictly ordered events: locks, memory
// alloc/free, serial GPU zones, and — with TRACY_FIBERS — fiber-bound zone
// data). The producer queue is swapped out under m_serialLock, then each
// item has its timestamp delta-encoded against the appropriate reference
// time, any fat payloads transmitted and freed, and the item appended to
// the output buffer.
Profiler::DequeueStatus Profiler::DequeueSerial()
{
    {
        // Swap the producer queue into our private dequeue buffer. During a
        // manual shutdown the lock holder may never release it, so stop
        // spinning and proceed without the lock.
        // NOTE(review): unlocked access is presumably safe at that point —
        // confirm against the shutdown path.
        bool lockHeld = true;
        while( !m_serialLock.try_lock() )
        {
            if( m_shutdownManual.load( std::memory_order_relaxed ) )
            {
                lockHeld = false;
                break;
            }
        }
        if( !m_serialQueue.empty() ) m_serialQueue.swap( m_serialDequeue );
        if( lockHeld )
        {
            m_serialLock.unlock();
        }
    }

    const auto sz = m_serialDequeue.size();
    if( sz > 0 )
    {
        InitRpmalloc();
        int64_t refSerial = m_refTimeSerial;
        int64_t refGpu = m_refTimeGpu;
#ifdef TRACY_FIBERS
        int64_t refThread = m_refTimeThread;
#endif
        auto item = m_serialDequeue.data();
        auto end = item + sz;
        while( item != end )
        {
            uint64_t ptr;
            auto idx = MemRead<uint8_t>( &item->hdr.idx );
            if( idx < (int)QueueType::Terminate )
            {
                switch( (QueueType)idx )
                {
                case QueueType::CallstackSerial:
                    // Fat callstack: send the payload, then free the block.
                    ptr = MemRead<uint64_t>( &item->callstackFat.ptr );
                    SendCallstackPayload( ptr );
                    tracy_free_fast( (void*)ptr );
                    break;
                // Lock events delta-encode against the serial reference time.
                case QueueType::LockWait:
                case QueueType::LockSharedWait:
                {
                    int64_t t = MemRead<int64_t>( &item->lockWait.time );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->lockWait.time, dt );
                    break;
                }
                case QueueType::LockObtain:
                case QueueType::LockSharedObtain:
                {
                    int64_t t = MemRead<int64_t>( &item->lockObtain.time );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->lockObtain.time, dt );
                    break;
                }
                case QueueType::LockRelease:
                case QueueType::LockSharedRelease:
                {
                    int64_t t = MemRead<int64_t>( &item->lockRelease.time );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->lockRelease.time, dt );
                    break;
                }
                case QueueType::LockName:
                {
                    ptr = MemRead<uint64_t>( &item->lockNameFat.name );
                    uint16_t size = MemRead<uint16_t>( &item->lockNameFat.size );
                    SendSingleString( (const char*)ptr, size );
#ifndef TRACY_ON_DEMAND
                    // On-demand builds keep the name alive for later resends.
                    tracy_free_fast( (void*)ptr );
#endif
                    break;
                }
                case QueueType::MemAlloc:
                case QueueType::MemAllocNamed:
                case QueueType::MemAllocCallstack:
                case QueueType::MemAllocCallstackNamed:
                {
                    int64_t t = MemRead<int64_t>( &item->memAlloc.time );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->memAlloc.time, dt );
                    break;
                }
                case QueueType::MemFree:
                case QueueType::MemFreeNamed:
                case QueueType::MemFreeCallstack:
                case QueueType::MemFreeCallstackNamed:
                {
                    int64_t t = MemRead<int64_t>( &item->memFree.time );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->memFree.time, dt );
                    break;
                }
                case QueueType::GpuZoneBeginSerial:
                case QueueType::GpuZoneBeginCallstackSerial:
                {
                    int64_t t = MemRead<int64_t>( &item->gpuZoneBegin.cpuTime );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->gpuZoneBegin.cpuTime, dt );
                    break;
                }
                case QueueType::GpuZoneBeginAllocSrcLocSerial:
                case QueueType::GpuZoneBeginAllocSrcLocCallstackSerial:
                {
                    int64_t t = MemRead<int64_t>( &item->gpuZoneBegin.cpuTime );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->gpuZoneBegin.cpuTime, dt );
                    // Allocated source location payload: send, then free.
                    ptr = MemRead<uint64_t>( &item->gpuZoneBegin.srcloc );
                    SendSourceLocationPayload( ptr );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::GpuZoneEndSerial:
                {
                    int64_t t = MemRead<int64_t>( &item->gpuZoneEnd.cpuTime );
                    int64_t dt = t - refSerial;
                    refSerial = t;
                    MemWrite( &item->gpuZoneEnd.cpuTime, dt );
                    break;
                }
                case QueueType::GpuTime:
                {
                    // GPU timestamps use their own reference time stream.
                    int64_t t = MemRead<int64_t>( &item->gpuTime.gpuTime );
                    int64_t dt = t - refGpu;
                    refGpu = t;
                    MemWrite( &item->gpuTime.gpuTime, dt );
                    break;
                }
                case QueueType::GpuContextName:
                {
                    ptr = MemRead<uint64_t>( &item->gpuContextNameFat.ptr );
                    uint16_t size = MemRead<uint16_t>( &item->gpuContextNameFat.size );
                    SendSingleString( (const char*)ptr, size );
#ifndef TRACY_ON_DEMAND
                    tracy_free_fast( (void*)ptr );
#endif
                    break;
                }
#ifdef TRACY_FIBERS
                // Fiber builds route zone events through the serial queue;
                // each first validates the serialized thread context
                // (ThreadCtxCheckSerial may return ConnectionLost).
                case QueueType::ZoneBegin:
                case QueueType::ZoneBeginCallstack:
                {
                    ThreadCtxCheckSerial( zoneBeginThread );
                    int64_t t = MemRead<int64_t>( &item->zoneBegin.time );
                    int64_t dt = t - refThread;
                    refThread = t;
                    MemWrite( &item->zoneBegin.time, dt );
                    break;
                }
                case QueueType::ZoneBeginAllocSrcLoc:
                case QueueType::ZoneBeginAllocSrcLocCallstack:
                {
                    ThreadCtxCheckSerial( zoneBeginThread );
                    int64_t t = MemRead<int64_t>( &item->zoneBegin.time );
                    int64_t dt = t - refThread;
                    refThread = t;
                    MemWrite( &item->zoneBegin.time, dt );
                    ptr = MemRead<uint64_t>( &item->zoneBegin.srcloc );
                    SendSourceLocationPayload( ptr );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::ZoneEnd:
                {
                    ThreadCtxCheckSerial( zoneEndThread );
                    int64_t t = MemRead<int64_t>( &item->zoneEnd.time );
                    int64_t dt = t - refThread;
                    refThread = t;
                    MemWrite( &item->zoneEnd.time, dt );
                    break;
                }
                case QueueType::ZoneText:
                case QueueType::ZoneName:
                {
                    ThreadCtxCheckSerial( zoneTextFatThread );
                    ptr = MemRead<uint64_t>( &item->zoneTextFat.text );
                    uint16_t size = MemRead<uint16_t>( &item->zoneTextFat.size );
                    SendSingleString( (const char*)ptr, size );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::Message:
                case QueueType::MessageCallstack:
                {
                    ThreadCtxCheckSerial( messageFatThread );
                    ptr = MemRead<uint64_t>( &item->messageFat.text );
                    uint16_t size = MemRead<uint16_t>( &item->messageFat.size );
                    SendSingleString( (const char*)ptr, size );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::MessageColor:
                case QueueType::MessageColorCallstack:
                {
                    ThreadCtxCheckSerial( messageColorFatThread );
                    ptr = MemRead<uint64_t>( &item->messageColorFat.text );
                    uint16_t size = MemRead<uint16_t>( &item->messageColorFat.size );
                    SendSingleString( (const char*)ptr, size );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::Callstack:
                {
                    ThreadCtxCheckSerial( callstackFatThread );
                    ptr = MemRead<uint64_t>( &item->callstackFat.ptr );
                    SendCallstackPayload( ptr );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::CallstackAlloc:
                {
                    ThreadCtxCheckSerial( callstackAllocFatThread );
                    // Optional native callstack accompanies the alloc stack.
                    ptr = MemRead<uint64_t>( &item->callstackAllocFat.nativePtr );
                    if( ptr != 0 )
                    {
                        CutCallstack( (void*)ptr, "lua_pcall" );
                        SendCallstackPayload( ptr );
                        tracy_free_fast( (void*)ptr );
                    }
                    ptr = MemRead<uint64_t>( &item->callstackAllocFat.ptr );
                    SendCallstackAlloc( ptr );
                    tracy_free_fast( (void*)ptr );
                    break;
                }
                case QueueType::FiberEnter:
                {
                    ThreadCtxCheckSerial( fiberEnter );
                    int64_t t = MemRead<int64_t>( &item->fiberEnter.time );
                    int64_t dt = t - refThread;
                    refThread = t;
                    MemWrite( &item->fiberEnter.time, dt );
                    break;
                }
                case QueueType::FiberLeave:
                {
                    ThreadCtxCheckSerial( fiberLeave );
                    int64_t t = MemRead<int64_t>( &item->fiberLeave.time );
                    int64_t dt = t - refThread;
                    refThread = t;
                    MemWrite( &item->fiberLeave.time, dt );
                    break;
                }
#endif
                default:
                    assert( false );
                    break;
                }
            }
#ifdef TRACY_FIBERS
            else
            {
                // Items at/after Terminate carry no fat payloads, but some
                // still need the serialized thread-context check in fiber
                // builds.
                switch( (QueueType)idx )
                {
                case QueueType::ZoneColor:
                {
                    ThreadCtxCheckSerial( zoneColorThread );
                    break;
                }
                case QueueType::ZoneValue:
                {
                    ThreadCtxCheckSerial( zoneValueThread );
                    break;
                }
                case QueueType::ZoneValidation:
                {
                    ThreadCtxCheckSerial( zoneValidationThread );
                    break;
                }
                case QueueType::MessageLiteral:
                case QueueType::MessageLiteralCallstack:
                {
                    ThreadCtxCheckSerial( messageLiteralThread );
                    break;
                }
                case QueueType::MessageLiteralColor:
                case QueueType::MessageLiteralColorCallstack:
                {
                    ThreadCtxCheckSerial( messageColorLiteralThread );
                    break;
                }
                case QueueType::CrashReport:
                {
                    ThreadCtxCheckSerial( crashReportThread );
                    break;
                }
                default:
                    break;
                }
            }
#endif
            if( !AppendData( item, QueueDataSize[idx] ) ) return DequeueStatus::ConnectionLost;
            item++;
        }
        m_refTimeSerial = refSerial;
        m_refTimeGpu = refGpu;
#ifdef TRACY_FIBERS
        m_refTimeThread = refThread;
#endif
        m_serialDequeue.clear();
    }
    else
    {
        return DequeueStatus::QueueEmpty;
    }
    return DequeueStatus::DataDequeued;
}
+
+Profiler::ThreadCtxStatus Profiler::ThreadCtxCheck( uint32_t threadId )
+{
+ if( m_threadCtx == threadId ) return ThreadCtxStatus::Same;
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::ThreadContext );
+ MemWrite( &item.threadCtx.thread, threadId );
+ if( !AppendData( &item, QueueDataSize[(int)QueueType::ThreadContext] ) ) return ThreadCtxStatus::ConnectionLost;
+ m_threadCtx = threadId;
+ m_refTimeThread = 0;
+ return ThreadCtxStatus::Changed;
+}
+
+bool Profiler::CommitData()
+{
+ bool ret = SendData( m_buffer + m_bufferStart, m_bufferOffset - m_bufferStart );
+ if( m_bufferOffset > TargetFrameSize * 2 ) m_bufferOffset = 0;
+ m_bufferStart = m_bufferOffset;
+ return ret;
+}
+
+bool Profiler::SendData( const char* data, size_t len )
+{
+ const lz4sz_t lz4sz = LZ4_compress_fast_continue( (LZ4_stream_t*)m_stream, data, m_lz4Buf + sizeof( lz4sz_t ), (int)len, LZ4Size, 1 );
+ memcpy( m_lz4Buf, &lz4sz, sizeof( lz4sz ) );
+ return m_sock->Send( m_lz4Buf, lz4sz + sizeof( lz4sz_t ) ) != -1;
+}
+
+void Profiler::SendString( uint64_t str, const char* ptr, size_t len, QueueType type )
+{
+ assert( type == QueueType::StringData ||
+ type == QueueType::ThreadName ||
+ type == QueueType::PlotName ||
+ type == QueueType::FrameName ||
+ type == QueueType::ExternalName ||
+ type == QueueType::ExternalThreadName ||
+ type == QueueType::FiberName );
+
+ QueueItem item;
+ MemWrite( &item.hdr.type, type );
+ MemWrite( &item.stringTransfer.ptr, str );
+
+ assert( len <= std::numeric_limits<uint16_t>::max() );
+ auto l16 = uint16_t( len );
+
+ NeedDataSize( QueueDataSize[(int)type] + sizeof( l16 ) + l16 );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)type] );
+ AppendDataUnsafe( &l16, sizeof( l16 ) );
+ AppendDataUnsafe( ptr, l16 );
+}
+
+void Profiler::SendSingleString( const char* ptr, size_t len )
+{
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::SingleStringData );
+
+ assert( len <= std::numeric_limits<uint16_t>::max() );
+ auto l16 = uint16_t( len );
+
+ NeedDataSize( QueueDataSize[(int)QueueType::SingleStringData] + sizeof( l16 ) + l16 );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::SingleStringData] );
+ AppendDataUnsafe( &l16, sizeof( l16 ) );
+ AppendDataUnsafe( ptr, l16 );
+}
+
+void Profiler::SendSecondString( const char* ptr, size_t len )
+{
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::SecondStringData );
+
+ assert( len <= std::numeric_limits<uint16_t>::max() );
+ auto l16 = uint16_t( len );
+
+ NeedDataSize( QueueDataSize[(int)QueueType::SecondStringData] + sizeof( l16 ) + l16 );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::SecondStringData] );
+ AppendDataUnsafe( &l16, sizeof( l16 ) );
+ AppendDataUnsafe( ptr, l16 );
+}
+
+void Profiler::SendLongString( uint64_t str, const char* ptr, size_t len, QueueType type )
+{
+ assert( type == QueueType::FrameImageData ||
+ type == QueueType::SymbolCode ||
+ type == QueueType::SourceCode );
+
+ QueueItem item;
+ MemWrite( &item.hdr.type, type );
+ MemWrite( &item.stringTransfer.ptr, str );
+
+ assert( len <= std::numeric_limits<uint32_t>::max() );
+ assert( QueueDataSize[(int)type] + sizeof( uint32_t ) + len <= TargetFrameSize );
+ auto l32 = uint32_t( len );
+
+ NeedDataSize( QueueDataSize[(int)type] + sizeof( l32 ) + l32 );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)type] );
+ AppendDataUnsafe( &l32, sizeof( l32 ) );
+ AppendDataUnsafe( ptr, l32 );
+}
+
+void Profiler::SendSourceLocation( uint64_t ptr )
+{
+ auto srcloc = (const SourceLocationData*)ptr;
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::SourceLocation );
+ MemWrite( &item.srcloc.name, (uint64_t)srcloc->name );
+ MemWrite( &item.srcloc.file, (uint64_t)srcloc->file );
+ MemWrite( &item.srcloc.function, (uint64_t)srcloc->function );
+ MemWrite( &item.srcloc.line, srcloc->line );
+ MemWrite( &item.srcloc.r, uint8_t( ( srcloc->color ) & 0xFF ) );
+ MemWrite( &item.srcloc.g, uint8_t( ( srcloc->color >> 8 ) & 0xFF ) );
+ MemWrite( &item.srcloc.b, uint8_t( ( srcloc->color >> 16 ) & 0xFF ) );
+ AppendData( &item, QueueDataSize[(int)QueueType::SourceLocation] );
+}
+
+void Profiler::SendSourceLocationPayload( uint64_t _ptr )
+{
+ auto ptr = (const char*)_ptr;
+
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::SourceLocationPayload );
+ MemWrite( &item.stringTransfer.ptr, _ptr );
+
+ uint16_t len;
+ memcpy( &len, ptr, sizeof( len ) );
+ assert( len > 2 );
+ len -= 2;
+ ptr += 2;
+
+ NeedDataSize( QueueDataSize[(int)QueueType::SourceLocationPayload] + sizeof( len ) + len );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::SourceLocationPayload] );
+ AppendDataUnsafe( &len, sizeof( len ) );
+ AppendDataUnsafe( ptr, len );
+}
+
// Transmit a native callstack payload. The heap block starts with the
// frame count, followed by that many uintptr_t frame addresses; the wire
// format always carries 64-bit addresses, so 32-bit builds widen each
// entry.
void Profiler::SendCallstackPayload( uint64_t _ptr )
{
    auto ptr = (uintptr_t*)_ptr;

    QueueItem item;
    MemWrite( &item.hdr.type, QueueType::CallstackPayload );
    MemWrite( &item.stringTransfer.ptr, _ptr );

    const auto sz = *ptr++;                     // leading element is the frame count
    const auto len = sz * sizeof( uint64_t );   // payload size on the wire
    const auto l16 = uint16_t( len );

    NeedDataSize( QueueDataSize[(int)QueueType::CallstackPayload] + sizeof( l16 ) + l16 );

    AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::CallstackPayload] );
    AppendDataUnsafe( &l16, sizeof( l16 ) );

    if( compile_time_condition<sizeof( uintptr_t ) == sizeof( uint64_t )>::value )
    {
        // Pointers are already 64-bit: ship the frame array as-is.
        AppendDataUnsafe( ptr, sizeof( uint64_t ) * sz );
    }
    else
    {
        // 32-bit pointers: widen each frame address to 64 bits.
        for( uintptr_t i=0; i<sz; i++ )
        {
            const auto val = uint64_t( *ptr++ );
            AppendDataUnsafe( &val, sizeof( uint64_t ) );
        }
    }
}
+
+void Profiler::SendCallstackPayload64( uint64_t _ptr )
+{
+ auto ptr = (uint64_t*)_ptr;
+
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::CallstackPayload );
+ MemWrite( &item.stringTransfer.ptr, _ptr );
+
+ const auto sz = *ptr++;
+ const auto len = sz * sizeof( uint64_t );
+ const auto l16 = uint16_t( len );
+
+ NeedDataSize( QueueDataSize[(int)QueueType::CallstackPayload] + sizeof( l16 ) + l16 );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::CallstackPayload] );
+ AppendDataUnsafe( &l16, sizeof( l16 ) );
+ AppendDataUnsafe( ptr, sizeof( uint64_t ) * sz );
+}
+
+void Profiler::SendCallstackAlloc( uint64_t _ptr )
+{
+ auto ptr = (const char*)_ptr;
+
+ QueueItem item;
+ MemWrite( &item.hdr.type, QueueType::CallstackAllocPayload );
+ MemWrite( &item.stringTransfer.ptr, _ptr );
+
+ uint16_t len;
+ memcpy( &len, ptr, 2 );
+ ptr += 2;
+
+ NeedDataSize( QueueDataSize[(int)QueueType::CallstackAllocPayload] + sizeof( len ) + len );
+
+ AppendDataUnsafe( &item, QueueDataSize[(int)QueueType::CallstackAllocPayload] );
+ AppendDataUnsafe( &len, sizeof( len ) );
+ AppendDataUnsafe( ptr, len );
+}
+
// Request decoding of a callstack frame address. With callstack support the
// work is deferred to the symbol worker thread; otherwise the server query
// is acknowledged immediately.
void Profiler::QueueCallstackFrame( uint64_t ptr )
{
#ifdef TRACY_HAS_CALLSTACK
    m_symbolQueue.emplace( SymbolQueueItem { SymbolQueueItemType::CallstackFrame, ptr } );
#else
    AckServerQuery();
#endif
}
+
// Queue a symbol resolution request for the symbol worker thread. Kernel
// addresses (top bit set) are not resolvable here, so they are answered
// immediately with a placeholder "<kernel>" symbol record instead.
void Profiler::QueueSymbolQuery( uint64_t symbol )
{
#ifdef TRACY_HAS_CALLSTACK
    // Special handling for kernel frames
    if( symbol >> 63 != 0 )
    {
        SendSingleString( "<kernel>" );
        QueueItem item;
        MemWrite( &item.hdr.type, QueueType::SymbolInformation );
        MemWrite( &item.symbolInformation.line, 0 );
        MemWrite( &item.symbolInformation.symAddr, symbol );
        AppendData( &item, QueueDataSize[(int)QueueType::SymbolInformation] );
    }
    else
    {
        m_symbolQueue.emplace( SymbolQueueItem { SymbolQueueItemType::SymbolQuery, symbol } );
    }
#else
    AckServerQuery();
#endif
}
+
// Request file/line resolution for a code address. Deferred to the symbol
// worker thread when callstack support is compiled in; otherwise the
// server query is acknowledged immediately.
void Profiler::QueueCodeLocation( uint64_t ptr )
{
#ifdef TRACY_HAS_CALLSTACK
    m_symbolQueue.emplace( SymbolQueueItem { SymbolQueueItemType::CodeLocation, ptr } );
#else
    AckServerQuery();
#endif
}
+
// Request name resolution for an external (non-instrumented) thread.
// Only meaningful when system tracing is available; otherwise the request
// is silently dropped (no acknowledgement is sent for this query type).
void Profiler::QueueExternalName( uint64_t ptr )
{
#ifdef TRACY_HAS_SYSTEM_TRACING
    m_symbolQueue.emplace( SymbolQueueItem { SymbolQueueItemType::ExternalName, ptr } );
#endif
}
+
// Request transfer of kernel-space machine code (addresses with the top
// bit set). Deferred to the symbol worker when callstack support is
// compiled in; otherwise the server is told the code is unavailable.
void Profiler::QueueKernelCode( uint64_t symbol, uint32_t size )
{
    assert( symbol >> 63 != 0 );
#ifdef TRACY_HAS_CALLSTACK
    m_symbolQueue.emplace( SymbolQueueItem { SymbolQueueItemType::KernelCode, symbol, size } );
#else
    AckSymbolCodeNotAvailable();
#endif
}
+
+#ifdef TRACY_HAS_CALLSTACK
// Resolve one queued symbol-related request on the symbol worker thread
// and publish the result back through the lock-free queue as a fat item
// (TracyLfqPrepare/TracyLfqCommit introduce and commit `item`); the main
// dequeue loop later sends the strings and frees the buffers.
void Profiler::HandleSymbolQueueItem( const SymbolQueueItem& si )
{
    switch( si.type )
    {
    case SymbolQueueItemType::CallstackFrame:
    {
        // Decode the address into frames and hand off a heap copy of the
        // frame array; ownership passes to the queue consumer.
        const auto frameData = DecodeCallstackPtr( si.ptr );
        auto data = tracy_malloc_fast( sizeof( CallstackEntry ) * frameData.size );
        memcpy( data, frameData.data, sizeof( CallstackEntry ) * frameData.size );
        TracyLfqPrepare( QueueType::CallstackFrameSize );
        MemWrite( &item->callstackFrameSizeFat.ptr, si.ptr );
        MemWrite( &item->callstackFrameSizeFat.size, frameData.size );
        MemWrite( &item->callstackFrameSizeFat.data, (uint64_t)data );
        MemWrite( &item->callstackFrameSizeFat.imageName, (uint64_t)frameData.imageName );
        TracyLfqCommit;
        break;
    }
    case SymbolQueueItemType::SymbolQuery:
    {
#ifdef __ANDROID__
        // On Android it's common for code to be in mappings that are only executable
        // but not readable.
        if( !EnsureReadable( si.ptr ) )
        {
            TracyLfqPrepare( QueueType::AckServerQueryNoop );
            TracyLfqCommit;
            break;
        }
#endif
        const auto sym = DecodeSymbolAddress( si.ptr );
        TracyLfqPrepare( QueueType::SymbolInformation );
        MemWrite( &item->symbolInformationFat.line, sym.line );
        MemWrite( &item->symbolInformationFat.symAddr, si.ptr );
        MemWrite( &item->symbolInformationFat.fileString, (uint64_t)sym.file );
        MemWrite( &item->symbolInformationFat.needFree, (uint8_t)sym.needFree );
        TracyLfqCommit;
        break;
    }
    case SymbolQueueItemType::CodeLocation:
    {
        const auto sym = DecodeCodeAddress( si.ptr );
        const uint64_t offset = si.ptr - sym.symAddr;
        TracyLfqPrepare( QueueType::CodeInformation );
        MemWrite( &item->codeInformationFat.ptrOffset, offset );
        MemWrite( &item->codeInformationFat.line, sym.line );
        MemWrite( &item->codeInformationFat.symAddr, sym.symAddr );
        MemWrite( &item->codeInformationFat.fileString, (uint64_t)sym.file );
        MemWrite( &item->codeInformationFat.needFree, (uint8_t)sym.needFree );
        TracyLfqCommit;
        break;
    }
#ifdef TRACY_HAS_SYSTEM_TRACING
    case SymbolQueueItemType::ExternalName:
    {
        const char* threadName;
        const char* name;
        SysTraceGetExternalName( si.ptr, threadName, name );
        TracyLfqPrepare( QueueType::ExternalNameMetadata );
        MemWrite( &item->externalNameMetadata.thread, si.ptr );
        MemWrite( &item->externalNameMetadata.name, (uint64_t)name );
        MemWrite( &item->externalNameMetadata.threadName, (uint64_t)threadName );
        TracyLfqCommit;
        break;
    }
#endif
    case SymbolQueueItemType::KernelCode:
    {
#ifdef _WIN32
        // Kernel code cannot be read in-place; locate the module file, load
        // it without resolving imports, and copy the bytes from that image.
        auto mod = GetKernelModulePath( si.ptr );
        if( mod )
        {
            auto fn = DecodeCallstackPtrFast( si.ptr );
            if( *fn )
            {
                auto hnd = LoadLibraryExA( mod, nullptr, DONT_RESOLVE_DLL_REFERENCES );
                if( hnd )
                {
                    auto ptr = (const void*)GetProcAddress( hnd, fn );
                    if( ptr )
                    {
                        auto buf = (char*)tracy_malloc( si.extra );
                        memcpy( buf, ptr, si.extra );
                        FreeLibrary( hnd );
                        TracyLfqPrepare( QueueType::SymbolCodeMetadata );
                        MemWrite( &item->symbolCodeMetadata.symbol, si.ptr );
                        MemWrite( &item->symbolCodeMetadata.ptr, (uint64_t)buf );
                        MemWrite( &item->symbolCodeMetadata.size, (uint32_t)si.extra );
                        TracyLfqCommit;
                        break;
                    }
                    FreeLibrary( hnd );
                }
            }
        }
#endif
        // Any failure above falls through to "code not available".
        TracyLfqPrepare( QueueType::AckSymbolCodeNotAvailable );
        TracyLfqCommit;
        break;
    }
    default:
        assert( false );
        break;
    }
}
+
// Dedicated thread that resolves queued symbol/callstack lookups. Waits
// until the profiler has started (m_timeBegin becomes non-zero) before
// doing any work. In on-demand builds the queue is discarded while no
// client is connected. Exits only when ShouldExit() was observed and no
// request is pending.
void Profiler::SymbolWorker()
{
    ThreadExitHandler threadExitHandler;
    SetThreadName( "Tracy Symbol Worker" );
    // Spin until the profiler is fully initialized.
    while( m_timeBegin.load( std::memory_order_relaxed ) == 0 ) std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
    rpmalloc_thread_initialize();

    for(;;)
    {
        // Sample the exit flag before looking at the queue so a request
        // that arrives just before shutdown is still processed below.
        const auto shouldExit = ShouldExit();
#ifdef TRACY_ON_DEMAND
        if( !IsConnected() )
        {
            if( shouldExit ) return;
            // No client attached: drop pending requests instead of
            // resolving them.
            while( m_symbolQueue.front() ) m_symbolQueue.pop();
            std::this_thread::sleep_for( std::chrono::milliseconds( 20 ) );
            continue;
        }
#endif
        auto si = m_symbolQueue.front();
        if( si )
        {
            HandleSymbolQueueItem( *si );
            m_symbolQueue.pop();
        }
        else
        {
            if( shouldExit ) return;
            std::this_thread::sleep_for( std::chrono::milliseconds( 20 ) );
        }
    }
}
+#endif
+
// Read one query packet from the server socket and dispatch it. Returns
// false when the read timed out or failed, on an explicit terminate, or
// after a disconnect request has been processed.
bool Profiler::HandleServerQuery()
{
    ServerQueryPacket payload;
    if( !m_sock->Read( &payload, sizeof( payload ), 10 ) ) return false;

    // Field-wise memcpy out of the wire-format packet (avoids potentially
    // unaligned direct reads).
    uint8_t type;
    uint64_t ptr;
    uint32_t extra;
    memcpy( &type, &payload.type, sizeof( payload.type ) );
    memcpy( &ptr, &payload.ptr, sizeof( payload.ptr ) );
    memcpy( &extra, &payload.extra, sizeof( payload.extra ) );

    switch( type )
    {
    case ServerQueryString:
        // `ptr` is both the string's address and its identity key.
        SendString( ptr, (const char*)ptr, QueueType::StringData );
        break;
    case ServerQueryThreadString:
        if( ptr == m_mainThread )
        {
            SendString( ptr, "Main thread", 11, QueueType::ThreadName );
        }
        else
        {
            SendString( ptr, GetThreadName( ptr ), QueueType::ThreadName );
        }
        break;
    case ServerQuerySourceLocation:
        SendSourceLocation( ptr );
        break;
    case ServerQueryPlotName:
        SendString( ptr, (const char*)ptr, QueueType::PlotName );
        break;
    case ServerQueryTerminate:
        return false;
    case ServerQueryCallstackFrame:
        QueueCallstackFrame( ptr );
        break;
    case ServerQueryFrameName:
        SendString( ptr, (const char*)ptr, QueueType::FrameName );
        break;
    case ServerQueryDisconnect:
        HandleDisconnect();
        return false;
#ifdef TRACY_HAS_SYSTEM_TRACING
    case ServerQueryExternalName:
        QueueExternalName( ptr );
        break;
#endif
    case ServerQueryParameter:
        HandleParameter( ptr );
        break;
    case ServerQuerySymbol:
        QueueSymbolQuery( ptr );
        break;
#ifndef TRACY_NO_CODE_TRANSFER
    case ServerQuerySymbolCode:
        HandleSymbolCodeQuery( ptr, extra );
        break;
#endif
    case ServerQueryCodeLocation:
        QueueCodeLocation( ptr );
        break;
    case ServerQuerySourceCode:
        HandleSourceCodeQuery();
        break;
    case ServerQueryDataTransfer:
        // `ptr` announces the total transfer size. The +11 headroom lets
        // the fixed 12-byte part writes below overrun the announced size by
        // up to 11 bytes on the final chunk.
        assert( !m_queryData );
        m_queryDataPtr = m_queryData = (char*)tracy_malloc( ptr + 11 );
        AckServerQuery();
        break;
    case ServerQueryDataTransferPart:
        // Each part carries 12 bytes of payload: 8 in `ptr`, 4 in `extra`.
        memcpy( m_queryDataPtr, &ptr, 8 );
        memcpy( m_queryDataPtr+8, &extra, 4 );
        m_queryDataPtr += 12;
        AckServerQuery();
        break;
#ifdef TRACY_FIBERS
    case ServerQueryFiberName:
        SendString( ptr, (const char*)ptr, QueueType::FiberName );
        break;
#endif
    default:
        assert( false );
        break;
    }

    return true;
}
+
// Orderly disconnect handshake. With system tracing active, first drain
// pending context-switch events up to the disconnect timestamp while still
// answering server queries; then send a Terminate marker and keep clearing
// the queues / servicing queries until the server side stops the exchange
// (any send or read failure simply exits this function).
void Profiler::HandleDisconnect()
{
    moodycamel::ConsumerToken token( GetQueue() );

#ifdef TRACY_HAS_SYSTEM_TRACING
    if( s_sysTraceThread )
    {
        auto timestamp = GetTime();
        for(;;)
        {
            const auto status = DequeueContextSwitches( token, timestamp );
            if( status == DequeueStatus::ConnectionLost )
            {
                return;
            }
            else if( status == DequeueStatus::QueueEmpty )
            {
                if( m_bufferOffset != m_bufferStart )
                {
                    if( !CommitData() ) return;
                }
            }
            // `timestamp` goes negative once all context-switch data up to
            // the disconnect time has been consumed (see
            // DequeueContextSwitches).
            if( timestamp < 0 )
            {
                if( m_bufferOffset != m_bufferStart )
                {
                    if( !CommitData() ) return;
                }
                break;
            }
            ClearSerial();
            if( m_sock->HasData() )
            {
                while( m_sock->HasData() )
                {
                    if( !HandleServerQuery() ) return;
                }
                if( m_bufferOffset != m_bufferStart )
                {
                    if( !CommitData() ) return;
                }
            }
            else
            {
                if( m_bufferOffset != m_bufferStart )
                {
                    if( !CommitData() ) return;
                }
                std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
            }
        }
    }
#endif

    // Announce that no more event data will follow.
    QueueItem terminate;
    MemWrite( &terminate.hdr.type, QueueType::Terminate );
    if( !SendData( (const char*)&terminate, 1 ) ) return;
    for(;;)
    {
        // Keep discarding producer-side data while answering any remaining
        // server queries until the connection winds down.
        ClearQueues( token );
        if( m_sock->HasData() )
        {
            while( m_sock->HasData() )
            {
                if( !HandleServerQuery() ) return;
            }
            if( m_bufferOffset != m_bufferStart )
            {
                if( !CommitData() ) return;
            }
        }
        else
        {
            if( m_bufferOffset != m_bufferStart )
            {
                if( !CommitData() ) return;
            }
            std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
        }
    }
}
+
// Computes m_timerMul, the factor converting raw hardware-timer ticks to
// nanoseconds, by comparing the raw timer against std::chrono over ~200 ms.
// Stays at 1.0 when no hardware timer (or no calibration) is needed.
void Profiler::CalibrateTimer()
{
    m_timerMul = 1.;

#ifdef TRACY_HW_TIMER

# if !defined TRACY_TIMER_QPC && defined TRACY_TIMER_FALLBACK
    // With a fallback configured, only calibrate when the TSC is actually usable.
    const bool needCalibration = HardwareSupportsInvariantTSC();
# else
    const bool needCalibration = true;
# endif
    if( needCalibration )
    {
        // Signal fences keep the compiler from reordering the paired reads.
        std::atomic_signal_fence( std::memory_order_acq_rel );
        const auto t0 = std::chrono::high_resolution_clock::now();
        const auto r0 = GetTime();
        std::atomic_signal_fence( std::memory_order_acq_rel );
        std::this_thread::sleep_for( std::chrono::milliseconds( 200 ) );
        std::atomic_signal_fence( std::memory_order_acq_rel );
        const auto t1 = std::chrono::high_resolution_clock::now();
        const auto r1 = GetTime();
        std::atomic_signal_fence( std::memory_order_acq_rel );

        const auto dt = std::chrono::duration_cast<std::chrono::nanoseconds>( t1 - t0 ).count();
        const auto dr = r1 - r0;

        // ns elapsed per raw tick.
        m_timerMul = double( dt ) / double( dr );
    }
#endif
}
+
// Measures the timer resolution (m_resolution) and the average overhead of
// emitting one queue event (m_delay).
void Profiler::CalibrateDelay()
{
    constexpr int Iterations = 50000;

    // Resolution = smallest positive delta between two back-to-back timer reads.
    auto mindiff = std::numeric_limits<int64_t>::max();
    for( int i=0; i<Iterations * 10; i++ )
    {
        const auto t0i = GetTime();
        const auto t1i = GetTime();
        const auto dti = t1i - t0i;
        if( dti > 0 && dti < mindiff ) mindiff = dti;
    }
    m_resolution = mindiff;

#ifdef TRACY_DELAYED_INIT
    m_delay = m_resolution;
#else
    constexpr int Events = Iterations * 2;  // start + end
    // The queue must not grow during the loop or malloc cost would skew the result.
    static_assert( Events < QueuePrealloc, "Delay calibration loop will allocate memory in queue" );

    static const tracy::SourceLocationData __tracy_source_location { nullptr, __FUNCTION__, __FILE__, (uint32_t)__LINE__, 0 };
    const auto t0 = GetTime();
    for( int i=0; i<Iterations; i++ )
    {
        // Emit a begin/end pair exactly like an instrumented zone would.
        {
            TracyLfqPrepare( QueueType::ZoneBegin );
            MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
            MemWrite( &item->zoneBegin.srcloc, (uint64_t)&__tracy_source_location );
            TracyLfqCommit;
        }
        {
            TracyLfqPrepare( QueueType::ZoneEnd );
            MemWrite( &item->zoneEnd.time, GetTime() );
            TracyLfqCommit;
        }
    }
    const auto t1 = GetTime();
    const auto dt = t1 - t0;
    m_delay = dt / Events;

    // Drain the calibration events so they never reach the server.
    moodycamel::ConsumerToken token( GetQueue() );
    int left = Events;
    while( left != 0 )
    {
        const auto sz = GetQueue().try_dequeue_bulk_single( token, [](const uint64_t&){}, [](QueueItem* item, size_t sz){} );
        assert( sz > 0 );
        left -= (int)sz;
    }
    assert( GetQueue().size_approx() == 0 );
#endif
}
+
+void Profiler::ReportTopology()
+{
+#ifndef TRACY_DELAYED_INIT
+ struct CpuData
+ {
+ uint32_t package;
+ uint32_t core;
+ uint32_t thread;
+ };
+
+#if defined _WIN32
+# ifdef TRACY_UWP
+ t_GetLogicalProcessorInformationEx _GetLogicalProcessorInformationEx = &::GetLogicalProcessorInformationEx;
+# else
+ t_GetLogicalProcessorInformationEx _GetLogicalProcessorInformationEx = (t_GetLogicalProcessorInformationEx)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "GetLogicalProcessorInformationEx" );
+# endif
+ if( !_GetLogicalProcessorInformationEx ) return;
+
+ DWORD psz = 0;
+ _GetLogicalProcessorInformationEx( RelationProcessorPackage, nullptr, &psz );
+ auto packageInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)tracy_malloc( psz );
+ auto res = _GetLogicalProcessorInformationEx( RelationProcessorPackage, packageInfo, &psz );
+ assert( res );
+
+ DWORD csz = 0;
+ _GetLogicalProcessorInformationEx( RelationProcessorCore, nullptr, &csz );
+ auto coreInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)tracy_malloc( csz );
+ res = _GetLogicalProcessorInformationEx( RelationProcessorCore, coreInfo, &csz );
+ assert( res );
+
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo( &sysinfo );
+ const uint32_t numcpus = sysinfo.dwNumberOfProcessors;
+
+ auto cpuData = (CpuData*)tracy_malloc( sizeof( CpuData ) * numcpus );
+ for( uint32_t i=0; i<numcpus; i++ ) cpuData[i].thread = i;
+
+ int idx = 0;
+ auto ptr = packageInfo;
+ while( (char*)ptr < ((char*)packageInfo) + psz )
+ {
+ assert( ptr->Relationship == RelationProcessorPackage );
+ // FIXME account for GroupCount
+ auto mask = ptr->Processor.GroupMask[0].Mask;
+ int core = 0;
+ while( mask != 0 )
+ {
+ if( mask & 1 ) cpuData[core].package = idx;
+ core++;
+ mask >>= 1;
+ }
+ ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)(((char*)ptr) + ptr->Size);
+ idx++;
+ }
+
+ idx = 0;
+ ptr = coreInfo;
+ while( (char*)ptr < ((char*)coreInfo) + csz )
+ {
+ assert( ptr->Relationship == RelationProcessorCore );
+ // FIXME account for GroupCount
+ auto mask = ptr->Processor.GroupMask[0].Mask;
+ int core = 0;
+ while( mask != 0 )
+ {
+ if( mask & 1 ) cpuData[core].core = idx;
+ core++;
+ mask >>= 1;
+ }
+ ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)(((char*)ptr) + ptr->Size);
+ idx++;
+ }
+
+ for( uint32_t i=0; i<numcpus; i++ )
+ {
+ auto& data = cpuData[i];
+
+ TracyLfqPrepare( QueueType::CpuTopology );
+ MemWrite( &item->cpuTopology.package, data.package );
+ MemWrite( &item->cpuTopology.core, data.core );
+ MemWrite( &item->cpuTopology.thread, data.thread );
+
+#ifdef TRACY_ON_DEMAND
+ DeferItem( *item );
+#endif
+
+ TracyLfqCommit;
+ }
+
+ tracy_free( cpuData );
+ tracy_free( coreInfo );
+ tracy_free( packageInfo );
+#elif defined __linux__
+ const int numcpus = std::thread::hardware_concurrency();
+ auto cpuData = (CpuData*)tracy_malloc( sizeof( CpuData ) * numcpus );
+ memset( cpuData, 0, sizeof( CpuData ) * numcpus );
+
+ const char* basePath = "/sys/devices/system/cpu/cpu";
+ for( int i=0; i<numcpus; i++ )
+ {
+ char path[1024];
+ sprintf( path, "%s%i/topology/physical_package_id", basePath, i );
+ char buf[1024];
+ FILE* f = fopen( path, "rb" );
+ if( !f )
+ {
+ tracy_free( cpuData );
+ return;
+ }
+ auto read = fread( buf, 1, 1024, f );
+ buf[read] = '\0';
+ fclose( f );
+ cpuData[i].package = uint32_t( atoi( buf ) );
+ cpuData[i].thread = i;
+ sprintf( path, "%s%i/topology/core_id", basePath, i );
+ f = fopen( path, "rb" );
+ read = fread( buf, 1, 1024, f );
+ buf[read] = '\0';
+ fclose( f );
+ cpuData[i].core = uint32_t( atoi( buf ) );
+ }
+
+ for( int i=0; i<numcpus; i++ )
+ {
+ auto& data = cpuData[i];
+
+ TracyLfqPrepare( QueueType::CpuTopology );
+ MemWrite( &item->cpuTopology.package, data.package );
+ MemWrite( &item->cpuTopology.core, data.core );
+ MemWrite( &item->cpuTopology.thread, data.thread );
+
+#ifdef TRACY_ON_DEMAND
+ DeferItem( *item );
+#endif
+
+ TracyLfqCommit;
+ }
+
+ tracy_free( cpuData );
+#endif
+#endif
+}
+
// Captures the current callstack (up to `depth` frames), trims everything up
// to and including the frame matching `skipBefore`, and queues the resulting
// fat callstack pointer. No-op without callstack support.
void Profiler::SendCallstack( int depth, const char* skipBefore )
{
#ifdef TRACY_HAS_CALLSTACK
    auto ptr = Callstack( depth );
    CutCallstack( ptr, skipBefore );

    // NOTE(review): ptr ownership presumably passes to the queue consumer -- confirm.
    TracyQueuePrepare( QueueType::Callstack );
    MemWrite( &item->callstackFat.ptr, (uint64_t)ptr );
    TracyQueueCommit( callstackFatThread );
#endif
}
+
+void Profiler::CutCallstack( void* callstack, const char* skipBefore )
+{
+#ifdef TRACY_HAS_CALLSTACK
+ auto data = (uintptr_t*)callstack;
+ const auto sz = *data++;
+ uintptr_t i;
+ for( i=0; i<sz; i++ )
+ {
+ auto name = DecodeCallstackPtrFast( uint64_t( data[i] ) );
+ const bool found = strcmp( name, skipBefore ) == 0;
+ if( found )
+ {
+ i++;
+ break;
+ }
+ }
+
+ if( i != sz )
+ {
+ memmove( data, data + i, ( sz - i ) * sizeof( uintptr_t* ) );
+ *--data = sz - i;
+ }
+#endif
+}
+
#ifdef TRACY_HAS_SYSTIME
// Samples system CPU usage at most once every 100 ms and queues it as a
// SysTimeReport event. Skipped once shutdown has begun.
void Profiler::ProcessSysTime()
{
    if( m_shutdown.load( std::memory_order_relaxed ) ) return;
    auto t = std::chrono::high_resolution_clock::now().time_since_epoch().count();
    if( t - m_sysTimeLast > 100000000 )    // 100 ms
    {
        auto sysTime = m_sysTime.Get();
        // NOTE(review): a negative value presumably means "no sample available" -- confirm.
        if( sysTime >= 0 )
        {
            m_sysTimeLast = t;

            TracyLfqPrepare( QueueType::SysTimeReport );
            MemWrite( &item->sysTime.time, GetTime() );
            MemWrite( &item->sysTime.sysTime, sysTime );
            TracyLfqCommit;
        }
    }
}
#endif
+
+void Profiler::HandleParameter( uint64_t payload )
+{
+ assert( m_paramCallback );
+ const auto idx = uint32_t( payload >> 32 );
+ const auto val = int32_t( payload & 0xFFFFFFFF );
+ m_paramCallback( idx, val );
+ AckServerQuery();
+}
+
+void Profiler::HandleSymbolCodeQuery( uint64_t symbol, uint32_t size )
+{
+ if( symbol >> 63 != 0 )
+ {
+ QueueKernelCode( symbol, size );
+ }
+ else
+ {
+#ifdef __ANDROID__
+ // On Android it's common for code to be in mappings that are only executable
+ // but not readable.
+ if( !EnsureReadable( symbol ) )
+ {
+ AckSymbolCodeNotAvailable();
+ return;
+ }
+#endif
+ SendLongString( symbol, (const char*)symbol, size, QueueType::SymbolCode );
+ }
+}
+
// Answers a server request for a source file. The file path was previously
// assembled into m_queryData via ServerQueryDataTransfer[Part]; it is sent
// only if it still exists, was not modified after the executable was built
// (m_exectime), and fits into a single frame. m_queryData is always freed
// and reset here.
void Profiler::HandleSourceCodeQuery()
{
    assert( m_exectime != 0 );
    assert( m_queryData );

    InitRpmalloc();
    struct stat st;
    if( stat( m_queryData, &st ) == 0 && (uint64_t)st.st_mtime < m_exectime && st.st_size < ( TargetFrameSize - 16 ) )
    {
        FILE* f = fopen( m_queryData, "rb" );
        // Path string is no longer needed once the file handle is open (or failed).
        tracy_free_fast( m_queryData );
        if( f )
        {
            auto ptr = (char*)tracy_malloc_fast( st.st_size );
            auto rd = fread( ptr, 1, st.st_size, f );
            fclose( f );
            if( rd == (size_t)st.st_size )
            {
                SendLongString( (uint64_t)ptr, ptr, rd, QueueType::SourceCode );
            }
            else
            {
                // Short read: file changed underneath us, report as unavailable.
                AckSourceCodeNotAvailable();
            }
            tracy_free_fast( ptr );
        }
        else
        {
            AckSourceCodeNotAvailable();
        }
    }
    else
    {
        tracy_free_fast( m_queryData );
        AckSourceCodeNotAvailable();
    }
    m_queryData = nullptr;
}
+
#if defined _WIN32 && defined TRACY_TIMER_QPC
// Raw QueryPerformanceCounter reading, used when TRACY_TIMER_QPC selects the
// QPC clock instead of the TSC.
int64_t Profiler::GetTimeQpc()
{
    LARGE_INTEGER t;
    QueryPerformanceCounter( &t );
    return t.QuadPart;
}
#endif
+
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// C API: begin a zone referencing a statically allocated source location.
// Returns a context whose `active` flag gates all follow-up zone calls.
TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin( const struct ___tracy_source_location_data* srcloc, int active )
{
    ___tracy_c_zone_context ctx;
#ifdef TRACY_ON_DEMAND
    // Only record while a server is connected.
    ctx.active = active && tracy::GetProfiler().IsConnected();
#else
    ctx.active = active;
#endif
    if( !ctx.active ) return ctx;
    const auto id = tracy::GetProfiler().GetNextZoneId();
    ctx.id = id;

#ifndef TRACY_NO_VERIFY
    {
        // Emit a validation id so the server can detect mismatched begin/end pairs.
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneBegin );
        tracy::MemWrite( &item->zoneBegin.time, tracy::Profiler::GetTime() );
        tracy::MemWrite( &item->zoneBegin.srcloc, (uint64_t)srcloc );
        TracyQueueCommitC( zoneBeginThread );
    }
    return ctx;
}
+
// C API: like ___tracy_emit_zone_begin, but additionally captures and sends a
// callstack of up to `depth` frames before the zone-begin event.
TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin_callstack( const struct ___tracy_source_location_data* srcloc, int depth, int active )
{
    ___tracy_c_zone_context ctx;
#ifdef TRACY_ON_DEMAND
    ctx.active = active && tracy::GetProfiler().IsConnected();
#else
    ctx.active = active;
#endif
    if( !ctx.active ) return ctx;
    const auto id = tracy::GetProfiler().GetNextZoneId();
    ctx.id = id;

#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    // Callstack must precede the zone-begin event it annotates.
    tracy::GetProfiler().SendCallstack( depth );
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneBeginCallstack );
        tracy::MemWrite( &item->zoneBegin.time, tracy::Profiler::GetTime() );
        tracy::MemWrite( &item->zoneBegin.srcloc, (uint64_t)srcloc );
        TracyQueueCommitC( zoneBeginThread );
    }
    return ctx;
}
+
// C API: begin a zone with a dynamically allocated source location (created by
// ___tracy_alloc_srcloc*). Takes ownership of `srcloc` and frees it when the
// zone is inactive.
TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin_alloc( uint64_t srcloc, int active )
{
    ___tracy_c_zone_context ctx;
#ifdef TRACY_ON_DEMAND
    ctx.active = active && tracy::GetProfiler().IsConnected();
#else
    ctx.active = active;
#endif
    if( !ctx.active )
    {
        // The allocated source location would otherwise leak.
        tracy::tracy_free( (void*)srcloc );
        return ctx;
    }
    const auto id = tracy::GetProfiler().GetNextZoneId();
    ctx.id = id;

#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneBeginAllocSrcLoc );
        tracy::MemWrite( &item->zoneBegin.time, tracy::Profiler::GetTime() );
        tracy::MemWrite( &item->zoneBegin.srcloc, srcloc );
        TracyQueueCommitC( zoneBeginThread );
    }
    return ctx;
}
+
// C API: dynamically allocated source location plus a captured callstack.
// Takes ownership of `srcloc`, freeing it when the zone is inactive.
TRACY_API TracyCZoneCtx ___tracy_emit_zone_begin_alloc_callstack( uint64_t srcloc, int depth, int active )
{
    ___tracy_c_zone_context ctx;
#ifdef TRACY_ON_DEMAND
    ctx.active = active && tracy::GetProfiler().IsConnected();
#else
    ctx.active = active;
#endif
    if( !ctx.active )
    {
        // The allocated source location would otherwise leak.
        tracy::tracy_free( (void*)srcloc );
        return ctx;
    }
    const auto id = tracy::GetProfiler().GetNextZoneId();
    ctx.id = id;

#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    // Callstack must precede the zone-begin event it annotates.
    tracy::GetProfiler().SendCallstack( depth );
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneBeginAllocSrcLocCallstack );
        tracy::MemWrite( &item->zoneBegin.time, tracy::Profiler::GetTime() );
        tracy::MemWrite( &item->zoneBegin.srcloc, srcloc );
        TracyQueueCommitC( zoneBeginThread );
    }
    return ctx;
}
+
// C API: end the zone identified by `ctx`. No-op when the zone was inactive.
TRACY_API void ___tracy_emit_zone_end( TracyCZoneCtx ctx )
{
    if( !ctx.active ) return;
#ifndef TRACY_NO_VERIFY
    {
        // Re-send the id so the server can pair this end with its begin.
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, ctx.id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneEnd );
        tracy::MemWrite( &item->zoneEnd.time, tracy::Profiler::GetTime() );
        TracyQueueCommitC( zoneEndThread );
    }
}
+
// C API: attach free-form text to the active zone. The text is copied into a
// profiler-owned buffer; size must fit in 16 bits.
TRACY_API void ___tracy_emit_zone_text( TracyCZoneCtx ctx, const char* txt, size_t size )
{
    assert( size < std::numeric_limits<uint16_t>::max() );
    if( !ctx.active ) return;
    // Copy now; the caller's buffer may not outlive the queue consumer.
    auto ptr = (char*)tracy::tracy_malloc( size );
    memcpy( ptr, txt, size );
#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, ctx.id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneText );
        tracy::MemWrite( &item->zoneTextFat.text, (uint64_t)ptr );
        tracy::MemWrite( &item->zoneTextFat.size, (uint16_t)size );
        TracyQueueCommitC( zoneTextFatThread );
    }
}
+
// C API: override the active zone's display name. The name is copied into a
// profiler-owned buffer; size must fit in 16 bits.
TRACY_API void ___tracy_emit_zone_name( TracyCZoneCtx ctx, const char* txt, size_t size )
{
    assert( size < std::numeric_limits<uint16_t>::max() );
    if( !ctx.active ) return;
    // Copy now; the caller's buffer may not outlive the queue consumer.
    auto ptr = (char*)tracy::tracy_malloc( size );
    memcpy( ptr, txt, size );
#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, ctx.id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        // Same payload layout as ZoneText, distinguished by the queue type.
        TracyQueuePrepareC( tracy::QueueType::ZoneName );
        tracy::MemWrite( &item->zoneTextFat.text, (uint64_t)ptr );
        tracy::MemWrite( &item->zoneTextFat.size, (uint16_t)size );
        TracyQueueCommitC( zoneTextFatThread );
    }
}
+
// C API: set the active zone's color. The 32-bit color is split into three
// component bytes (low byte into r, next into g, next into b); the server
// reassembles them -- see TracyQueue.hpp for the wire layout.
TRACY_API void ___tracy_emit_zone_color( TracyCZoneCtx ctx, uint32_t color ) {
    if( !ctx.active ) return;
#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, ctx.id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneColor );
        tracy::MemWrite( &item->zoneColor.r, uint8_t( ( color ) & 0xFF ) );
        tracy::MemWrite( &item->zoneColor.g, uint8_t( ( color >> 8 ) & 0xFF ) );
        tracy::MemWrite( &item->zoneColor.b, uint8_t( ( color >> 16 ) & 0xFF ) );
        TracyQueueCommitC( zoneColorThread );
    }
}
+
// C API: attach a numeric value to the active zone.
TRACY_API void ___tracy_emit_zone_value( TracyCZoneCtx ctx, uint64_t value )
{
    if( !ctx.active ) return;
#ifndef TRACY_NO_VERIFY
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValidation );
        tracy::MemWrite( &item->zoneValidation.id, ctx.id );
        TracyQueueCommitC( zoneValidationThread );
    }
#endif
    {
        TracyQueuePrepareC( tracy::QueueType::ZoneValue );
        tracy::MemWrite( &item->zoneValue.value, value );
        TracyQueueCommitC( zoneValueThread );
    }
}
+
// Thin C-linkage wrappers forwarding to the corresponding static Profiler
// entry points (memory events, frame marks, frame images, plots, messages).
TRACY_API void ___tracy_emit_memory_alloc( const void* ptr, size_t size, int secure ) { tracy::Profiler::MemAlloc( ptr, size, secure != 0 ); }
TRACY_API void ___tracy_emit_memory_alloc_callstack( const void* ptr, size_t size, int depth, int secure ) { tracy::Profiler::MemAllocCallstack( ptr, size, depth, secure != 0 ); }
TRACY_API void ___tracy_emit_memory_free( const void* ptr, int secure ) { tracy::Profiler::MemFree( ptr, secure != 0 ); }
TRACY_API void ___tracy_emit_memory_free_callstack( const void* ptr, int depth, int secure ) { tracy::Profiler::MemFreeCallstack( ptr, depth, secure != 0 ); }
TRACY_API void ___tracy_emit_memory_alloc_named( const void* ptr, size_t size, int secure, const char* name ) { tracy::Profiler::MemAllocNamed( ptr, size, secure != 0, name ); }
TRACY_API void ___tracy_emit_memory_alloc_callstack_named( const void* ptr, size_t size, int depth, int secure, const char* name ) { tracy::Profiler::MemAllocCallstackNamed( ptr, size, depth, secure != 0, name ); }
TRACY_API void ___tracy_emit_memory_free_named( const void* ptr, int secure, const char* name ) { tracy::Profiler::MemFreeNamed( ptr, secure != 0, name ); }
TRACY_API void ___tracy_emit_memory_free_callstack_named( const void* ptr, int depth, int secure, const char* name ) { tracy::Profiler::MemFreeCallstackNamed( ptr, depth, secure != 0, name ); }
TRACY_API void ___tracy_emit_frame_mark( const char* name ) { tracy::Profiler::SendFrameMark( name ); }
TRACY_API void ___tracy_emit_frame_mark_start( const char* name ) { tracy::Profiler::SendFrameMark( name, tracy::QueueType::FrameMarkMsgStart ); }
TRACY_API void ___tracy_emit_frame_mark_end( const char* name ) { tracy::Profiler::SendFrameMark( name, tracy::QueueType::FrameMarkMsgEnd ); }
TRACY_API void ___tracy_emit_frame_image( const void* image, uint16_t w, uint16_t h, uint8_t offset, int flip ) { tracy::Profiler::SendFrameImage( image, w, h, offset, flip ); }
TRACY_API void ___tracy_emit_plot( const char* name, double val ) { tracy::Profiler::PlotData( name, val ); }
TRACY_API void ___tracy_emit_message( const char* txt, size_t size, int callstack ) { tracy::Profiler::Message( txt, size, callstack ); }
TRACY_API void ___tracy_emit_messageL( const char* txt, int callstack ) { tracy::Profiler::Message( txt, callstack ); }
TRACY_API void ___tracy_emit_messageC( const char* txt, size_t size, uint32_t color, int callstack ) { tracy::Profiler::MessageColor( txt, size, color, callstack ); }
TRACY_API void ___tracy_emit_messageLC( const char* txt, uint32_t color, int callstack ) { tracy::Profiler::MessageColor( txt, color, callstack ); }
TRACY_API void ___tracy_emit_message_appinfo( const char* txt, size_t size ) { tracy::Profiler::MessageAppInfo( txt, size ); }
+
// C API: allocate a dynamic source location (optionally with a zone name) for
// use with the *_begin_alloc entry points, which take ownership of the result.
TRACY_API uint64_t ___tracy_alloc_srcloc( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz ) {
    return tracy::Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz );
}

TRACY_API uint64_t ___tracy_alloc_srcloc_name( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz ) {
    return tracy::Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
}
+
+TRACY_API void ___tracy_emit_gpu_zone_begin_alloc( const struct ___tracy_gpu_zone_begin_data data )
+{
+ TracyLfqPrepareC( tracy::QueueType::GpuZoneBeginAllocSrcLoc );
+ tracy::MemWrite( &item->gpuZoneBegin.cpuTime, tracy::Profiler::GetTime() );
+ tracy::MemWrite( &item->gpuNewContext.thread, tracy::GetThreadHandle() );
+ tracy::MemWrite( &item->gpuZoneBegin.srcloc, data.srcloc );
+ tracy::MemWrite( &item->gpuZoneBegin.queryId, data.queryId );
+ tracy::MemWrite( &item->gpuZoneBegin.context, data.context );
+ TracyLfqCommitC;
+}
+
// C API: report a resolved GPU timestamp for a previously issued query id.
TRACY_API void ___tracy_emit_gpu_time( const struct ___tracy_gpu_time_data data )
{
    TracyLfqPrepareC( tracy::QueueType::GpuTime );
    tracy::MemWrite( &item->gpuTime.gpuTime, data.gpuTime );
    tracy::MemWrite( &item->gpuTime.queryId, data.queryId );
    tracy::MemWrite( &item->gpuTime.context, data.context );
    TracyLfqCommitC;
}
+
// C API: end the current GPU zone on the given context.
TRACY_API void ___tracy_emit_gpu_zone_end( const struct ___tracy_gpu_zone_end_data data )
{
    TracyLfqPrepareC( tracy::QueueType::GpuZoneEnd );
    tracy::MemWrite( &item->gpuZoneEnd.cpuTime, tracy::Profiler::GetTime() );
    // The thread field is unused for non-serial GPU zone ends; zero it out.
    memset( &item->gpuZoneEnd.thread, 0, sizeof( item->gpuZoneEnd.thread ) );
    tracy::MemWrite( &item->gpuZoneEnd.queryId, data.queryId );
    tracy::MemWrite( &item->gpuZoneEnd.context, data.context );
    TracyLfqCommitC;
}
+
// C API: announce a new GPU context, pairing a CPU timestamp with a GPU
// timestamp so the server can align the two clock domains.
TRACY_API void ___tracy_emit_gpu_new_context( ___tracy_gpu_new_context_data data )
{
    TracyLfqPrepareC( tracy::QueueType::GpuNewContext );
    tracy::MemWrite( &item->gpuNewContext.cpuTime, tracy::Profiler::GetTime() );
    tracy::MemWrite( &item->gpuNewContext.thread, tracy::GetThreadHandle() );
    tracy::MemWrite( &item->gpuNewContext.gpuTime, data.gpuTime );
    tracy::MemWrite( &item->gpuNewContext.period, data.period );
    tracy::MemWrite( &item->gpuNewContext.context, data.context );
    tracy::MemWrite( &item->gpuNewContext.flags, data.flags );
    tracy::MemWrite( &item->gpuNewContext.type, data.type );
    TracyLfqCommitC;
}
+
// C API: set a display name for a GPU context. The name is copied into a
// profiler-owned buffer before queuing.
TRACY_API void ___tracy_emit_gpu_context_name( const struct ___tracy_gpu_context_name_data data )
{
    auto ptr = (char*)tracy::tracy_malloc( data.len );
    memcpy( ptr, data.name, data.len );

    TracyLfqPrepareC( tracy::QueueType::GpuContextName );
    tracy::MemWrite( &item->gpuContextNameFat.context, data.context );
    tracy::MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
    tracy::MemWrite( &item->gpuContextNameFat.size, data.len );
    TracyLfqCommitC;
}
+
+TRACY_API void ___tracy_emit_gpu_zone_begin_alloc_serial( const struct ___tracy_gpu_zone_begin_data data )
+{
+ auto item = tracy::Profiler::QueueSerial();
+ tracy::MemWrite( &item->hdr.type, tracy::QueueType::GpuZoneBeginAllocSrcLocSerial );
+ tracy::MemWrite( &item->gpuZoneBegin.cpuTime, tracy::Profiler::GetTime() );
+ tracy::MemWrite( &item->gpuNewContext.thread, tracy::GetThreadHandle() );
+ tracy::MemWrite( &item->gpuZoneBegin.srcloc, data.srcloc );
+ tracy::MemWrite( &item->gpuZoneBegin.queryId, data.queryId );
+ tracy::MemWrite( &item->gpuZoneBegin.context, data.context );
+ tracy::Profiler::QueueSerialFinish();
+}
+
// C API (serial-queue variant): report a resolved GPU timestamp.
// NOTE(review): queues QueueType::GpuTime, not a *Serial type -- presumably
// GPU time events need no serial discrimination; confirm against TracyQueue.hpp.
TRACY_API void ___tracy_emit_gpu_time_serial( const struct ___tracy_gpu_time_data data )
{
    auto item = tracy::Profiler::QueueSerial();
    tracy::MemWrite( &item->hdr.type, tracy::QueueType::GpuTime );
    tracy::MemWrite( &item->gpuTime.gpuTime, data.gpuTime );
    tracy::MemWrite( &item->gpuTime.queryId, data.queryId );
    tracy::MemWrite( &item->gpuTime.context, data.context );
    tracy::Profiler::QueueSerialFinish();
}
+
// C API (serial-queue variant): end the current GPU zone on the given context.
TRACY_API void ___tracy_emit_gpu_zone_end_serial( const struct ___tracy_gpu_zone_end_data data )
{
    auto item = tracy::Profiler::QueueSerial();
    tracy::MemWrite( &item->hdr.type, tracy::QueueType::GpuZoneEndSerial );
    tracy::MemWrite( &item->gpuZoneEnd.cpuTime, tracy::Profiler::GetTime() );
    // The thread field is unused here; zero it out.
    memset( &item->gpuZoneEnd.thread, 0, sizeof( item->gpuZoneEnd.thread ) );
    tracy::MemWrite( &item->gpuZoneEnd.queryId, data.queryId );
    tracy::MemWrite( &item->gpuZoneEnd.context, data.context );
    tracy::Profiler::QueueSerialFinish();
}
+
// C API (serial-queue variant): announce a new GPU context.
TRACY_API void ___tracy_emit_gpu_new_context_serial( ___tracy_gpu_new_context_data data )
{
    auto item = tracy::Profiler::QueueSerial();
    tracy::MemWrite( &item->hdr.type, tracy::QueueType::GpuNewContext );
    tracy::MemWrite( &item->gpuNewContext.cpuTime, tracy::Profiler::GetTime() );
    tracy::MemWrite( &item->gpuNewContext.thread, tracy::GetThreadHandle() );
    tracy::MemWrite( &item->gpuNewContext.gpuTime, data.gpuTime );
    tracy::MemWrite( &item->gpuNewContext.period, data.period );
    tracy::MemWrite( &item->gpuNewContext.context, data.context );
    tracy::MemWrite( &item->gpuNewContext.flags, data.flags );
    tracy::MemWrite( &item->gpuNewContext.type, data.type );
    tracy::Profiler::QueueSerialFinish();
}
+
// C API (serial-queue variant): set a display name for a GPU context. The
// name is copied into a profiler-owned buffer before queuing.
TRACY_API void ___tracy_emit_gpu_context_name_serial( const struct ___tracy_gpu_context_name_data data )
{
    auto ptr = (char*)tracy::tracy_malloc( data.len );
    memcpy( ptr, data.name, data.len );

    auto item = tracy::Profiler::QueueSerial();
    tracy::MemWrite( &item->hdr.type, tracy::QueueType::GpuContextName );
    tracy::MemWrite( &item->gpuContextNameFat.context, data.context );
    tracy::MemWrite( &item->gpuContextNameFat.ptr, (uint64_t)ptr );
    tracy::MemWrite( &item->gpuContextNameFat.size, data.len );
    tracy::Profiler::QueueSerialFinish();
}
+
// C API: returns non-zero while a profiling server is connected.
TRACY_API int ___tracy_connected( void )
{
    return tracy::GetProfiler().IsConnected();
}

#ifdef TRACY_FIBERS
// C API wrappers for fiber-aware instrumentation.
TRACY_API void ___tracy_fiber_enter( const char* fiber ){ tracy::Profiler::EnterFiber( fiber ); }
TRACY_API void ___tracy_fiber_leave( void ){ tracy::Profiler::LeaveFiber(); }
#endif

# ifdef TRACY_MANUAL_LIFETIME
// C API for explicit profiler startup/shutdown when automatic (static)
// lifetime management is disabled.
TRACY_API void ___tracy_startup_profiler( void )
{
    tracy::StartupProfiler();
}

TRACY_API void ___tracy_shutdown_profiler( void )
{
    tracy::ShutdownProfiler();
}
# endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyProfiler.hpp b/3rdparty/tracy/tracy/client/TracyProfiler.hpp
new file mode 100644
index 0000000..fab7770
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyProfiler.hpp
@@ -0,0 +1,942 @@
+#ifndef __TRACYPROFILER_HPP__
+#define __TRACYPROFILER_HPP__
+
+#include <assert.h>
+#include <atomic>
+#include <stdint.h>
+#include <string.h>
+#include <time.h>
+
+#include "tracy_concurrentqueue.h"
+#include "tracy_SPSCQueue.h"
+#include "TracyCallstack.hpp"
+#include "TracySysTime.hpp"
+#include "TracyFastVector.hpp"
+#include "../common/TracyQueue.hpp"
+#include "../common/TracyAlign.hpp"
+#include "../common/TracyAlloc.hpp"
+#include "../common/TracyMutex.hpp"
+#include "../common/TracyProtocol.hpp"
+
+#if defined _WIN32
+# include <intrin.h>
+#endif
+#ifdef __APPLE__
+# include <TargetConditionals.h>
+# include <mach/mach_time.h>
+#endif
+
+#if ( defined _WIN32 || ( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 ) || ( defined TARGET_OS_IOS && TARGET_OS_IOS == 1 ) )
+# define TRACY_HW_TIMER
+#endif
+
+#if defined TRACY_TIMER_FALLBACK || !defined TRACY_HW_TIMER
+# include <chrono>
+#endif
+
+#ifndef TracyConcat
+# define TracyConcat(x,y) TracyConcatIndirect(x,y)
+#endif
+#ifndef TracyConcatIndirect
+# define TracyConcatIndirect(x,y) x##y
+#endif
+
+namespace tracy
+{
#if defined(TRACY_DELAYED_INIT) && defined(TRACY_MANUAL_LIFETIME)
// Explicit profiler lifetime control, used instead of static initialization.
TRACY_API void StartupProfiler();
TRACY_API void ShutdownProfiler();
#endif

class GpuCtx;
class Profiler;
class Socket;
class UdpBroadcast;

// Wrapper so the per-thread GPU context pointer can be handed out by reference.
struct GpuCtxWrapper
{
    GpuCtx* ptr;
};

// Accessors for the profiler singleton and associated per-process state.
TRACY_API moodycamel::ConcurrentQueue<QueueItem>::ExplicitProducer* GetToken();
TRACY_API Profiler& GetProfiler();
TRACY_API std::atomic<uint32_t>& GetLockCounter();
TRACY_API std::atomic<uint8_t>& GetGpuCtxCounter();
TRACY_API GpuCtxWrapper& GetGpuCtx();
TRACY_API uint32_t GetThreadHandle();
TRACY_API bool ProfilerAvailable();
TRACY_API bool ProfilerAllocatorAvailable();
TRACY_API int64_t GetFrequencyQpc();
+
#if defined TRACY_TIMER_FALLBACK && defined TRACY_HW_TIMER && ( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
TRACY_API bool HardwareSupportsInvariantTSC(); // check, if we need fallback scenario
#else
# if defined TRACY_HW_TIMER
// Without a configured fallback, invariant-TSC support is a hard requirement.
tracy_force_inline bool HardwareSupportsInvariantTSC()
{
    return true; // this is checked at startup
}
# else
// No hardware timer configured at all -- always use the chrono fallback.
tracy_force_inline bool HardwareSupportsInvariantTSC()
{
    return false;
}
# endif
#endif
+
+
// Static description of an instrumented source location; instances are
// expected to have program lifetime, as events reference them by pointer.
struct SourceLocationData
{
    const char* name;
    const char* function;
    const char* file;
    uint32_t line;
    uint32_t color;
};

#ifdef TRACY_ON_DEMAND
// Per-zone state for Lua instrumentation in on-demand mode.
struct LuaZoneState
{
    uint32_t counter;
    bool active;
};
#endif
+
+
// Lock-free queue emission helpers: Prepare declares `item` pointing at a
// fresh queue slot with its header type set; Commit publishes the slot by
// advancing the producer tail. The *C variants are identical but fully
// qualify tracy:: names for use inside extern "C" functions.
#define TracyLfqPrepare( _type ) \
    moodycamel::ConcurrentQueueDefaultTraits::index_t __magic; \
    auto __token = GetToken(); \
    auto& __tail = __token->get_tail_index(); \
    auto item = __token->enqueue_begin( __magic ); \
    MemWrite( &item->hdr.type, _type );

#define TracyLfqCommit \
    __tail.store( __magic + 1, std::memory_order_release );

#define TracyLfqPrepareC( _type ) \
    tracy::moodycamel::ConcurrentQueueDefaultTraits::index_t __magic; \
    auto __token = tracy::GetToken(); \
    auto& __tail = __token->get_tail_index(); \
    auto item = __token->enqueue_begin( __magic ); \
    tracy::MemWrite( &item->hdr.type, _type );

#define TracyLfqCommitC \
    __tail.store( __magic + 1, std::memory_order_release );


// With fibers enabled, events must go through the serial queue and carry an
// explicit thread field (written by Commit); otherwise the queue macros fall
// back to the plain lock-free versions above.
#ifdef TRACY_FIBERS
# define TracyQueuePrepare( _type ) \
    auto item = Profiler::QueueSerial(); \
    MemWrite( &item->hdr.type, _type );
# define TracyQueueCommit( _name ) \
    MemWrite( &item->_name.thread, GetThreadHandle() ); \
    Profiler::QueueSerialFinish();
# define TracyQueuePrepareC( _type ) \
    auto item = tracy::Profiler::QueueSerial(); \
    tracy::MemWrite( &item->hdr.type, _type );
# define TracyQueueCommitC( _name ) \
    tracy::MemWrite( &item->_name.thread, tracy::GetThreadHandle() ); \
    tracy::Profiler::QueueSerialFinish();
#else
# define TracyQueuePrepare( _type ) TracyLfqPrepare( _type )
# define TracyQueueCommit( _name ) TracyLfqCommit
# define TracyQueuePrepareC( _type ) TracyLfqPrepareC( _type )
# define TracyQueueCommitC( _name ) TracyLfqCommitC
#endif


// Callback invoked when the server changes a user-defined parameter.
typedef void(*ParameterCallback)( uint32_t idx, int32_t val );
+
+class Profiler
+{
    // Pending frame image awaiting compression/transmission by the worker.
    struct FrameImageQueueItem
    {
        void* image;
        uint32_t frame;
        uint16_t w;
        uint16_t h;
        bool flip;
    };

    // Kinds of deferred symbol-resolution work items.
    enum class SymbolQueueItemType
    {
        CallstackFrame,
        SymbolQuery,
        CodeLocation,
        ExternalName,
        KernelCode
    };

    // A deferred symbol-resolution request; `ptr`/`extra` meaning depends on type.
    struct SymbolQueueItem
    {
        SymbolQueueItemType type;
        uint64_t ptr;
        uint32_t extra;
    };
+
public:
    Profiler();
    ~Profiler();

    // Launches the profiler's background worker threads.
    void SpawnWorkerThreads();

    // Returns the current timestamp in raw timer units: hardware counter ticks
    // when TRACY_HW_TIMER applies (converted later via m_timerMul), otherwise
    // nanoseconds from the OS clock.
    static tracy_force_inline int64_t GetTime()
    {
#ifdef TRACY_HW_TIMER
# if defined TARGET_OS_IOS && TARGET_OS_IOS == 1
        if( HardwareSupportsInvariantTSC() ) return mach_absolute_time();
# elif defined _WIN32
#  ifdef TRACY_TIMER_QPC
        return GetTimeQpc();
#  else
        if( HardwareSupportsInvariantTSC() ) return int64_t( __rdtsc() );
#  endif
# elif defined __i386 || defined _M_IX86
        if( HardwareSupportsInvariantTSC() )
        {
            uint32_t eax, edx;
            asm volatile ( "rdtsc" : "=a" (eax), "=d" (edx) );
            return ( uint64_t( edx ) << 32 ) + uint64_t( eax );
        }
# elif defined __x86_64__ || defined _M_X64
        if( HardwareSupportsInvariantTSC() )
        {
            uint64_t rax, rdx;
            asm volatile ( "rdtsc" : "=a" (rax), "=d" (rdx) );
            return (int64_t)(( rdx << 32 ) + rax);
        }
# else
#  error "TRACY_HW_TIMER detection logic needs fixing"
# endif
#endif

#if !defined TRACY_HW_TIMER || defined TRACY_TIMER_FALLBACK
# if defined __linux__ && defined CLOCK_MONOTONIC_RAW
        // Raw monotonic clock: unaffected by NTP adjustments.
        struct timespec ts;
        clock_gettime( CLOCK_MONOTONIC_RAW, &ts );
        return int64_t( ts.tv_sec ) * 1000000000ll + int64_t( ts.tv_nsec );
# else
        return std::chrono::duration_cast<std::chrono::nanoseconds>( std::chrono::high_resolution_clock::now().time_since_epoch() ).count();
# endif
#endif

        return 0;       // unreachable branch
    }
+
    // Hands out monotonically increasing zone ids for begin/end validation.
    tracy_force_inline uint32_t GetNextZoneId()
    {
        return m_zoneId.fetch_add( 1, std::memory_order_relaxed );
    }

    // Acquires the serial-queue lock and returns the next slot to fill.
    // Must be paired with QueueSerialFinish(), which releases the lock.
    static tracy_force_inline QueueItem* QueueSerial()
    {
        auto& p = GetProfiler();
        p.m_serialLock.lock();
        return p.m_serialQueue.prepare_next();
    }

    // Like QueueSerial(), but first emits the given callstack under the same lock.
    static tracy_force_inline QueueItem* QueueSerialCallstack( void* ptr )
    {
        auto& p = GetProfiler();
        p.m_serialLock.lock();
        p.SendCallstackSerial( ptr );
        return p.m_serialQueue.prepare_next();
    }

    // Publishes the slot obtained from QueueSerial*() and releases the lock.
    static tracy_force_inline void QueueSerialFinish()
    {
        auto& p = GetProfiler();
        p.m_serialQueue.commit_next();
        p.m_serialLock.unlock();
    }
+
    // Emits a frame mark. A null name marks the unnamed primary frame set and
    // also advances the frame counter used for frame-image association.
    static tracy_force_inline void SendFrameMark( const char* name )
    {
        if( !name ) GetProfiler().m_frameCount.fetch_add( 1, std::memory_order_relaxed );
#ifdef TRACY_ON_DEMAND
        if( !GetProfiler().IsConnected() ) return;
#endif
        auto item = QueueSerial();
        MemWrite( &item->hdr.type, QueueType::FrameMarkMsg );
        MemWrite( &item->frameMark.time, GetTime() );
        MemWrite( &item->frameMark.name, uint64_t( name ) );
        QueueSerialFinish();
    }

    // Emits a discontinuous frame start/end mark (type selects which).
    static tracy_force_inline void SendFrameMark( const char* name, QueueType type )
    {
        assert( type == QueueType::FrameMarkMsgStart || type == QueueType::FrameMarkMsgEnd );
#ifdef TRACY_ON_DEMAND
        if( !GetProfiler().IsConnected() ) return;
#endif
        auto item = QueueSerial();
        MemWrite( &item->hdr.type, type );
        MemWrite( &item->frameMark.time, GetTime() );
        MemWrite( &item->frameMark.name, uint64_t( name ) );
        QueueSerialFinish();
    }
+
+ static tracy_force_inline void SendFrameImage( const void* image, uint16_t w, uint16_t h, uint8_t offset, bool flip )
+ {
+#ifndef TRACY_NO_FRAME_IMAGE
+ auto& profiler = GetProfiler();
+ assert( profiler.m_frameCount.load( std::memory_order_relaxed ) < std::numeric_limits<uint32_t>::max() );
+# ifdef TRACY_ON_DEMAND
+ if( !profiler.IsConnected() ) return;
+# endif
+ const auto sz = size_t( w ) * size_t( h ) * 4;
+ auto ptr = (char*)tracy_malloc( sz );
+ memcpy( ptr, image, sz );
+
+ profiler.m_fiLock.lock();
+ auto fi = profiler.m_fiQueue.prepare_next();
+ fi->image = ptr;
+ fi->frame = uint32_t( profiler.m_frameCount.load( std::memory_order_relaxed ) - offset );
+ fi->w = w;
+ fi->h = h;
+ fi->flip = flip;
+ profiler.m_fiQueue.commit_next();
+ profiler.m_fiLock.unlock();
+#endif
+ }
+
    // Records an integer sample for the named plot. Only the name pointer is
    // sent across the queue (the string is not copied), and the pointer value
    // is the plot's identity.
    static tracy_force_inline void PlotData( const char* name, int64_t val )
    {
#ifdef TRACY_ON_DEMAND
        if( !GetProfiler().IsConnected() ) return;
#endif
        TracyLfqPrepare( QueueType::PlotData );
        MemWrite( &item->plotData.name, (uint64_t)name );
        MemWrite( &item->plotData.time, GetTime() );
        MemWrite( &item->plotData.type, PlotDataType::Int );
        MemWrite( &item->plotData.data.i, val );
        TracyLfqCommit;
    }

    // Records a single-precision float sample for the named plot.
    static tracy_force_inline void PlotData( const char* name, float val )
    {
#ifdef TRACY_ON_DEMAND
        if( !GetProfiler().IsConnected() ) return;
#endif
        TracyLfqPrepare( QueueType::PlotData );
        MemWrite( &item->plotData.name, (uint64_t)name );
        MemWrite( &item->plotData.time, GetTime() );
        MemWrite( &item->plotData.type, PlotDataType::Float );
        MemWrite( &item->plotData.data.f, val );
        TracyLfqCommit;
    }

    // Records a double-precision sample for the named plot.
    static tracy_force_inline void PlotData( const char* name, double val )
    {
#ifdef TRACY_ON_DEMAND
        if( !GetProfiler().IsConnected() ) return;
#endif
        TracyLfqPrepare( QueueType::PlotData );
        MemWrite( &item->plotData.name, (uint64_t)name );
        MemWrite( &item->plotData.time, GetTime() );
        MemWrite( &item->plotData.type, PlotDataType::Double );
        MemWrite( &item->plotData.data.d, val );
        TracyLfqCommit;
    }
+
    // Sets the display format of the named plot. The item is also pushed to
    // the deferred queue so late-connecting servers still receive the config.
    static tracy_force_inline void ConfigurePlot( const char* name, PlotFormatType type )
    {
        TracyLfqPrepare( QueueType::PlotConfig );
        MemWrite( &item->plotConfig.name, (uint64_t)name );
        MemWrite( &item->plotConfig.type, (uint8_t)type );

#ifdef TRACY_ON_DEMAND
        GetProfiler().DeferItem( *item );
#endif

        TracyLfqCommit;
    }
+
+ static tracy_force_inline void Message( const char* txt, size_t size, int callstack )
+ {
+ assert( size < std::numeric_limits<uint16_t>::max() );
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ if( callstack != 0 )
+ {
+ tracy::GetProfiler().SendCallstack( callstack );
+ }
+
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, txt, size );
+
+ TracyQueuePrepare( callstack == 0 ? QueueType::Message : QueueType::MessageCallstack );
+ MemWrite( &item->messageFat.time, GetTime() );
+ MemWrite( &item->messageFat.text, (uint64_t)ptr );
+ MemWrite( &item->messageFat.size, (uint16_t)size );
+ TracyQueueCommit( messageFatThread );
+ }
+
+ static tracy_force_inline void Message( const char* txt, int callstack )
+ {
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ if( callstack != 0 )
+ {
+ tracy::GetProfiler().SendCallstack( callstack );
+ }
+
+ TracyQueuePrepare( callstack == 0 ? QueueType::MessageLiteral : QueueType::MessageLiteralCallstack );
+ MemWrite( &item->messageLiteral.time, GetTime() );
+ MemWrite( &item->messageLiteral.text, (uint64_t)txt );
+ TracyQueueCommit( messageLiteralThread );
+ }
+
+ static tracy_force_inline void MessageColor( const char* txt, size_t size, uint32_t color, int callstack )
+ {
+ assert( size < std::numeric_limits<uint16_t>::max() );
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ if( callstack != 0 )
+ {
+ tracy::GetProfiler().SendCallstack( callstack );
+ }
+
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, txt, size );
+
+ TracyQueuePrepare( callstack == 0 ? QueueType::MessageColor : QueueType::MessageColorCallstack );
+ MemWrite( &item->messageColorFat.time, GetTime() );
+ MemWrite( &item->messageColorFat.text, (uint64_t)ptr );
+ MemWrite( &item->messageColorFat.r, uint8_t( ( color ) & 0xFF ) );
+ MemWrite( &item->messageColorFat.g, uint8_t( ( color >> 8 ) & 0xFF ) );
+ MemWrite( &item->messageColorFat.b, uint8_t( ( color >> 16 ) & 0xFF ) );
+ MemWrite( &item->messageColorFat.size, (uint16_t)size );
+ TracyQueueCommit( messageColorFatThread );
+ }
+
+ static tracy_force_inline void MessageColor( const char* txt, uint32_t color, int callstack )
+ {
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ if( callstack != 0 )
+ {
+ tracy::GetProfiler().SendCallstack( callstack );
+ }
+
+ TracyQueuePrepare( callstack == 0 ? QueueType::MessageLiteralColor : QueueType::MessageLiteralColorCallstack );
+ MemWrite( &item->messageColorLiteral.time, GetTime() );
+ MemWrite( &item->messageColorLiteral.text, (uint64_t)txt );
+ MemWrite( &item->messageColorLiteral.r, uint8_t( ( color ) & 0xFF ) );
+ MemWrite( &item->messageColorLiteral.g, uint8_t( ( color >> 8 ) & 0xFF ) );
+ MemWrite( &item->messageColorLiteral.b, uint8_t( ( color >> 16 ) & 0xFF ) );
+ TracyQueueCommit( messageColorLiteralThread );
+ }
+
+ static tracy_force_inline void MessageAppInfo( const char* txt, size_t size )
+ {
+ assert( size < std::numeric_limits<uint16_t>::max() );
+ auto ptr = (char*)tracy_malloc( size );
+ memcpy( ptr, txt, size );
+ TracyLfqPrepare( QueueType::MessageAppInfo );
+ MemWrite( &item->messageFat.time, GetTime() );
+ MemWrite( &item->messageFat.text, (uint64_t)ptr );
+ MemWrite( &item->messageFat.size, (uint16_t)size );
+
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+
+ TracyLfqCommit;
+ }
+
+ static tracy_force_inline void MemAlloc( const void* ptr, size_t size, bool secure )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ const auto thread = GetThreadHandle();
+
+ GetProfiler().m_serialLock.lock();
+ SendMemAlloc( QueueType::MemAlloc, thread, ptr, size );
+ GetProfiler().m_serialLock.unlock();
+ }
+
+ static tracy_force_inline void MemFree( const void* ptr, bool secure )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ const auto thread = GetThreadHandle();
+
+ GetProfiler().m_serialLock.lock();
+ SendMemFree( QueueType::MemFree, thread, ptr );
+ GetProfiler().m_serialLock.unlock();
+ }
+
+ static tracy_force_inline void MemAllocCallstack( const void* ptr, size_t size, int depth, bool secure )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_HAS_CALLSTACK
+ auto& profiler = GetProfiler();
+# ifdef TRACY_ON_DEMAND
+ if( !profiler.IsConnected() ) return;
+# endif
+ const auto thread = GetThreadHandle();
+
+ auto callstack = Callstack( depth );
+
+ profiler.m_serialLock.lock();
+ SendCallstackSerial( callstack );
+ SendMemAlloc( QueueType::MemAllocCallstack, thread, ptr, size );
+ profiler.m_serialLock.unlock();
+#else
+ static_cast<void>(depth); // unused
+ MemAlloc( ptr, size, secure );
+#endif
+ }
+
+ static tracy_force_inline void MemFreeCallstack( const void* ptr, int depth, bool secure )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+ if( !ProfilerAllocatorAvailable() )
+ {
+ MemFree( ptr, secure );
+ return;
+ }
+#ifdef TRACY_HAS_CALLSTACK
+ auto& profiler = GetProfiler();
+# ifdef TRACY_ON_DEMAND
+ if( !profiler.IsConnected() ) return;
+# endif
+ const auto thread = GetThreadHandle();
+
+ auto callstack = Callstack( depth );
+
+ profiler.m_serialLock.lock();
+ SendCallstackSerial( callstack );
+ SendMemFree( QueueType::MemFreeCallstack, thread, ptr );
+ profiler.m_serialLock.unlock();
+#else
+ static_cast<void>(depth); // unused
+ MemFree( ptr, secure );
+#endif
+ }
+
+ static tracy_force_inline void MemAllocNamed( const void* ptr, size_t size, bool secure, const char* name )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ const auto thread = GetThreadHandle();
+
+ GetProfiler().m_serialLock.lock();
+ SendMemName( name );
+ SendMemAlloc( QueueType::MemAllocNamed, thread, ptr, size );
+ GetProfiler().m_serialLock.unlock();
+ }
+
+ static tracy_force_inline void MemFreeNamed( const void* ptr, bool secure, const char* name )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+ const auto thread = GetThreadHandle();
+
+ GetProfiler().m_serialLock.lock();
+ SendMemName( name );
+ SendMemFree( QueueType::MemFreeNamed, thread, ptr );
+ GetProfiler().m_serialLock.unlock();
+ }
+
+ static tracy_force_inline void MemAllocCallstackNamed( const void* ptr, size_t size, int depth, bool secure, const char* name )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_HAS_CALLSTACK
+ auto& profiler = GetProfiler();
+# ifdef TRACY_ON_DEMAND
+ if( !profiler.IsConnected() ) return;
+# endif
+ const auto thread = GetThreadHandle();
+
+ auto callstack = Callstack( depth );
+
+ profiler.m_serialLock.lock();
+ SendCallstackSerial( callstack );
+ SendMemName( name );
+ SendMemAlloc( QueueType::MemAllocCallstackNamed, thread, ptr, size );
+ profiler.m_serialLock.unlock();
+#else
+ static_cast<void>(depth); // unused
+ static_cast<void>(name); // unused
+ MemAlloc( ptr, size, secure );
+#endif
+ }
+
+ static tracy_force_inline void MemFreeCallstackNamed( const void* ptr, int depth, bool secure, const char* name )
+ {
+ if( secure && !ProfilerAvailable() ) return;
+#ifdef TRACY_HAS_CALLSTACK
+ auto& profiler = GetProfiler();
+# ifdef TRACY_ON_DEMAND
+ if( !profiler.IsConnected() ) return;
+# endif
+ const auto thread = GetThreadHandle();
+
+ auto callstack = Callstack( depth );
+
+ profiler.m_serialLock.lock();
+ SendCallstackSerial( callstack );
+ SendMemName( name );
+ SendMemFree( QueueType::MemFreeCallstackNamed, thread, ptr );
+ profiler.m_serialLock.unlock();
+#else
+ static_cast<void>(depth); // unused
+ static_cast<void>(name); // unused
+ MemFree( ptr, secure );
+#endif
+ }
+
+ static tracy_force_inline void SendCallstack( int depth )
+ {
+#ifdef TRACY_HAS_CALLSTACK
+ auto ptr = Callstack( depth );
+ TracyQueuePrepare( QueueType::Callstack );
+ MemWrite( &item->callstackFat.ptr, (uint64_t)ptr );
+ TracyQueueCommit( callstackFatThread );
+#else
+ static_cast<void>(depth); // unused
+#endif
+ }
+
+ static tracy_force_inline void ParameterRegister( ParameterCallback cb ) { GetProfiler().m_paramCallback = cb; }
+ static tracy_force_inline void ParameterSetup( uint32_t idx, const char* name, bool isBool, int32_t val )
+ {
+ TracyLfqPrepare( QueueType::ParamSetup );
+ tracy::MemWrite( &item->paramSetup.idx, idx );
+ tracy::MemWrite( &item->paramSetup.name, (uint64_t)name );
+ tracy::MemWrite( &item->paramSetup.isBool, (uint8_t)isBool );
+ tracy::MemWrite( &item->paramSetup.val, val );
+
+#ifdef TRACY_ON_DEMAND
+ GetProfiler().DeferItem( *item );
+#endif
+
+ TracyLfqCommit;
+ }
+
#ifdef TRACY_FIBERS
    // Marks the calling thread as now executing the named fiber. Only the
    // name pointer is sent; the string itself is not copied.
    static tracy_force_inline void EnterFiber( const char* fiber )
    {
        TracyQueuePrepare( QueueType::FiberEnter );
        MemWrite( &item->fiberEnter.time, GetTime() );
        MemWrite( &item->fiberEnter.fiber, (uint64_t)fiber );
        TracyQueueCommit( fiberEnter );
    }

    // Marks the end of fiber execution on the calling thread.
    static tracy_force_inline void LeaveFiber()
    {
        TracyQueuePrepare( QueueType::FiberLeave );
        MemWrite( &item->fiberLeave.time, GetTime() );
        TracyQueueCommit( fiberLeave );
    }
#endif
+
+ void SendCallstack( int depth, const char* skipBefore );
+ static void CutCallstack( void* callstack, const char* skipBefore );
+
+ static bool ShouldExit();
+
+ tracy_force_inline bool IsConnected() const
+ {
+ return m_isConnected.load( std::memory_order_acquire );
+ }
+
+#ifdef TRACY_ON_DEMAND
+ tracy_force_inline uint64_t ConnectionId() const
+ {
+ return m_connectionId.load( std::memory_order_acquire );
+ }
+
+ tracy_force_inline void DeferItem( const QueueItem& item )
+ {
+ m_deferredLock.lock();
+ auto dst = m_deferredQueue.push_next();
+ memcpy( dst, &item, sizeof( item ) );
+ m_deferredLock.unlock();
+ }
+#endif
+
+ void RequestShutdown() { m_shutdown.store( true, std::memory_order_relaxed ); m_shutdownManual.store( true, std::memory_order_relaxed ); }
+ bool HasShutdownFinished() const { return m_shutdownFinished.load( std::memory_order_relaxed ); }
+
+ void SendString( uint64_t str, const char* ptr, QueueType type ) { SendString( str, ptr, strlen( ptr ), type ); }
+ void SendString( uint64_t str, const char* ptr, size_t len, QueueType type );
+ void SendSingleString( const char* ptr ) { SendSingleString( ptr, strlen( ptr ) ); }
+ void SendSingleString( const char* ptr, size_t len );
+ void SendSecondString( const char* ptr ) { SendSecondString( ptr, strlen( ptr ) ); }
+ void SendSecondString( const char* ptr, size_t len );
+
+
+ // Allocated source location data layout:
+ // 2b payload size
+ // 4b color
+ // 4b source line
+ // fsz function name
+ // 1b null terminator
+ // ssz source file name
+ // 1b null terminator
+ // nsz zone name (optional)
+
+ static tracy_force_inline uint64_t AllocSourceLocation( uint32_t line, const char* source, const char* function )
+ {
+ return AllocSourceLocation( line, source, function, nullptr, 0 );
+ }
+
+ static tracy_force_inline uint64_t AllocSourceLocation( uint32_t line, const char* source, const char* function, const char* name, size_t nameSz )
+ {
+ return AllocSourceLocation( line, source, strlen(source), function, strlen(function), name, nameSz );
+ }
+
+ static tracy_force_inline uint64_t AllocSourceLocation( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz )
+ {
+ return AllocSourceLocation( line, source, sourceSz, function, functionSz, nullptr, 0 );
+ }
+
+ static tracy_force_inline uint64_t AllocSourceLocation( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz )
+ {
+ const auto sz32 = uint32_t( 2 + 4 + 4 + functionSz + 1 + sourceSz + 1 + nameSz );
+ assert( sz32 <= std::numeric_limits<uint16_t>::max() );
+ const auto sz = uint16_t( sz32 );
+ auto ptr = (char*)tracy_malloc( sz );
+ memcpy( ptr, &sz, 2 );
+ memset( ptr + 2, 0, 4 );
+ memcpy( ptr + 6, &line, 4 );
+ memcpy( ptr + 10, function, functionSz );
+ ptr[10 + functionSz] = '\0';
+ memcpy( ptr + 10 + functionSz + 1, source, sourceSz );
+ ptr[10 + functionSz + 1 + sourceSz] = '\0';
+ if( nameSz != 0 )
+ {
+ memcpy( ptr + 10 + functionSz + 1 + sourceSz + 1, name, nameSz );
+ }
+ return uint64_t( ptr );
+ }
+
+private:
+ enum class DequeueStatus { DataDequeued, ConnectionLost, QueueEmpty };
+ enum class ThreadCtxStatus { Same, Changed, ConnectionLost };
+
+ static void LaunchWorker( void* ptr ) { ((Profiler*)ptr)->Worker(); }
+ void Worker();
+
+#ifndef TRACY_NO_FRAME_IMAGE
+ static void LaunchCompressWorker( void* ptr ) { ((Profiler*)ptr)->CompressWorker(); }
+ void CompressWorker();
+#endif
+
+#ifdef TRACY_HAS_CALLSTACK
+ static void LaunchSymbolWorker( void* ptr ) { ((Profiler*)ptr)->SymbolWorker(); }
+ void SymbolWorker();
+ void HandleSymbolQueueItem( const SymbolQueueItem& si );
+#endif
+
+ void ClearQueues( tracy::moodycamel::ConsumerToken& token );
+ void ClearSerial();
+ DequeueStatus Dequeue( tracy::moodycamel::ConsumerToken& token );
+ DequeueStatus DequeueContextSwitches( tracy::moodycamel::ConsumerToken& token, int64_t& timeStop );
+ DequeueStatus DequeueSerial();
+ ThreadCtxStatus ThreadCtxCheck( uint32_t threadId );
+ bool CommitData();
+
+ tracy_force_inline bool AppendData( const void* data, size_t len )
+ {
+ const auto ret = NeedDataSize( len );
+ AppendDataUnsafe( data, len );
+ return ret;
+ }
+
+ tracy_force_inline bool NeedDataSize( size_t len )
+ {
+ assert( len <= TargetFrameSize );
+ bool ret = true;
+ if( m_bufferOffset - m_bufferStart + (int)len > TargetFrameSize )
+ {
+ ret = CommitData();
+ }
+ return ret;
+ }
+
    // Raw append without a capacity check; caller must have called
    // NeedDataSize() first.
    tracy_force_inline void AppendDataUnsafe( const void* data, size_t len )
    {
        memcpy( m_buffer + m_bufferOffset, data, len );
        m_bufferOffset += int( len );
    }
+
+ bool SendData( const char* data, size_t len );
+ void SendLongString( uint64_t ptr, const char* str, size_t len, QueueType type );
+ void SendSourceLocation( uint64_t ptr );
+ void SendSourceLocationPayload( uint64_t ptr );
+ void SendCallstackPayload( uint64_t ptr );
+ void SendCallstackPayload64( uint64_t ptr );
+ void SendCallstackAlloc( uint64_t ptr );
+
+ void QueueCallstackFrame( uint64_t ptr );
+ void QueueSymbolQuery( uint64_t symbol );
+ void QueueCodeLocation( uint64_t ptr );
+ void QueueExternalName( uint64_t ptr );
+ void QueueKernelCode( uint64_t symbol, uint32_t size );
+
+ bool HandleServerQuery();
+ void HandleDisconnect();
+ void HandleParameter( uint64_t payload );
+ void HandleSymbolCodeQuery( uint64_t symbol, uint32_t size );
+ void HandleSourceCodeQuery();
+
+ void AckServerQuery();
+ void AckSourceCodeNotAvailable();
+ void AckSymbolCodeNotAvailable();
+
+ void CalibrateTimer();
+ void CalibrateDelay();
+ void ReportTopology();
+
+ static tracy_force_inline void SendCallstackSerial( void* ptr )
+ {
+#ifdef TRACY_HAS_CALLSTACK
+ auto item = GetProfiler().m_serialQueue.prepare_next();
+ MemWrite( &item->hdr.type, QueueType::CallstackSerial );
+ MemWrite( &item->callstackFat.ptr, (uint64_t)ptr );
+ GetProfiler().m_serialQueue.commit_next();
+#else
+ static_cast<void>(ptr); // unused
+#endif
+ }
+
+ static tracy_force_inline void SendMemAlloc( QueueType type, const uint32_t thread, const void* ptr, size_t size )
+ {
+ assert( type == QueueType::MemAlloc || type == QueueType::MemAllocCallstack || type == QueueType::MemAllocNamed || type == QueueType::MemAllocCallstackNamed );
+
+ auto item = GetProfiler().m_serialQueue.prepare_next();
+ MemWrite( &item->hdr.type, type );
+ MemWrite( &item->memAlloc.time, GetTime() );
+ MemWrite( &item->memAlloc.thread, thread );
+ MemWrite( &item->memAlloc.ptr, (uint64_t)ptr );
+ if( compile_time_condition<sizeof( size ) == 4>::value )
+ {
+ memcpy( &item->memAlloc.size, &size, 4 );
+ memset( &item->memAlloc.size + 4, 0, 2 );
+ }
+ else
+ {
+ assert( sizeof( size ) == 8 );
+ memcpy( &item->memAlloc.size, &size, 4 );
+ memcpy( ((char*)&item->memAlloc.size)+4, ((char*)&size)+4, 2 );
+ }
+ GetProfiler().m_serialQueue.commit_next();
+ }
+
+ static tracy_force_inline void SendMemFree( QueueType type, const uint32_t thread, const void* ptr )
+ {
+ assert( type == QueueType::MemFree || type == QueueType::MemFreeCallstack || type == QueueType::MemFreeNamed || type == QueueType::MemFreeCallstackNamed );
+
+ auto item = GetProfiler().m_serialQueue.prepare_next();
+ MemWrite( &item->hdr.type, type );
+ MemWrite( &item->memFree.time, GetTime() );
+ MemWrite( &item->memFree.thread, thread );
+ MemWrite( &item->memFree.ptr, (uint64_t)ptr );
+ GetProfiler().m_serialQueue.commit_next();
+ }
+
+ static tracy_force_inline void SendMemName( const char* name )
+ {
+ assert( name );
+ auto item = GetProfiler().m_serialQueue.prepare_next();
+ MemWrite( &item->hdr.type, QueueType::MemNamePayload );
+ MemWrite( &item->memName.name, (uint64_t)name );
+ GetProfiler().m_serialQueue.commit_next();
+ }
+
+#if defined _WIN32 && defined TRACY_TIMER_QPC
+ static int64_t GetTimeQpc();
+#endif
+
+ double m_timerMul;
+ uint64_t m_resolution;
+ uint64_t m_delay;
+ std::atomic<int64_t> m_timeBegin;
+ uint32_t m_mainThread;
+ uint64_t m_epoch, m_exectime;
+ std::atomic<bool> m_shutdown;
+ std::atomic<bool> m_shutdownManual;
+ std::atomic<bool> m_shutdownFinished;
+ Socket* m_sock;
+ UdpBroadcast* m_broadcast;
+ bool m_noExit;
+ uint32_t m_userPort;
+ std::atomic<uint32_t> m_zoneId;
+ int64_t m_samplingPeriod;
+
+ uint32_t m_threadCtx;
+ int64_t m_refTimeThread;
+ int64_t m_refTimeSerial;
+ int64_t m_refTimeCtx;
+ int64_t m_refTimeGpu;
+
+ void* m_stream; // LZ4_stream_t*
+ char* m_buffer;
+ int m_bufferOffset;
+ int m_bufferStart;
+
+ char* m_lz4Buf;
+
+ FastVector<QueueItem> m_serialQueue, m_serialDequeue;
+ TracyMutex m_serialLock;
+
+#ifndef TRACY_NO_FRAME_IMAGE
+ FastVector<FrameImageQueueItem> m_fiQueue, m_fiDequeue;
+ TracyMutex m_fiLock;
+#endif
+
+ SPSCQueue<SymbolQueueItem> m_symbolQueue;
+
+ std::atomic<uint64_t> m_frameCount;
+ std::atomic<bool> m_isConnected;
+#ifdef TRACY_ON_DEMAND
+ std::atomic<uint64_t> m_connectionId;
+
+ TracyMutex m_deferredLock;
+ FastVector<QueueItem> m_deferredQueue;
+#endif
+
+#ifdef TRACY_HAS_SYSTIME
+ void ProcessSysTime();
+
+ SysTime m_sysTime;
+ uint64_t m_sysTimeLast = 0;
+#else
+ void ProcessSysTime() {}
+#endif
+
+ ParameterCallback m_paramCallback;
+
+ char* m_queryData;
+ char* m_queryDataPtr;
+
+#if defined _WIN32
+ void* m_exceptionHandler;
+#endif
+#ifdef __linux__
+ struct {
+ struct sigaction pwr, ill, fpe, segv, pipe, bus, abrt;
+ } m_prevSignal;
+#endif
+ bool m_crashHandlerInstalled;
+};
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyRingBuffer.hpp b/3rdparty/tracy/tracy/client/TracyRingBuffer.hpp
new file mode 100644
index 0000000..bc90511
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyRingBuffer.hpp
@@ -0,0 +1,131 @@
+#include <errno.h>
+
+namespace tracy
+{
+
+class RingBuffer
+{
+public:
+ RingBuffer( unsigned int size, int fd, int id, int cpu = -1 )
+ : m_size( size )
+ , m_id( id )
+ , m_cpu( cpu )
+ , m_fd( fd )
+ {
+ const auto pageSize = uint32_t( getpagesize() );
+ assert( size >= pageSize );
+ assert( __builtin_popcount( size ) == 1 );
+ m_mapSize = size + pageSize;
+ auto mapAddr = mmap( nullptr, m_mapSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
+ if( mapAddr == MAP_FAILED )
+ {
+ TracyDebug( "mmap failed: errno %i (%s)\n", errno, strerror( errno ) );
+ m_fd = 0;
+ m_metadata = nullptr;
+ close( fd );
+ return;
+ }
+ m_metadata = (perf_event_mmap_page*)mapAddr;
+ assert( m_metadata->data_offset == pageSize );
+ m_buffer = ((char*)mapAddr) + pageSize;
+ m_tail = m_metadata->data_tail;
+ }
+
+ ~RingBuffer()
+ {
+ if( m_metadata ) munmap( m_metadata, m_mapSize );
+ if( m_fd ) close( m_fd );
+ }
+
+ RingBuffer( const RingBuffer& ) = delete;
+ RingBuffer& operator=( const RingBuffer& ) = delete;
+
+ RingBuffer( RingBuffer&& other )
+ {
+ memcpy( (char*)&other, (char*)this, sizeof( RingBuffer ) );
+ m_metadata = nullptr;
+ m_fd = 0;
+ }
+
+ RingBuffer& operator=( RingBuffer&& other )
+ {
+ memcpy( (char*)&other, (char*)this, sizeof( RingBuffer ) );
+ m_metadata = nullptr;
+ m_fd = 0;
+ return *this;
+ }
+
+ bool IsValid() const { return m_metadata != nullptr; }
+ int GetId() const { return m_id; }
+ int GetCpu() const { return m_cpu; }
+
+ void Enable()
+ {
+ ioctl( m_fd, PERF_EVENT_IOC_ENABLE, 0 );
+ }
+
+ void Read( void* dst, uint64_t offset, uint64_t cnt )
+ {
+ const auto size = m_size;
+ auto src = ( m_tail + offset ) % size;
+ if( src + cnt <= size )
+ {
+ memcpy( dst, m_buffer + src, cnt );
+ }
+ else
+ {
+ const auto s0 = size - src;
+ const auto buf = m_buffer;
+ memcpy( dst, buf + src, s0 );
+ memcpy( (char*)dst + s0, buf, cnt - s0 );
+ }
+ }
+
+ void Advance( uint64_t cnt )
+ {
+ m_tail += cnt;
+ StoreTail();
+ }
+
+ bool CheckTscCaps() const
+ {
+ return m_metadata->cap_user_time_zero;
+ }
+
+ int64_t ConvertTimeToTsc( int64_t timestamp ) const
+ {
+ if( !m_metadata->cap_user_time_zero ) return 0;
+ const auto time = timestamp - m_metadata->time_zero;
+ const auto quot = time / m_metadata->time_mult;
+ const auto rem = time % m_metadata->time_mult;
+ return ( quot << m_metadata->time_shift ) + ( rem << m_metadata->time_shift ) / m_metadata->time_mult;
+ }
+
+ uint64_t LoadHead() const
+ {
+ return std::atomic_load_explicit( (const volatile std::atomic<uint64_t>*)&m_metadata->data_head, std::memory_order_acquire );
+ }
+
+ uint64_t GetTail() const
+ {
+ return m_tail;
+ }
+
+private:
+ void StoreTail()
+ {
+ std::atomic_store_explicit( (volatile std::atomic<uint64_t>*)&m_metadata->data_tail, m_tail, std::memory_order_release );
+ }
+
+ unsigned int m_size;
+ uint64_t m_tail;
+ char* m_buffer;
+ int m_id;
+ int m_cpu;
+ perf_event_mmap_page* m_metadata;
+
+ size_t m_mapSize;
+ int m_fd;
+};
+
+}
diff --git a/3rdparty/tracy/tracy/client/TracyScoped.hpp b/3rdparty/tracy/tracy/client/TracyScoped.hpp
new file mode 100644
index 0000000..3c8105e
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyScoped.hpp
@@ -0,0 +1,175 @@
+#ifndef __TRACYSCOPED_HPP__
+#define __TRACYSCOPED_HPP__
+
+#include <limits>
+#include <stdint.h>
+#include <string.h>
+
+#include "../common/TracySystem.hpp"
+#include "../common/TracyAlign.hpp"
+#include "../common/TracyAlloc.hpp"
+#include "TracyProfiler.hpp"
+
+namespace tracy
+{
+
// RAII profiling zone: construction emits a zone-begin event, destruction a
// zone-end event. Non-copyable and non-movable; intended for stack scopes.
class ScopedZone
{
public:
    ScopedZone( const ScopedZone& ) = delete;
    ScopedZone( ScopedZone&& ) = delete;
    ScopedZone& operator=( const ScopedZone& ) = delete;
    ScopedZone& operator=( ScopedZone&& ) = delete;

    // Begins a zone described by a static source-location record.
    // With TRACY_ON_DEMAND the zone is inactive unless a server is connected,
    // and the connection id is remembered so events from a previous
    // connection are not emitted into a new one.
    tracy_force_inline ScopedZone( const SourceLocationData* srcloc, bool is_active = true )
#ifdef TRACY_ON_DEMAND
        : m_active( is_active && GetProfiler().IsConnected() )
#else
        : m_active( is_active )
#endif
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        m_connectionId = GetProfiler().ConnectionId();
#endif
        TracyQueuePrepare( QueueType::ZoneBegin );
        MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
        MemWrite( &item->zoneBegin.srcloc, (uint64_t)srcloc );
        TracyQueueCommit( zoneBeginThread );
    }

    // As above, additionally capturing a callstack of the given depth.
    tracy_force_inline ScopedZone( const SourceLocationData* srcloc, int depth, bool is_active = true )
#ifdef TRACY_ON_DEMAND
        : m_active( is_active && GetProfiler().IsConnected() )
#else
        : m_active( is_active )
#endif
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        m_connectionId = GetProfiler().ConnectionId();
#endif
        GetProfiler().SendCallstack( depth );

        TracyQueuePrepare( QueueType::ZoneBeginCallstack );
        MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
        MemWrite( &item->zoneBegin.srcloc, (uint64_t)srcloc );
        TracyQueueCommit( zoneBeginThread );
    }

    // Begins a zone whose source location is built at runtime (allocated
    // record, owned by the queue consumer).
    tracy_force_inline ScopedZone( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, bool is_active = true )
#ifdef TRACY_ON_DEMAND
        : m_active( is_active && GetProfiler().IsConnected() )
#else
        : m_active( is_active )
#endif
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        m_connectionId = GetProfiler().ConnectionId();
#endif
        TracyQueuePrepare( QueueType::ZoneBeginAllocSrcLoc );
        const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
        MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
        MemWrite( &item->zoneBegin.srcloc, srcloc );
        TracyQueueCommit( zoneBeginThread );
    }

    // Runtime source location plus a captured callstack.
    tracy_force_inline ScopedZone( uint32_t line, const char* source, size_t sourceSz, const char* function, size_t functionSz, const char* name, size_t nameSz, int depth, bool is_active = true )
#ifdef TRACY_ON_DEMAND
        : m_active( is_active && GetProfiler().IsConnected() )
#else
        : m_active( is_active )
#endif
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        m_connectionId = GetProfiler().ConnectionId();
#endif
        GetProfiler().SendCallstack( depth );

        TracyQueuePrepare( QueueType::ZoneBeginAllocSrcLocCallstack );
        const auto srcloc = Profiler::AllocSourceLocation( line, source, sourceSz, function, functionSz, name, nameSz );
        MemWrite( &item->zoneBegin.time, Profiler::GetTime() );
        MemWrite( &item->zoneBegin.srcloc, srcloc );
        TracyQueueCommit( zoneBeginThread );
    }

    // Ends the zone. Suppressed if inactive or if the server connection
    // changed since the zone began.
    tracy_force_inline ~ScopedZone()
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        if( GetProfiler().ConnectionId() != m_connectionId ) return;
#endif
        TracyQueuePrepare( QueueType::ZoneEnd );
        MemWrite( &item->zoneEnd.time, Profiler::GetTime() );
        TracyQueueCommit( zoneEndThread );
    }

    // Attaches a text annotation to the zone; the text is copied and the
    // queue consumer owns the buffer.
    tracy_force_inline void Text( const char* txt, size_t size )
    {
        assert( size < std::numeric_limits<uint16_t>::max() );
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        if( GetProfiler().ConnectionId() != m_connectionId ) return;
#endif
        auto ptr = (char*)tracy_malloc( size );
        memcpy( ptr, txt, size );
        TracyQueuePrepare( QueueType::ZoneText );
        MemWrite( &item->zoneTextFat.text, (uint64_t)ptr );
        MemWrite( &item->zoneTextFat.size, (uint16_t)size );
        TracyQueueCommit( zoneTextFatThread );
    }

    // Overrides the zone's displayed name; the text is copied.
    tracy_force_inline void Name( const char* txt, size_t size )
    {
        assert( size < std::numeric_limits<uint16_t>::max() );
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        if( GetProfiler().ConnectionId() != m_connectionId ) return;
#endif
        auto ptr = (char*)tracy_malloc( size );
        memcpy( ptr, txt, size );
        TracyQueuePrepare( QueueType::ZoneName );
        MemWrite( &item->zoneTextFat.text, (uint64_t)ptr );
        MemWrite( &item->zoneTextFat.size, (uint16_t)size );
        TracyQueueCommit( zoneTextFatThread );
    }

    // Sets the zone's color; low three bytes of `color` are r, g, b.
    tracy_force_inline void Color( uint32_t color )
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        if( GetProfiler().ConnectionId() != m_connectionId ) return;
#endif
        TracyQueuePrepare( QueueType::ZoneColor );
        MemWrite( &item->zoneColor.r, uint8_t( ( color ) & 0xFF ) );
        MemWrite( &item->zoneColor.g, uint8_t( ( color >> 8 ) & 0xFF ) );
        MemWrite( &item->zoneColor.b, uint8_t( ( color >> 16 ) & 0xFF ) );
        TracyQueueCommit( zoneColorThread );
    }

    // Attaches a numeric value to the zone.
    tracy_force_inline void Value( uint64_t value )
    {
        if( !m_active ) return;
#ifdef TRACY_ON_DEMAND
        if( GetProfiler().ConnectionId() != m_connectionId ) return;
#endif
        TracyQueuePrepare( QueueType::ZoneValue );
        MemWrite( &item->zoneValue.value, value );
        TracyQueueCommit( zoneValueThread );
    }

    tracy_force_inline bool IsActive() const { return m_active; }

private:
    const bool m_active;    // decided once at construction, never changes

#ifdef TRACY_ON_DEMAND
    uint64_t m_connectionId;    // connection the zone-begin was emitted on
#endif
};
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyStringHelpers.hpp b/3rdparty/tracy/tracy/client/TracyStringHelpers.hpp
new file mode 100644
index 0000000..7f9efbe
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyStringHelpers.hpp
@@ -0,0 +1,50 @@
+#ifndef __TRACYSTRINGHELPERS_HPP__
+#define __TRACYSTRINGHELPERS_HPP__
+
+#include <assert.h>
+#include <string.h>
+
+#include "../common/TracyAlloc.hpp"
+
+namespace tracy
+{
+
+static inline char* CopyString( const char* src, size_t sz )
+{
+ assert( strlen( src ) == sz );
+ auto dst = (char*)tracy_malloc( sz + 1 );
+ memcpy( dst, src, sz );
+ dst[sz] = '\0';
+ return dst;
+}
+
+static inline char* CopyString( const char* src )
+{
+ const auto sz = strlen( src );
+ auto dst = (char*)tracy_malloc( sz + 1 );
+ memcpy( dst, src, sz );
+ dst[sz] = '\0';
+ return dst;
+}
+
+static inline char* CopyStringFast( const char* src, size_t sz )
+{
+ assert( strlen( src ) == sz );
+ auto dst = (char*)tracy_malloc_fast( sz + 1 );
+ memcpy( dst, src, sz );
+ dst[sz] = '\0';
+ return dst;
+}
+
+static inline char* CopyStringFast( const char* src )
+{
+ const auto sz = strlen( src );
+ auto dst = (char*)tracy_malloc_fast( sz + 1 );
+ memcpy( dst, src, sz );
+ dst[sz] = '\0';
+ return dst;
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracySysTime.cpp b/3rdparty/tracy/tracy/client/TracySysTime.cpp
new file mode 100644
index 0000000..b690a91
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracySysTime.cpp
@@ -0,0 +1,108 @@
+#include "TracySysTime.hpp"
+
+#ifdef TRACY_HAS_SYSTIME
+
+# if defined _WIN32
+# include <windows.h>
+# elif defined __linux__
+# include <stdio.h>
+# include <inttypes.h>
+# elif defined __APPLE__
+# include <mach/mach_host.h>
+# include <mach/host_info.h>
+# elif defined BSD
+# include <sys/types.h>
+# include <sys/sysctl.h>
+# endif
+
+namespace tracy
+{
+
+# if defined _WIN32
+
+static inline uint64_t ConvertTime( const FILETIME& t )
+{
+ return ( uint64_t( t.dwHighDateTime ) << 32 ) | uint64_t( t.dwLowDateTime );
+}
+
+void SysTime::ReadTimes()
+{
+ FILETIME idleTime;
+ FILETIME kernelTime;
+ FILETIME userTime;
+
+ GetSystemTimes( &idleTime, &kernelTime, &userTime );
+
+ idle = ConvertTime( idleTime );
+ const auto kernel = ConvertTime( kernelTime );
+ const auto user = ConvertTime( userTime );
+ used = kernel + user;
+}
+
+# elif defined __linux__
+
+void SysTime::ReadTimes()
+{
+ uint64_t user, nice, system;
+ FILE* f = fopen( "/proc/stat", "r" );
+ if( f )
+ {
+ int read = fscanf( f, "cpu %" PRIu64 " %" PRIu64 " %" PRIu64" %" PRIu64, &user, &nice, &system, &idle );
+ fclose( f );
+ if (read == 4)
+ {
+ used = user + nice + system;
+ }
+ }
+}
+
+# elif defined __APPLE__
+
+void SysTime::ReadTimes()
+{
+ host_cpu_load_info_data_t info;
+ mach_msg_type_number_t cnt = HOST_CPU_LOAD_INFO_COUNT;
+ host_statistics( mach_host_self(), HOST_CPU_LOAD_INFO, reinterpret_cast<host_info_t>( &info ), &cnt );
+ used = info.cpu_ticks[CPU_STATE_USER] + info.cpu_ticks[CPU_STATE_NICE] + info.cpu_ticks[CPU_STATE_SYSTEM];
+ idle = info.cpu_ticks[CPU_STATE_IDLE];
+}
+
+# elif defined BSD
+
+void SysTime::ReadTimes()
+{
+ u_long data[5];
+ size_t sz = sizeof( data );
+ sysctlbyname( "kern.cp_time", &data, &sz, nullptr, 0 );
+ used = data[0] + data[1] + data[2] + data[3];
+ idle = data[4];
+}
+
+#endif
+
+SysTime::SysTime()
+{
+ ReadTimes();
+}
+
+float SysTime::Get()
+{
+ const auto oldUsed = used;
+ const auto oldIdle = idle;
+
+ ReadTimes();
+
+ const auto diffIdle = idle - oldIdle;
+ const auto diffUsed = used - oldUsed;
+
+#if defined _WIN32
+ return diffUsed == 0 ? -1 : ( diffUsed - diffIdle ) * 100.f / diffUsed;
+#elif defined __linux__ || defined __APPLE__ || defined BSD
+ const auto total = diffUsed + diffIdle;
+ return total == 0 ? -1 : diffUsed * 100.f / total;
+#endif
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracySysTime.hpp b/3rdparty/tracy/tracy/client/TracySysTime.hpp
new file mode 100644
index 0000000..cb5ebe7
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracySysTime.hpp
@@ -0,0 +1,36 @@
+#ifndef __TRACYSYSTIME_HPP__
+#define __TRACYSYSTIME_HPP__
+
+#if defined _WIN32 || defined __linux__ || defined __APPLE__
+# define TRACY_HAS_SYSTIME
+#else
+# include <sys/param.h>
+#endif
+
+#ifdef BSD
+# define TRACY_HAS_SYSTIME
+#endif
+
+#ifdef TRACY_HAS_SYSTIME
+
+#include <stdint.h>
+
+namespace tracy
+{
+
+class SysTime
+{
+public:
+ SysTime();
+ float Get();
+
+ void ReadTimes();
+
+private:
+ uint64_t idle, used;
+};
+
+}
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracySysTrace.cpp b/3rdparty/tracy/tracy/client/TracySysTrace.cpp
new file mode 100644
index 0000000..cb45509
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracySysTrace.cpp
@@ -0,0 +1,1489 @@
+#include "TracyDebug.hpp"
+#include "TracyStringHelpers.hpp"
+#include "TracySysTrace.hpp"
+#include "../common/TracySystem.hpp"
+
+#ifdef TRACY_HAS_SYSTEM_TRACING
+
+#ifndef TRACY_SAMPLING_HZ
+# if defined _WIN32
+# define TRACY_SAMPLING_HZ 8000
+# elif defined __linux__
+# define TRACY_SAMPLING_HZ 10000
+# endif
+#endif
+
+namespace tracy
+{
+
+static constexpr int GetSamplingFrequency()
+{
+#if defined _WIN32
+ return TRACY_SAMPLING_HZ > 8000 ? 8000 : ( TRACY_SAMPLING_HZ < 1 ? 1 : TRACY_SAMPLING_HZ );
+#else
+ return TRACY_SAMPLING_HZ > 1000000 ? 1000000 : ( TRACY_SAMPLING_HZ < 1 ? 1 : TRACY_SAMPLING_HZ );
+#endif
+}
+
+static constexpr int GetSamplingPeriod()
+{
+ return 1000000000 / GetSamplingFrequency();
+}
+
+}
+
+# if defined _WIN32
+
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+
+# define INITGUID
+# include <assert.h>
+# include <string.h>
+# include <windows.h>
+# include <dbghelp.h>
+# include <evntrace.h>
+# include <evntcons.h>
+# include <psapi.h>
+# include <winternl.h>
+
+# include "../common/TracyAlloc.hpp"
+# include "../common/TracySystem.hpp"
+# include "TracyProfiler.hpp"
+# include "TracyThread.hpp"
+
+namespace tracy
+{
+
+static const GUID PerfInfoGuid = { 0xce1dbfb4, 0x137e, 0x4da6, { 0x87, 0xb0, 0x3f, 0x59, 0xaa, 0x10, 0x2c, 0xbc } };
+static const GUID DxgKrnlGuid = { 0x802ec45a, 0x1e99, 0x4b83, { 0x99, 0x20, 0x87, 0xc9, 0x82, 0x77, 0xba, 0x9d } };
+static const GUID ThreadV2Guid = { 0x3d6fa8d1, 0xfe05, 0x11d0, { 0x9d, 0xda, 0x00, 0xc0, 0x4f, 0xd7, 0xba, 0x7c } };
+
+
+static TRACEHANDLE s_traceHandle;
+static TRACEHANDLE s_traceHandle2;
+static EVENT_TRACE_PROPERTIES* s_prop;
+static DWORD s_pid;
+
+static EVENT_TRACE_PROPERTIES* s_propVsync;
+static TRACEHANDLE s_traceHandleVsync;
+static TRACEHANDLE s_traceHandleVsync2;
+Thread* s_threadVsync = nullptr;
+
+struct CSwitch
+{
+ uint32_t newThreadId;
+ uint32_t oldThreadId;
+ int8_t newThreadPriority;
+ int8_t oldThreadPriority;
+ uint8_t previousCState;
+ int8_t spareByte;
+ int8_t oldThreadWaitReason;
+ int8_t oldThreadWaitMode;
+ int8_t oldThreadState;
+ int8_t oldThreadWaitIdealProcessor;
+ uint32_t newThreadWaitTime;
+ uint32_t reserved;
+};
+
+struct ReadyThread
+{
+ uint32_t threadId;
+ int8_t adjustReason;
+ int8_t adjustIncrement;
+ int8_t flag;
+ int8_t reserverd;
+};
+
+struct ThreadTrace
+{
+ uint32_t processId;
+ uint32_t threadId;
+ uint32_t stackBase;
+ uint32_t stackLimit;
+ uint32_t userStackBase;
+ uint32_t userStackLimit;
+ uint32_t startAddr;
+ uint32_t win32StartAddr;
+ uint32_t tebBase;
+ uint32_t subProcessTag;
+};
+
+struct StackWalkEvent
+{
+ uint64_t eventTimeStamp;
+ uint32_t stackProcess;
+ uint32_t stackThread;
+ uint64_t stack[192];
+};
+
+struct VSyncInfo
+{
+ void* dxgAdapter;
+ uint32_t vidPnTargetId;
+ uint64_t scannedPhysicalAddress;
+ uint32_t vidPnSourceId;
+ uint32_t frameNumber;
+ int64_t frameQpcTime;
+ void* hFlipDevice;
+ uint32_t flipType;
+ uint64_t flipFenceId;
+};
+
+extern "C" typedef NTSTATUS (WINAPI *t_NtQueryInformationThread)( HANDLE, THREADINFOCLASS, PVOID, ULONG, PULONG );
+extern "C" typedef BOOL (WINAPI *t_EnumProcessModules)( HANDLE, HMODULE*, DWORD, LPDWORD );
+extern "C" typedef BOOL (WINAPI *t_GetModuleInformation)( HANDLE, HMODULE, LPMODULEINFO, DWORD );
+extern "C" typedef DWORD (WINAPI *t_GetModuleBaseNameA)( HANDLE, HMODULE, LPSTR, DWORD );
+extern "C" typedef HRESULT (WINAPI *t_GetThreadDescription)( HANDLE, PWSTR* );
+
+t_NtQueryInformationThread NtQueryInformationThread = (t_NtQueryInformationThread)GetProcAddress( GetModuleHandleA( "ntdll.dll" ), "NtQueryInformationThread" );
+t_EnumProcessModules _EnumProcessModules = (t_EnumProcessModules)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "K32EnumProcessModules" );
+t_GetModuleInformation _GetModuleInformation = (t_GetModuleInformation)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "K32GetModuleInformation" );
+t_GetModuleBaseNameA _GetModuleBaseNameA = (t_GetModuleBaseNameA)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "K32GetModuleBaseNameA" );
+
+static t_GetThreadDescription _GetThreadDescription = 0;
+
+
+void WINAPI EventRecordCallback( PEVENT_RECORD record )
+{
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+
+ const auto& hdr = record->EventHeader;
+ switch( hdr.ProviderId.Data1 )
+ {
+ case 0x3d6fa8d1: // Thread Guid
+ if( hdr.EventDescriptor.Opcode == 36 )
+ {
+ const auto cswitch = (const CSwitch*)record->UserData;
+
+ TracyLfqPrepare( QueueType::ContextSwitch );
+ MemWrite( &item->contextSwitch.time, hdr.TimeStamp.QuadPart );
+ MemWrite( &item->contextSwitch.oldThread, cswitch->oldThreadId );
+ MemWrite( &item->contextSwitch.newThread, cswitch->newThreadId );
+ MemWrite( &item->contextSwitch.cpu, record->BufferContext.ProcessorNumber );
+ MemWrite( &item->contextSwitch.reason, cswitch->oldThreadWaitReason );
+ MemWrite( &item->contextSwitch.state, cswitch->oldThreadState );
+ TracyLfqCommit;
+ }
+ else if( hdr.EventDescriptor.Opcode == 50 )
+ {
+ const auto rt = (const ReadyThread*)record->UserData;
+
+ TracyLfqPrepare( QueueType::ThreadWakeup );
+ MemWrite( &item->threadWakeup.time, hdr.TimeStamp.QuadPart );
+ MemWrite( &item->threadWakeup.thread, rt->threadId );
+ TracyLfqCommit;
+ }
+ else if( hdr.EventDescriptor.Opcode == 1 || hdr.EventDescriptor.Opcode == 3 )
+ {
+ const auto tt = (const ThreadTrace*)record->UserData;
+
+ uint64_t tid = tt->threadId;
+ if( tid == 0 ) return;
+ uint64_t pid = tt->processId;
+ TracyLfqPrepare( QueueType::TidToPid );
+ MemWrite( &item->tidToPid.tid, tid );
+ MemWrite( &item->tidToPid.pid, pid );
+ TracyLfqCommit;
+ }
+ break;
+ case 0xdef2fe46: // StackWalk Guid
+ if( hdr.EventDescriptor.Opcode == 32 )
+ {
+ const auto sw = (const StackWalkEvent*)record->UserData;
+ if( sw->stackProcess == s_pid )
+ {
+ const uint64_t sz = ( record->UserDataLength - 16 ) / 8;
+ if( sz > 0 )
+ {
+ auto trace = (uint64_t*)tracy_malloc( ( 1 + sz ) * sizeof( uint64_t ) );
+ memcpy( trace, &sz, sizeof( uint64_t ) );
+ memcpy( trace+1, sw->stack, sizeof( uint64_t ) * sz );
+ TracyLfqPrepare( QueueType::CallstackSample );
+ MemWrite( &item->callstackSampleFat.time, sw->eventTimeStamp );
+ MemWrite( &item->callstackSampleFat.thread, sw->stackThread );
+ MemWrite( &item->callstackSampleFat.ptr, (uint64_t)trace );
+ TracyLfqCommit;
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static constexpr const char* VsyncName[] = {
+ "[0] Vsync",
+ "[1] Vsync",
+ "[2] Vsync",
+ "[3] Vsync",
+ "[4] Vsync",
+ "[5] Vsync",
+ "[6] Vsync",
+ "[7] Vsync",
+ "Vsync"
+};
+
+static uint32_t VsyncTarget[8] = {};
+
+void WINAPI EventRecordCallbackVsync( PEVENT_RECORD record )
+{
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() ) return;
+#endif
+
+ const auto& hdr = record->EventHeader;
+ assert( hdr.ProviderId.Data1 == 0x802EC45A );
+ assert( hdr.EventDescriptor.Id == 0x0011 );
+
+ const auto vs = (const VSyncInfo*)record->UserData;
+
+ int idx = 0;
+ do
+ {
+ if( VsyncTarget[idx] == 0 )
+ {
+ VsyncTarget[idx] = vs->vidPnTargetId;
+ break;
+ }
+ else if( VsyncTarget[idx] == vs->vidPnTargetId )
+ {
+ break;
+ }
+ }
+ while( ++idx < 8 );
+
+ TracyLfqPrepare( QueueType::FrameMarkMsg );
+ MemWrite( &item->frameMark.time, hdr.TimeStamp.QuadPart );
+ MemWrite( &item->frameMark.name, uint64_t( VsyncName[idx] ) );
+ TracyLfqCommit;
+}
+
+static void SetupVsync()
+{
+#if _WIN32_WINNT >= _WIN32_WINNT_WINBLUE
+ const auto psz = sizeof( EVENT_TRACE_PROPERTIES ) + MAX_PATH;
+ s_propVsync = (EVENT_TRACE_PROPERTIES*)tracy_malloc( psz );
+ memset( s_propVsync, 0, sizeof( EVENT_TRACE_PROPERTIES ) );
+ s_propVsync->LogFileMode = EVENT_TRACE_REAL_TIME_MODE;
+ s_propVsync->Wnode.BufferSize = psz;
+#ifdef TRACY_TIMER_QPC
+ s_propVsync->Wnode.ClientContext = 1;
+#else
+ s_propVsync->Wnode.ClientContext = 3;
+#endif
+ s_propVsync->LoggerNameOffset = sizeof( EVENT_TRACE_PROPERTIES );
+ strcpy( ((char*)s_propVsync) + sizeof( EVENT_TRACE_PROPERTIES ), "TracyVsync" );
+
+ auto backup = tracy_malloc( psz );
+ memcpy( backup, s_propVsync, psz );
+
+ const auto controlStatus = ControlTraceA( 0, "TracyVsync", s_propVsync, EVENT_TRACE_CONTROL_STOP );
+ if( controlStatus != ERROR_SUCCESS && controlStatus != ERROR_WMI_INSTANCE_NOT_FOUND )
+ {
+ tracy_free( backup );
+ tracy_free( s_propVsync );
+ return;
+ }
+
+ memcpy( s_propVsync, backup, psz );
+ tracy_free( backup );
+
+ const auto startStatus = StartTraceA( &s_traceHandleVsync, "TracyVsync", s_propVsync );
+ if( startStatus != ERROR_SUCCESS )
+ {
+ tracy_free( s_propVsync );
+ return;
+ }
+
+ EVENT_FILTER_EVENT_ID fe = {};
+ fe.FilterIn = TRUE;
+ fe.Count = 1;
+ fe.Events[0] = 0x0011; // VSyncDPC_Info
+
+ EVENT_FILTER_DESCRIPTOR desc = {};
+ desc.Ptr = (ULONGLONG)&fe;
+ desc.Size = sizeof( fe );
+ desc.Type = EVENT_FILTER_TYPE_EVENT_ID;
+
+ ENABLE_TRACE_PARAMETERS params = {};
+ params.Version = ENABLE_TRACE_PARAMETERS_VERSION_2;
+ params.EnableProperty = EVENT_ENABLE_PROPERTY_IGNORE_KEYWORD_0;
+ params.SourceId = s_propVsync->Wnode.Guid;
+ params.EnableFilterDesc = &desc;
+ params.FilterDescCount = 1;
+
+ uint64_t mask = 0x4000000000000001; // Microsoft_Windows_DxgKrnl_Performance | Base
+ if( EnableTraceEx2( s_traceHandleVsync, &DxgKrnlGuid, EVENT_CONTROL_CODE_ENABLE_PROVIDER, TRACE_LEVEL_INFORMATION, mask, mask, 0, &params ) != ERROR_SUCCESS )
+ {
+ tracy_free( s_propVsync );
+ return;
+ }
+
+ char loggerName[MAX_PATH];
+ strcpy( loggerName, "TracyVsync" );
+
+ EVENT_TRACE_LOGFILEA log = {};
+ log.LoggerName = loggerName;
+ log.ProcessTraceMode = PROCESS_TRACE_MODE_REAL_TIME | PROCESS_TRACE_MODE_EVENT_RECORD | PROCESS_TRACE_MODE_RAW_TIMESTAMP;
+ log.EventRecordCallback = EventRecordCallbackVsync;
+
+ s_traceHandleVsync2 = OpenTraceA( &log );
+ if( s_traceHandleVsync2 == (TRACEHANDLE)INVALID_HANDLE_VALUE )
+ {
+ CloseTrace( s_traceHandleVsync );
+ tracy_free( s_propVsync );
+ return;
+ }
+
+ s_threadVsync = (Thread*)tracy_malloc( sizeof( Thread ) );
+ new(s_threadVsync) Thread( [] (void*) {
+ ThreadExitHandler threadExitHandler;
+ SetThreadPriority( GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL );
+ SetThreadName( "Tracy Vsync" );
+ ProcessTrace( &s_traceHandleVsync2, 1, nullptr, nullptr );
+ }, nullptr );
+#endif
+}
+
+static constexpr int GetSamplingInterval()
+{
+ return GetSamplingPeriod() / 100;
+}
+
+bool SysTraceStart( int64_t& samplingPeriod )
+{
+ if( !_GetThreadDescription ) _GetThreadDescription = (t_GetThreadDescription)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "GetThreadDescription" );
+
+ s_pid = GetCurrentProcessId();
+
+#if defined _WIN64
+ constexpr bool isOs64Bit = true;
+#else
+ BOOL _iswow64;
+ IsWow64Process( GetCurrentProcess(), &_iswow64 );
+ const bool isOs64Bit = _iswow64;
+#endif
+
+ TOKEN_PRIVILEGES priv = {};
+ priv.PrivilegeCount = 1;
+ priv.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+ if( LookupPrivilegeValue( nullptr, SE_SYSTEM_PROFILE_NAME, &priv.Privileges[0].Luid ) == 0 ) return false;
+
+ HANDLE pt;
+ if( OpenProcessToken( GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &pt ) == 0 ) return false;
+ const auto adjust = AdjustTokenPrivileges( pt, FALSE, &priv, 0, nullptr, nullptr );
+ CloseHandle( pt );
+ if( adjust == 0 ) return false;
+ const auto status = GetLastError();
+ if( status != ERROR_SUCCESS ) return false;
+
+ if( isOs64Bit )
+ {
+ TRACE_PROFILE_INTERVAL interval = {};
+ interval.Interval = GetSamplingInterval();
+ const auto intervalStatus = TraceSetInformation( 0, TraceSampledProfileIntervalInfo, &interval, sizeof( interval ) );
+ if( intervalStatus != ERROR_SUCCESS ) return false;
+ samplingPeriod = GetSamplingPeriod();
+ }
+
+ const auto psz = sizeof( EVENT_TRACE_PROPERTIES ) + sizeof( KERNEL_LOGGER_NAME );
+ s_prop = (EVENT_TRACE_PROPERTIES*)tracy_malloc( psz );
+ memset( s_prop, 0, sizeof( EVENT_TRACE_PROPERTIES ) );
+ ULONG flags = 0;
+#ifndef TRACY_NO_CONTEXT_SWITCH
+ flags = EVENT_TRACE_FLAG_CSWITCH | EVENT_TRACE_FLAG_DISPATCHER | EVENT_TRACE_FLAG_THREAD;
+#endif
+#ifndef TRACY_NO_SAMPLING
+ if( isOs64Bit ) flags |= EVENT_TRACE_FLAG_PROFILE;
+#endif
+ s_prop->EnableFlags = flags;
+ s_prop->LogFileMode = EVENT_TRACE_REAL_TIME_MODE;
+ s_prop->Wnode.BufferSize = psz;
+ s_prop->Wnode.Flags = WNODE_FLAG_TRACED_GUID;
+#ifdef TRACY_TIMER_QPC
+ s_prop->Wnode.ClientContext = 1;
+#else
+ s_prop->Wnode.ClientContext = 3;
+#endif
+ s_prop->Wnode.Guid = SystemTraceControlGuid;
+ s_prop->BufferSize = 1024;
+ s_prop->MinimumBuffers = std::thread::hardware_concurrency() * 4;
+ s_prop->MaximumBuffers = std::thread::hardware_concurrency() * 6;
+ s_prop->LoggerNameOffset = sizeof( EVENT_TRACE_PROPERTIES );
+ memcpy( ((char*)s_prop) + sizeof( EVENT_TRACE_PROPERTIES ), KERNEL_LOGGER_NAME, sizeof( KERNEL_LOGGER_NAME ) );
+
+ auto backup = tracy_malloc( psz );
+ memcpy( backup, s_prop, psz );
+
+ const auto controlStatus = ControlTrace( 0, KERNEL_LOGGER_NAME, s_prop, EVENT_TRACE_CONTROL_STOP );
+ if( controlStatus != ERROR_SUCCESS && controlStatus != ERROR_WMI_INSTANCE_NOT_FOUND )
+ {
+ tracy_free( backup );
+ tracy_free( s_prop );
+ return false;
+ }
+
+ memcpy( s_prop, backup, psz );
+ tracy_free( backup );
+
+ const auto startStatus = StartTrace( &s_traceHandle, KERNEL_LOGGER_NAME, s_prop );
+ if( startStatus != ERROR_SUCCESS )
+ {
+ tracy_free( s_prop );
+ return false;
+ }
+
+ if( isOs64Bit )
+ {
+ CLASSIC_EVENT_ID stackId[2] = {};
+ stackId[0].EventGuid = PerfInfoGuid;
+ stackId[0].Type = 46;
+ stackId[1].EventGuid = ThreadV2Guid;
+ stackId[1].Type = 36;
+ const auto stackStatus = TraceSetInformation( s_traceHandle, TraceStackTracingInfo, &stackId, sizeof( stackId ) );
+ if( stackStatus != ERROR_SUCCESS )
+ {
+ tracy_free( s_prop );
+ return false;
+ }
+ }
+
+#ifdef UNICODE
+ WCHAR KernelLoggerName[sizeof( KERNEL_LOGGER_NAME )];
+#else
+ char KernelLoggerName[sizeof( KERNEL_LOGGER_NAME )];
+#endif
+ memcpy( KernelLoggerName, KERNEL_LOGGER_NAME, sizeof( KERNEL_LOGGER_NAME ) );
+ EVENT_TRACE_LOGFILE log = {};
+ log.LoggerName = KernelLoggerName;
+ log.ProcessTraceMode = PROCESS_TRACE_MODE_REAL_TIME | PROCESS_TRACE_MODE_EVENT_RECORD | PROCESS_TRACE_MODE_RAW_TIMESTAMP;
+ log.EventRecordCallback = EventRecordCallback;
+
+ s_traceHandle2 = OpenTrace( &log );
+ if( s_traceHandle2 == (TRACEHANDLE)INVALID_HANDLE_VALUE )
+ {
+ CloseTrace( s_traceHandle );
+ tracy_free( s_prop );
+ return false;
+ }
+
+#ifndef TRACY_NO_VSYNC_CAPTURE
+ SetupVsync();
+#endif
+
+ return true;
+}
+
+void SysTraceStop()
+{
+ if( s_threadVsync )
+ {
+ CloseTrace( s_traceHandleVsync2 );
+ CloseTrace( s_traceHandleVsync );
+ s_threadVsync->~Thread();
+ tracy_free( s_threadVsync );
+ }
+
+ CloseTrace( s_traceHandle2 );
+ CloseTrace( s_traceHandle );
+}
+
+void SysTraceWorker( void* ptr )
+{
+ ThreadExitHandler threadExitHandler;
+ SetThreadPriority( GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL );
+ SetThreadName( "Tracy SysTrace" );
+ ProcessTrace( &s_traceHandle2, 1, 0, 0 );
+ ControlTrace( 0, KERNEL_LOGGER_NAME, s_prop, EVENT_TRACE_CONTROL_STOP );
+ tracy_free( s_prop );
+}
+
+void SysTraceGetExternalName( uint64_t thread, const char*& threadName, const char*& name )
+{
+ bool threadSent = false;
+ auto hnd = OpenThread( THREAD_QUERY_INFORMATION, FALSE, DWORD( thread ) );
+ if( hnd == 0 )
+ {
+ hnd = OpenThread( THREAD_QUERY_LIMITED_INFORMATION, FALSE, DWORD( thread ) );
+ }
+ if( hnd != 0 )
+ {
+ if( _GetThreadDescription )
+ {
+ PWSTR tmp;
+ _GetThreadDescription( hnd, &tmp );
+ char buf[256];
+ if( tmp )
+ {
+ auto ret = wcstombs( buf, tmp, 256 );
+ if( ret != 0 )
+ {
+ threadName = CopyString( buf, ret );
+ threadSent = true;
+ }
+ }
+ }
+ const auto pid = GetProcessIdOfThread( hnd );
+ if( !threadSent && NtQueryInformationThread && _EnumProcessModules && _GetModuleInformation && _GetModuleBaseNameA )
+ {
+ void* ptr;
+ ULONG retlen;
+ auto status = NtQueryInformationThread( hnd, (THREADINFOCLASS)9 /*ThreadQuerySetWin32StartAddress*/, &ptr, sizeof( &ptr ), &retlen );
+ if( status == 0 )
+ {
+ const auto phnd = OpenProcess( PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, pid );
+ if( phnd != INVALID_HANDLE_VALUE )
+ {
+ HMODULE modules[1024];
+ DWORD needed;
+ if( _EnumProcessModules( phnd, modules, 1024 * sizeof( HMODULE ), &needed ) != 0 )
+ {
+ const auto sz = std::min( DWORD( needed / sizeof( HMODULE ) ), DWORD( 1024 ) );
+ for( DWORD i=0; i<sz; i++ )
+ {
+ MODULEINFO info;
+ if( _GetModuleInformation( phnd, modules[i], &info, sizeof( info ) ) != 0 )
+ {
+ if( (uint64_t)ptr >= (uint64_t)info.lpBaseOfDll && (uint64_t)ptr <= (uint64_t)info.lpBaseOfDll + (uint64_t)info.SizeOfImage )
+ {
+ char buf2[1024];
+ const auto modlen = _GetModuleBaseNameA( phnd, modules[i], buf2, 1024 );
+ if( modlen != 0 )
+ {
+ threadName = CopyString( buf2, modlen );
+ threadSent = true;
+ }
+ }
+ }
+ }
+ }
+ CloseHandle( phnd );
+ }
+ }
+ }
+ CloseHandle( hnd );
+ if( !threadSent )
+ {
+ threadName = CopyString( "???", 3 );
+ threadSent = true;
+ }
+ if( pid != 0 )
+ {
+ {
+ uint64_t _pid = pid;
+ TracyLfqPrepare( QueueType::TidToPid );
+ MemWrite( &item->tidToPid.tid, thread );
+ MemWrite( &item->tidToPid.pid, _pid );
+ TracyLfqCommit;
+ }
+ if( pid == 4 )
+ {
+ name = CopyStringFast( "System", 6 );
+ return;
+ }
+ else
+ {
+ const auto phnd = OpenProcess( PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid );
+ if( phnd != INVALID_HANDLE_VALUE )
+ {
+ char buf2[1024];
+ const auto sz = GetProcessImageFileNameA( phnd, buf2, 1024 );
+ CloseHandle( phnd );
+ if( sz != 0 )
+ {
+ auto ptr = buf2 + sz - 1;
+ while( ptr > buf2 && *ptr != '\\' ) ptr--;
+ if( *ptr == '\\' ) ptr++;
+ name = CopyStringFast( ptr );
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ if( !threadSent )
+ {
+ threadName = CopyString( "???", 3 );
+ }
+ name = CopyStringFast( "???", 3 );
+}
+
+}
+
+# elif defined __linux__
+
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <sys/wait.h>
+# include <fcntl.h>
+# include <inttypes.h>
+# include <limits>
+# include <poll.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+# include <unistd.h>
+# include <atomic>
+# include <thread>
+# include <linux/perf_event.h>
+# include <linux/version.h>
+# include <sys/mman.h>
+# include <sys/ioctl.h>
+# include <sys/syscall.h>
+
+# include "TracyProfiler.hpp"
+# include "TracyRingBuffer.hpp"
+# include "TracyThread.hpp"
+
+namespace tracy
+{
+
+static std::atomic<bool> traceActive { false };
+static int s_numCpus = 0;
+static int s_numBuffers = 0;
+static int s_ctxBufferIdx = 0;
+
+static RingBuffer* s_ring = nullptr;
+
+static const int ThreadHashSize = 4 * 1024;
+static uint32_t s_threadHash[ThreadHashSize] = {};
+
+static bool CurrentProcOwnsThread( uint32_t tid )
+{
+ const auto hash = tid & ( ThreadHashSize-1 );
+ const auto hv = s_threadHash[hash];
+ if( hv == tid ) return true;
+ if( hv == -tid ) return false;
+
+ char path[256];
+ sprintf( path, "/proc/self/task/%d", tid );
+ struct stat st;
+ if( stat( path, &st ) == 0 )
+ {
+ s_threadHash[hash] = tid;
+ return true;
+ }
+ else
+ {
+ s_threadHash[hash] = -tid;
+ return false;
+ }
+}
+
+static int perf_event_open( struct perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags )
+{
+ return syscall( __NR_perf_event_open, hw_event, pid, cpu, group_fd, flags );
+}
+
+enum TraceEventId
+{
+ EventCallstack,
+ EventCpuCycles,
+ EventInstructionsRetired,
+ EventCacheReference,
+ EventCacheMiss,
+ EventBranchRetired,
+ EventBranchMiss,
+ EventContextSwitch,
+ EventWakeup,
+};
+
+static void ProbePreciseIp( perf_event_attr& pe, unsigned long long config0, unsigned long long config1, pid_t pid )
+{
+ pe.config = config1;
+ pe.precise_ip = 3;
+ while( pe.precise_ip != 0 )
+ {
+ const int fd = perf_event_open( &pe, pid, 0, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ close( fd );
+ break;
+ }
+ pe.precise_ip--;
+ }
+ pe.config = config0;
+ while( pe.precise_ip != 0 )
+ {
+ const int fd = perf_event_open( &pe, pid, 0, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ close( fd );
+ break;
+ }
+ pe.precise_ip--;
+ }
+ TracyDebug( " Probed precise_ip: %i\n", pe.precise_ip );
+}
+
+static void ProbePreciseIp( perf_event_attr& pe, pid_t pid )
+{
+ pe.precise_ip = 3;
+ while( pe.precise_ip != 0 )
+ {
+ const int fd = perf_event_open( &pe, pid, 0, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ close( fd );
+ break;
+ }
+ pe.precise_ip--;
+ }
+ TracyDebug( " Probed precise_ip: %i\n", pe.precise_ip );
+}
+
+static bool IsGenuineIntel()
+{
+#if defined __i386 || defined __x86_64__
+ uint32_t regs[4] = {};
+ __get_cpuid( 0, regs, regs+1, regs+2, regs+3 );
+ char manufacturer[12];
+ memcpy( manufacturer, regs+1, 4 );
+ memcpy( manufacturer+4, regs+3, 4 );
+ memcpy( manufacturer+8, regs+2, 4 );
+ return memcmp( manufacturer, "GenuineIntel", 12 ) == 0;
+#else
+ return false;
+#endif
+}
+
+static const char* ReadFile( const char* path )
+{
+ int fd = open( path, O_RDONLY );
+ if( fd < 0 ) return nullptr;
+
+ static char tmp[64];
+ const auto cnt = read( fd, tmp, 63 );
+ close( fd );
+ if( cnt < 0 ) return nullptr;
+ tmp[cnt] = '\0';
+ return tmp;
+}
+
+bool SysTraceStart( int64_t& samplingPeriod )
+{
+#ifndef CLOCK_MONOTONIC_RAW
+ return false;
+#endif
+
+ int paranoidLevel = 2;
+ const auto paranoidLevelStr = ReadFile( "/proc/sys/kernel/perf_event_paranoid" );
+ if( !paranoidLevelStr ) return false;
+ paranoidLevel = atoi( paranoidLevelStr );
+ TracyDebug( "perf_event_paranoid: %i\n", paranoidLevel );
+
+ int switchId = -1, wakeupId = -1;
+ const auto switchIdStr = ReadFile( "/sys/kernel/debug/tracing/events/sched/sched_switch/id" );
+ if( switchIdStr ) switchId = atoi( switchIdStr );
+ const auto wakeupIdStr = ReadFile( "/sys/kernel/debug/tracing/events/sched/sched_wakeup/id" );
+ if( wakeupIdStr ) wakeupId = atoi( wakeupIdStr );
+
+ TracyDebug( "sched_switch id: %i\nsched_wakeup id: %i\n", switchId, wakeupId );
+
+#ifdef TRACY_NO_SAMPLE_RETIREMENT
+ const bool noRetirement = true;
+#else
+ const char* noRetirementEnv = GetEnvVar( "TRACY_NO_SAMPLE_RETIREMENT" );
+ const bool noRetirement = noRetirementEnv && noRetirementEnv[0] == '1';
+#endif
+
+#ifdef TRACY_NO_SAMPLE_CACHE
+ const bool noCache = true;
+#else
+ const char* noCacheEnv = GetEnvVar( "TRACY_NO_SAMPLE_CACHE" );
+ const bool noCache = noCacheEnv && noCacheEnv[0] == '1';
+#endif
+
+#ifdef TRACY_NO_SAMPLE_BRANCH
+ const bool noBranch = true;
+#else
+ const char* noBranchEnv = GetEnvVar( "TRACY_NO_SAMPLE_BRANCH" );
+ const bool noBranch = noBranchEnv && noBranchEnv[0] == '1';
+#endif
+
+#ifdef TRACY_NO_CONTEXT_SWITCH
+ const bool noCtxSwitch = true;
+#else
+ const char* noCtxSwitchEnv = GetEnvVar( "TRACY_NO_CONTEXT_SWITCH" );
+ const bool noCtxSwitch = noCtxSwitchEnv && noCtxSwitchEnv[0] == '1';
+#endif
+
+ samplingPeriod = GetSamplingPeriod();
+ uint32_t currentPid = (uint32_t)getpid();
+
+ s_numCpus = (int)std::thread::hardware_concurrency();
+
+ const auto maxNumBuffers = s_numCpus * (
+ 1 + // software sampling
+ 2 + // CPU cycles + instructions retired
+ 2 + // cache reference + miss
+ 2 + // branch retired + miss
+ 2 // context switches + wakeups
+ );
+ s_ring = (RingBuffer*)tracy_malloc( sizeof( RingBuffer ) * maxNumBuffers );
+ s_numBuffers = 0;
+
+ // software sampling
+ perf_event_attr pe = {};
+ pe.type = PERF_TYPE_SOFTWARE;
+ pe.size = sizeof( perf_event_attr );
+ pe.config = PERF_COUNT_SW_CPU_CLOCK;
+ pe.sample_freq = GetSamplingFrequency();
+ pe.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CALLCHAIN;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 8, 0 )
+ pe.sample_max_stack = 127;
+#endif
+ pe.disabled = 1;
+ pe.freq = 1;
+ pe.inherit = 1;
+#if !defined TRACY_HW_TIMER || !( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+ pe.use_clockid = 1;
+ pe.clockid = CLOCK_MONOTONIC_RAW;
+#endif
+
+ TracyDebug( "Setup software sampling\n" );
+ ProbePreciseIp( pe, currentPid );
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd == -1 )
+ {
+ pe.exclude_kernel = 1;
+ ProbePreciseIp( pe, currentPid );
+ fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd == -1 ) break;
+ TracyDebug( " No access to kernel samples\n" );
+ }
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventCallstack );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+
+ // CPU cycles + instructions retired
+ pe = {};
+ pe.type = PERF_TYPE_HARDWARE;
+ pe.size = sizeof( perf_event_attr );
+ pe.sample_freq = 5000;
+ pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TIME;
+ pe.disabled = 1;
+ pe.exclude_kernel = 1;
+ pe.exclude_guest = 1;
+ pe.exclude_hv = 1;
+ pe.freq = 1;
+ pe.inherit = 1;
+#if !defined TRACY_HW_TIMER || !( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+ pe.use_clockid = 1;
+ pe.clockid = CLOCK_MONOTONIC_RAW;
+#endif
+
+ if( !noRetirement )
+ {
+ TracyDebug( "Setup sampling cycles + retirement\n" );
+ ProbePreciseIp( pe, PERF_COUNT_HW_CPU_CYCLES, PERF_COUNT_HW_INSTRUCTIONS, currentPid );
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventCpuCycles );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+
+ pe.config = PERF_COUNT_HW_INSTRUCTIONS;
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventInstructionsRetired );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+ }
+
+ // cache reference + miss
+ if( !noCache )
+ {
+ TracyDebug( "Setup sampling CPU cache references + misses\n" );
+ ProbePreciseIp( pe, PERF_COUNT_HW_CACHE_REFERENCES, PERF_COUNT_HW_CACHE_MISSES, currentPid );
+ if( IsGenuineIntel() )
+ {
+ pe.precise_ip = 0;
+ TracyDebug( " CPU is GenuineIntel, forcing precise_ip down to 0\n" );
+ }
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventCacheReference );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+
+ pe.config = PERF_COUNT_HW_CACHE_MISSES;
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventCacheMiss );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+ }
+
+ // branch retired + miss
+ if( !noBranch )
+ {
+ TracyDebug( "Setup sampling CPU branch retirements + misses\n" );
+ ProbePreciseIp( pe, PERF_COUNT_HW_BRANCH_INSTRUCTIONS, PERF_COUNT_HW_BRANCH_MISSES, currentPid );
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventBranchRetired );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+
+ pe.config = PERF_COUNT_HW_BRANCH_MISSES;
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, currentPid, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventBranchMiss );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+ }
+
+ s_ctxBufferIdx = s_numBuffers;
+
+ // context switches
+ if( !noCtxSwitch && switchId != -1 )
+ {
+ pe = {};
+ pe.type = PERF_TYPE_TRACEPOINT;
+ pe.size = sizeof( perf_event_attr );
+ pe.sample_period = 1;
+ pe.sample_type = PERF_SAMPLE_TIME | PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 8, 0 )
+ pe.sample_max_stack = 127;
+#endif
+ pe.disabled = 1;
+ pe.inherit = 1;
+ pe.config = switchId;
+#if !defined TRACY_HW_TIMER || !( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+ pe.use_clockid = 1;
+ pe.clockid = CLOCK_MONOTONIC_RAW;
+#endif
+
+ TracyDebug( "Setup context switch capture\n" );
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, -1, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 256*1024, fd, EventContextSwitch, i );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+
+ if( wakeupId != -1 )
+ {
+ pe.config = wakeupId;
+ pe.config &= ~PERF_SAMPLE_CALLCHAIN;
+
+ TracyDebug( "Setup wakeup capture\n" );
+ for( int i=0; i<s_numCpus; i++ )
+ {
+ const int fd = perf_event_open( &pe, -1, i, -1, PERF_FLAG_FD_CLOEXEC );
+ if( fd != -1 )
+ {
+ new( s_ring+s_numBuffers ) RingBuffer( 64*1024, fd, EventWakeup, i );
+ s_numBuffers++;
+ TracyDebug( " Core %i ok\n", i );
+ }
+ }
+ }
+ }
+
+ TracyDebug( "Ringbuffers in use: %i\n", s_numBuffers );
+
+ traceActive.store( true, std::memory_order_relaxed );
+ return true;
+}
+
+void SysTraceStop()
+{
+ traceActive.store( false, std::memory_order_relaxed );
+}
+
+static uint64_t* GetCallstackBlock( uint64_t cnt, RingBuffer& ring, uint64_t offset )
+{
+ auto trace = (uint64_t*)tracy_malloc_fast( ( 1 + cnt ) * sizeof( uint64_t ) );
+ ring.Read( trace+1, offset, sizeof( uint64_t ) * cnt );
+
+#if defined __x86_64__ || defined _M_X64
+ // remove non-canonical pointers
+ do
+ {
+ const auto test = (int64_t)trace[cnt];
+ const auto m1 = test >> 63;
+ const auto m2 = test >> 47;
+ if( m1 == m2 ) break;
+ }
+ while( --cnt > 0 );
+ for( uint64_t j=1; j<cnt; j++ )
+ {
+ const auto test = (int64_t)trace[j];
+ const auto m1 = test >> 63;
+ const auto m2 = test >> 47;
+ if( m1 != m2 ) trace[j] = 0;
+ }
+#endif
+
+ for( uint64_t j=1; j<=cnt; j++ )
+ {
+ if( trace[j] >= (uint64_t)-4095 ) // PERF_CONTEXT_MAX
+ {
+ memmove( trace+j, trace+j+1, sizeof( uint64_t ) * ( cnt - j ) );
+ cnt--;
+ }
+ }
+
+ memcpy( trace, &cnt, sizeof( uint64_t ) );
+ return trace;
+}
+
+void SysTraceWorker( void* ptr )
+{
+ ThreadExitHandler threadExitHandler;
+ SetThreadName( "Tracy Sampling" );
+ InitRpmalloc();
+ sched_param sp = { 5 };
+ pthread_setschedparam( pthread_self(), SCHED_FIFO, &sp );
+ for( int i=0; i<s_numBuffers; i++ ) s_ring[i].Enable();
+ for(;;)
+ {
+#ifdef TRACY_ON_DEMAND
+ if( !GetProfiler().IsConnected() )
+ {
+ if( !traceActive.load( std::memory_order_relaxed ) ) break;
+ for( int i=0; i<s_numBuffers; i++ )
+ {
+ auto& ring = s_ring[i];
+ const auto head = ring.LoadHead();
+ const auto tail = ring.GetTail();
+ if( head != tail )
+ {
+ const auto end = head - tail;
+ ring.Advance( end );
+ }
+ }
+ if( !traceActive.load( std::memory_order_relaxed ) ) break;
+ std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
+ continue;
+ }
+#endif
+
+ bool hadData = false;
+ for( int i=0; i<s_ctxBufferIdx; i++ )
+ {
+ if( !traceActive.load( std::memory_order_relaxed ) ) break;
+ auto& ring = s_ring[i];
+ const auto head = ring.LoadHead();
+ const auto tail = ring.GetTail();
+ if( head == tail ) continue;
+ assert( head > tail );
+ hadData = true;
+
+ const auto end = head - tail;
+ uint64_t pos = 0;
+ while( pos < end )
+ {
+ perf_event_header hdr;
+ ring.Read( &hdr, pos, sizeof( perf_event_header ) );
+ if( hdr.type == PERF_RECORD_SAMPLE )
+ {
+ auto offset = pos + sizeof( perf_event_header );
+ const auto id = ring.GetId();
+ assert( id != EventContextSwitch );
+ if( id == EventCallstack )
+ {
+ // Layout:
+ // u32 pid, tid
+ // u64 time
+ // u64 cnt
+ // u64 ip[cnt]
+
+ uint32_t tid;
+ uint64_t t0;
+ uint64_t cnt;
+
+ offset += sizeof( uint32_t );
+ ring.Read( &tid, offset, sizeof( uint32_t ) );
+ offset += sizeof( uint32_t );
+ ring.Read( &t0, offset, sizeof( uint64_t ) );
+ offset += sizeof( uint64_t );
+ ring.Read( &cnt, offset, sizeof( uint64_t ) );
+ offset += sizeof( uint64_t );
+
+ if( cnt > 0 )
+ {
+#if defined TRACY_HW_TIMER && ( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+ t0 = ring.ConvertTimeToTsc( t0 );
+#endif
+ auto trace = GetCallstackBlock( cnt, ring, offset );
+
+ TracyLfqPrepare( QueueType::CallstackSample );
+ MemWrite( &item->callstackSampleFat.time, t0 );
+ MemWrite( &item->callstackSampleFat.thread, tid );
+ MemWrite( &item->callstackSampleFat.ptr, (uint64_t)trace );
+ TracyLfqCommit;
+ }
+ }
+ else
+ {
+ // Layout:
+ // u64 ip
+ // u64 time
+
+ uint64_t ip, t0;
+ ring.Read( &ip, offset, sizeof( uint64_t ) );
+ offset += sizeof( uint64_t );
+ ring.Read( &t0, offset, sizeof( uint64_t ) );
+
+#if defined TRACY_HW_TIMER && ( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+ t0 = ring.ConvertTimeToTsc( t0 );
+#endif
+ QueueType type;
+ switch( id )
+ {
+ case EventCpuCycles:
+ type = QueueType::HwSampleCpuCycle;
+ break;
+ case EventInstructionsRetired:
+ type = QueueType::HwSampleInstructionRetired;
+ break;
+ case EventCacheReference:
+ type = QueueType::HwSampleCacheReference;
+ break;
+ case EventCacheMiss:
+ type = QueueType::HwSampleCacheMiss;
+ break;
+ case EventBranchRetired:
+ type = QueueType::HwSampleBranchRetired;
+ break;
+ case EventBranchMiss:
+ type = QueueType::HwSampleBranchMiss;
+ break;
+ default:
+ assert( false );
+ break;
+ }
+
+ TracyLfqPrepare( type );
+ MemWrite( &item->hwSample.ip, ip );
+ MemWrite( &item->hwSample.time, t0 );
+ TracyLfqCommit;
+ }
+ }
+ pos += hdr.size;
+ }
+ assert( pos == end );
+ ring.Advance( end );
+ }
+ if( !traceActive.load( std::memory_order_relaxed ) ) break;
+
+ if( s_ctxBufferIdx != s_numBuffers )
+ {
+ const auto ctxBufNum = s_numBuffers - s_ctxBufferIdx;
+
+ int activeNum = 0;
+ bool active[512];
+ uint32_t end[512];
+ uint32_t pos[512];
+ for( int i=0; i<ctxBufNum; i++ )
+ {
+ const auto rbIdx = s_ctxBufferIdx + i;
+ const auto rbHead = s_ring[rbIdx].LoadHead();
+ const auto rbTail = s_ring[rbIdx].GetTail();
+ const auto rbActive = rbHead != rbTail;
+
+ active[i] = rbActive;
+ if( rbActive )
+ {
+ activeNum++;
+ end[i] = rbHead - rbTail;
+ pos[i] = 0;
+ }
+ else
+ {
+ end[i] = 0;
+ }
+ }
+ if( activeNum > 0 )
+ {
+ hadData = true;
+ while( activeNum > 0 )
+ {
+ int sel = -1;
+ int64_t t0 = std::numeric_limits<int64_t>::max();
+ for( int i=0; i<ctxBufNum; i++ )
+ {
+ if( !active[i] ) continue;
+ auto rbPos = pos[i];
+ assert( rbPos < end[i] );
+ const auto rbIdx = s_ctxBufferIdx + i;
+ perf_event_header hdr;
+ s_ring[rbIdx].Read( &hdr, rbPos, sizeof( perf_event_header ) );
+ if( hdr.type == PERF_RECORD_SAMPLE )
+ {
+ int64_t rbTime;
+ s_ring[rbIdx].Read( &rbTime, rbPos + sizeof( perf_event_header ), sizeof( int64_t ) );
+ if( rbTime < t0 )
+ {
+ t0 = rbTime;
+ sel = i;
+ }
+ }
+ else
+ {
+ rbPos += hdr.size;
+ if( rbPos == end[i] )
+ {
+ active[i] = false;
+ activeNum--;
+ }
+ else
+ {
+ pos[i] = rbPos;
+ }
+ }
+ }
+ assert( sel >= 0 || activeNum == 0 );
+ if( sel >= 0 )
+ {
+ auto& ring = s_ring[s_ctxBufferIdx + sel];
+ auto rbPos = pos[sel];
+ auto offset = rbPos;
+ perf_event_header hdr;
+ ring.Read( &hdr, offset, sizeof( perf_event_header ) );
+
+#if defined TRACY_HW_TIMER && ( defined __i386 || defined _M_IX86 || defined __x86_64__ || defined _M_X64 )
+ t0 = ring.ConvertTimeToTsc( t0 );
+#endif
+
+ if( ring.GetId() == EventContextSwitch )
+ {
+ // Layout:
+ // u64 time
+ // u64 cnt
+ // u64 ip[cnt]
+ // u32 size
+ // u8 data[size]
+ // Data (not ABI stable, but has not changed since it was added, in 2009):
+ // u8 hdr[8]
+ // u8 prev_comm[16]
+ // u32 prev_pid
+ // u32 prev_prio
+ // lng prev_state
+ // u8 next_comm[16]
+ // u32 next_pid
+ // u32 next_prio
+
+ offset += sizeof( perf_event_header ) + sizeof( uint64_t );
+
+ uint64_t cnt;
+ ring.Read( &cnt, offset, sizeof( uint64_t ) );
+ offset += sizeof( uint64_t );
+ const auto traceOffset = offset;
+ offset += sizeof( uint64_t ) * cnt + sizeof( uint32_t ) + 8 + 16;
+
+ uint32_t prev_pid, next_pid;
+ long prev_state;
+
+ ring.Read( &prev_pid, offset, sizeof( uint32_t ) );
+ offset += sizeof( uint32_t ) + sizeof( uint32_t );
+ ring.Read( &prev_state, offset, sizeof( long ) );
+ offset += sizeof( long ) + 16;
+ ring.Read( &next_pid, offset, sizeof( uint32_t ) );
+
+ uint8_t reason = 100;
+ uint8_t state;
+
+ if( prev_state & 0x0001 ) state = 104;
+ else if( prev_state & 0x0002 ) state = 101;
+ else if( prev_state & 0x0004 ) state = 105;
+ else if( prev_state & 0x0008 ) state = 106;
+ else if( prev_state & 0x0010 ) state = 108;
+ else if( prev_state & 0x0020 ) state = 109;
+ else if( prev_state & 0x0040 ) state = 110;
+ else if( prev_state & 0x0080 ) state = 102;
+ else state = 103;
+
+ TracyLfqPrepare( QueueType::ContextSwitch );
+ MemWrite( &item->contextSwitch.time, t0 );
+ MemWrite( &item->contextSwitch.oldThread, prev_pid );
+ MemWrite( &item->contextSwitch.newThread, next_pid );
+ MemWrite( &item->contextSwitch.cpu, uint8_t( ring.GetCpu() ) );
+ MemWrite( &item->contextSwitch.reason, reason );
+ MemWrite( &item->contextSwitch.state, state );
+ TracyLfqCommit;
+
+ if( cnt > 0 && prev_pid != 0 && CurrentProcOwnsThread( prev_pid ) )
+ {
+ auto trace = GetCallstackBlock( cnt, ring, traceOffset );
+
+ TracyLfqPrepare( QueueType::CallstackSampleContextSwitch );
+ MemWrite( &item->callstackSampleFat.time, t0 );
+ MemWrite( &item->callstackSampleFat.thread, prev_pid );
+ MemWrite( &item->callstackSampleFat.ptr, (uint64_t)trace );
+ TracyLfqCommit;
+ }
+ }
+ else
+ {
+ assert( ring.GetId() == EventWakeup );
+
+ // Layout:
+ // u64 time
+ // u32 size
+ // u8 data[size]
+ // Data:
+ // u8 hdr[8]
+ // u8 comm[16]
+ // u32 pid
+ // u32 prio
+ // u64 target_cpu
+
+ offset += sizeof( perf_event_header ) + sizeof( uint64_t ) + sizeof( uint32_t ) + 8 + 16;
+
+ uint32_t pid;
+ ring.Read( &pid, offset, sizeof( uint32_t ) );
+
+ TracyLfqPrepare( QueueType::ThreadWakeup );
+ MemWrite( &item->threadWakeup.time, t0 );
+ MemWrite( &item->threadWakeup.thread, pid );
+ TracyLfqCommit;
+ }
+
+ rbPos += hdr.size;
+ if( rbPos == end[sel] )
+ {
+ active[sel] = false;
+ activeNum--;
+ }
+ else
+ {
+ pos[sel] = rbPos;
+ }
+ }
+ }
+ for( int i=0; i<ctxBufNum; i++ )
+ {
+ if( end[i] != 0 ) s_ring[s_ctxBufferIdx + i].Advance( end[i] );
+ }
+ }
+ }
+ if( !traceActive.load( std::memory_order_relaxed ) ) break;
+ if( !hadData )
+ {
+ std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
+ }
+ }
+
+ for( int i=0; i<s_numBuffers; i++ ) s_ring[i].~RingBuffer();
+ tracy_free_fast( s_ring );
+}
+
+void SysTraceGetExternalName( uint64_t thread, const char*& threadName, const char*& name )
+{
+ FILE* f;
+ char fn[256];
+ sprintf( fn, "/proc/%" PRIu64 "/comm", thread );
+ f = fopen( fn, "rb" );
+ if( f )
+ {
+ char buf[256];
+ const auto sz = fread( buf, 1, 256, f );
+ if( sz > 0 && buf[sz-1] == '\n' ) buf[sz-1] = '\0';
+ threadName = CopyString( buf );
+ fclose( f );
+ }
+ else
+ {
+ threadName = CopyString( "???", 3 );
+ }
+
+ sprintf( fn, "/proc/%" PRIu64 "/status", thread );
+ f = fopen( fn, "rb" );
+ if( f )
+ {
+ char* tmp = (char*)tracy_malloc_fast( 8*1024 );
+ const auto fsz = (ptrdiff_t)fread( tmp, 1, 8*1024, f );
+ fclose( f );
+
+ int pid = -1;
+ auto line = tmp;
+ for(;;)
+ {
+ if( memcmp( "Tgid:\t", line, 6 ) == 0 )
+ {
+ pid = atoi( line + 6 );
+ break;
+ }
+ while( line - tmp < fsz && *line != '\n' ) line++;
+ if( *line != '\n' ) break;
+ line++;
+ }
+ tracy_free_fast( tmp );
+
+ if( pid >= 0 )
+ {
+ {
+ uint64_t _pid = pid;
+ TracyLfqPrepare( QueueType::TidToPid );
+ MemWrite( &item->tidToPid.tid, thread );
+ MemWrite( &item->tidToPid.pid, _pid );
+ TracyLfqCommit;
+ }
+ sprintf( fn, "/proc/%i/comm", pid );
+ f = fopen( fn, "rb" );
+ if( f )
+ {
+ char buf[256];
+ const auto sz = fread( buf, 1, 256, f );
+ if( sz > 0 && buf[sz-1] == '\n' ) buf[sz-1] = '\0';
+ name = CopyStringFast( buf );
+ fclose( f );
+ return;
+ }
+ }
+ }
+ name = CopyStringFast( "???", 3 );
+}
+
+}
+
+# endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracySysTrace.hpp b/3rdparty/tracy/tracy/client/TracySysTrace.hpp
new file mode 100644
index 0000000..8c663cd
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracySysTrace.hpp
@@ -0,0 +1,28 @@
+#ifndef __TRACYSYSTRACE_HPP__
+#define __TRACYSYSTRACE_HPP__
+
+#if !defined TRACY_NO_SYSTEM_TRACING && ( defined _WIN32 || defined __linux__ )
+# include "../common/TracyUwp.hpp"
+# ifndef TRACY_UWP
+# define TRACY_HAS_SYSTEM_TRACING
+# endif
+#endif
+
+#ifdef TRACY_HAS_SYSTEM_TRACING
+
+#include <stdint.h>
+
+namespace tracy
+{
+
+bool SysTraceStart( int64_t& samplingPeriod );
+void SysTraceStop();
+void SysTraceWorker( void* ptr );
+
+void SysTraceGetExternalName( uint64_t thread, const char*& threadName, const char*& name );
+
+}
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/TracyThread.hpp b/3rdparty/tracy/tracy/client/TracyThread.hpp
new file mode 100644
index 0000000..5b5ad20
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/TracyThread.hpp
@@ -0,0 +1,85 @@
+#ifndef __TRACYTHREAD_HPP__
+#define __TRACYTHREAD_HPP__
+
+#if defined _WIN32
+# include <windows.h>
+#else
+# include <pthread.h>
+#endif
+
+#ifdef TRACY_MANUAL_LIFETIME
+# include "tracy_rpmalloc.hpp"
+#endif
+
+namespace tracy
+{
+
+class ThreadExitHandler
+{
+public:
+ ~ThreadExitHandler()
+ {
+#ifdef TRACY_MANUAL_LIFETIME
+ rpmalloc_thread_finalize();
+#endif
+ }
+};
+
+#if defined _WIN32
+
+class Thread
+{
+public:
+ Thread( void(*func)( void* ptr ), void* ptr )
+ : m_func( func )
+ , m_ptr( ptr )
+ , m_hnd( CreateThread( nullptr, 0, Launch, this, 0, nullptr ) )
+ {}
+
+ ~Thread()
+ {
+ WaitForSingleObject( m_hnd, INFINITE );
+ CloseHandle( m_hnd );
+ }
+
+ HANDLE Handle() const { return m_hnd; }
+
+private:
+ static DWORD WINAPI Launch( void* ptr ) { ((Thread*)ptr)->m_func( ((Thread*)ptr)->m_ptr ); return 0; }
+
+ void(*m_func)( void* ptr );
+ void* m_ptr;
+ HANDLE m_hnd;
+};
+
+#else
+
+class Thread
+{
+public:
+ Thread( void(*func)( void* ptr ), void* ptr )
+ : m_func( func )
+ , m_ptr( ptr )
+ {
+ pthread_create( &m_thread, nullptr, Launch, this );
+ }
+
+ ~Thread()
+ {
+ pthread_join( m_thread, nullptr );
+ }
+
+ pthread_t Handle() const { return m_thread; }
+
+private:
+ static void* Launch( void* ptr ) { ((Thread*)ptr)->m_func( ((Thread*)ptr)->m_ptr ); return nullptr; }
+ void(*m_func)( void* ptr );
+ void* m_ptr;
+ pthread_t m_thread;
+};
+
+#endif
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/tracy_SPSCQueue.h b/3rdparty/tracy/tracy/client/tracy_SPSCQueue.h
new file mode 100644
index 0000000..d683ed1
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/tracy_SPSCQueue.h
@@ -0,0 +1,148 @@
+/*
+Copyright (c) 2020 Erik Rigtorp <[email protected]>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <stdexcept>
+#include <type_traits> // std::enable_if, std::is_*_constructible
+
+#include "../common/TracyAlloc.hpp"
+
+#if defined (_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable:4324)
+#endif
+
+namespace tracy {
+
+template <typename T> class SPSCQueue {
+public:
+ explicit SPSCQueue(const size_t capacity)
+ : capacity_(capacity) {
+ capacity_++; // Needs one slack element
+ slots_ = (T*)tracy_malloc(sizeof(T) * (capacity_ + 2 * kPadding));
+
+ static_assert(alignof(SPSCQueue<T>) == kCacheLineSize, "");
+ static_assert(sizeof(SPSCQueue<T>) >= 3 * kCacheLineSize, "");
+ assert(reinterpret_cast<char *>(&readIdx_) -
+ reinterpret_cast<char *>(&writeIdx_) >=
+ static_cast<std::ptrdiff_t>(kCacheLineSize));
+ }
+
+ ~SPSCQueue() {
+ while (front()) {
+ pop();
+ }
+ tracy_free(slots_);
+ }
+
+ // non-copyable and non-movable
+ SPSCQueue(const SPSCQueue &) = delete;
+ SPSCQueue &operator=(const SPSCQueue &) = delete;
+
+ template <typename... Args>
+ void emplace(Args &&...args) noexcept(
+ std::is_nothrow_constructible<T, Args &&...>::value) {
+ static_assert(std::is_constructible<T, Args &&...>::value,
+ "T must be constructible with Args&&...");
+ auto const writeIdx = writeIdx_.load(std::memory_order_relaxed);
+ auto nextWriteIdx = writeIdx + 1;
+ if (nextWriteIdx == capacity_) {
+ nextWriteIdx = 0;
+ }
+ while (nextWriteIdx == readIdxCache_) {
+ readIdxCache_ = readIdx_.load(std::memory_order_acquire);
+ }
+ new (&slots_[writeIdx + kPadding]) T(std::forward<Args>(args)...);
+ writeIdx_.store(nextWriteIdx, std::memory_order_release);
+ }
+
+ [[nodiscard]] T *front() noexcept {
+ auto const readIdx = readIdx_.load(std::memory_order_relaxed);
+ if (readIdx == writeIdxCache_) {
+ writeIdxCache_ = writeIdx_.load(std::memory_order_acquire);
+ if (writeIdxCache_ == readIdx) {
+ return nullptr;
+ }
+ }
+ return &slots_[readIdx + kPadding];
+ }
+
+ void pop() noexcept {
+ static_assert(std::is_nothrow_destructible<T>::value,
+ "T must be nothrow destructible");
+ auto const readIdx = readIdx_.load(std::memory_order_relaxed);
+ assert(writeIdx_.load(std::memory_order_acquire) != readIdx);
+ slots_[readIdx + kPadding].~T();
+ auto nextReadIdx = readIdx + 1;
+ if (nextReadIdx == capacity_) {
+ nextReadIdx = 0;
+ }
+ readIdx_.store(nextReadIdx, std::memory_order_release);
+ }
+
+ [[nodiscard]] size_t size() const noexcept {
+ std::ptrdiff_t diff = writeIdx_.load(std::memory_order_acquire) -
+ readIdx_.load(std::memory_order_acquire);
+ if (diff < 0) {
+ diff += capacity_;
+ }
+ return static_cast<size_t>(diff);
+ }
+
+ [[nodiscard]] bool empty() const noexcept {
+ return writeIdx_.load(std::memory_order_acquire) ==
+ readIdx_.load(std::memory_order_acquire);
+ }
+
+ [[nodiscard]] size_t capacity() const noexcept { return capacity_ - 1; }
+
+private:
+ static constexpr size_t kCacheLineSize = 64;
+
+ // Padding to avoid false sharing between slots_ and adjacent allocations
+ static constexpr size_t kPadding = (kCacheLineSize - 1) / sizeof(T) + 1;
+
+private:
+ size_t capacity_;
+ T *slots_;
+
+ // Align to cache line size in order to avoid false sharing
+ // readIdxCache_ and writeIdxCache_ is used to reduce the amount of cache
+ // coherency traffic
+ alignas(kCacheLineSize) std::atomic<size_t> writeIdx_ = {0};
+ alignas(kCacheLineSize) size_t readIdxCache_ = 0;
+ alignas(kCacheLineSize) std::atomic<size_t> readIdx_ = {0};
+ alignas(kCacheLineSize) size_t writeIdxCache_ = 0;
+
+ // Padding to avoid adjacent allocations to share cache line with
+ // writeIdxCache_
+ char padding_[kCacheLineSize - sizeof(SPSCQueue<T>::writeIdxCache_)];
+};
+} // namespace rigtorp
+
+#if defined (_MSC_VER)
+#pragma warning(pop)
+#endif
diff --git a/3rdparty/tracy/tracy/client/tracy_concurrentqueue.h b/3rdparty/tracy/tracy/client/tracy_concurrentqueue.h
new file mode 100644
index 0000000..a1f4173
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/tracy_concurrentqueue.h
@@ -0,0 +1,1446 @@
+// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue.
+// An overview, including benchmark results, is provided here:
+// http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++
+// The full design is also described in excruciating detail at:
+// http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue
+
+// Simplified BSD license:
+// Copyright (c) 2013-2016, Cameron Desrochers.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// - Redistributions of source code must retain the above copyright notice, this list of
+// conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice, this list of
+// conditions and the following disclaimer in the documentation and/or other materials
+// provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#pragma once
+
+#include "../common/TracyAlloc.hpp"
+#include "../common/TracyForceInline.hpp"
+#include "../common/TracySystem.hpp"
+
+#if defined(__GNUC__)
+// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and
+// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings
+// upon assigning any computed values)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+
+#if defined(__APPLE__)
+#include "TargetConditionals.h"
+#endif
+
+#include <atomic> // Requires C++11. Sorry VS2010.
+#include <cassert>
+#include <cstddef> // for max_align_t
+#include <cstdint>
+#include <cstdlib>
+#include <type_traits>
+#include <algorithm>
+#include <utility>
+#include <limits>
+#include <climits> // for CHAR_BIT
+#include <array>
+#include <thread> // partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading
+
+namespace tracy
+{
+
+// Compiler-specific likely/unlikely hints
+namespace moodycamel { namespace details {
+#if defined(__GNUC__)
+ inline bool cqLikely(bool x) { return __builtin_expect((x), true); }
+ inline bool cqUnlikely(bool x) { return __builtin_expect((x), false); }
+#else
+ inline bool cqLikely(bool x) { return x; }
+ inline bool cqUnlikely(bool x) { return x; }
+#endif
+} }
+
+namespace
+{
+ // to avoid MSVC warning 4127: conditional expression is constant
+ template <bool>
+ struct compile_time_condition
+ {
+ static const bool value = false;
+ };
+ template <>
+ struct compile_time_condition<true>
+ {
+ static const bool value = true;
+ };
+}
+
+namespace moodycamel {
+namespace details {
+ template<typename T>
+ struct const_numeric_max {
+ static_assert(std::is_integral<T>::value, "const_numeric_max can only be used with integers");
+ static const T value = std::numeric_limits<T>::is_signed
+ ? (static_cast<T>(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast<T>(1)
+ : static_cast<T>(-1);
+ };
+
+#if defined(__GLIBCXX__)
+ typedef ::max_align_t std_max_align_t; // libstdc++ forgot to add it to std:: for a while
+#else
+ typedef std::max_align_t std_max_align_t; // Others (e.g. MSVC) insist it can *only* be accessed via std::
+#endif
+
+ // Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting
+ // 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. See issue #64.
+ typedef union {
+ std_max_align_t x;
+ long long y;
+ void* z;
+ } max_align_t;
+}
+
+// Default traits for the ConcurrentQueue. To change some of the
+// traits without re-implementing all of them, inherit from this
+// struct and shadow the declarations you wish to be different;
+// since the traits are used as a template type parameter, the
+// shadowed declarations will be used where defined, and the defaults
+// otherwise.
+struct ConcurrentQueueDefaultTraits
+{
+ // General-purpose size type. std::size_t is strongly recommended.
+ typedef std::size_t size_t;
+
+ // The type used for the enqueue and dequeue indices. Must be at least as
+ // large as size_t. Should be significantly larger than the number of elements
+ // you expect to hold at once, especially if you have a high turnover rate;
+ // for example, on 32-bit x86, if you expect to have over a hundred million
+ // elements or pump several million elements through your queue in a very
+ // short space of time, using a 32-bit type *may* trigger a race condition.
+ // A 64-bit int type is recommended in that case, and in practice will
+ // prevent a race condition no matter the usage of the queue. Note that
+ // whether the queue is lock-free with a 64-int type depends on the whether
+ // std::atomic<std::uint64_t> is lock-free, which is platform-specific.
+ typedef std::size_t index_t;
+
+ // Internally, all elements are enqueued and dequeued from multi-element
+ // blocks; this is the smallest controllable unit. If you expect few elements
+ // but many producers, a smaller block size should be favoured. For few producers
+ // and/or many elements, a larger block size is preferred. A sane default
+ // is provided. Must be a power of 2.
+ static const size_t BLOCK_SIZE = 64*1024;
+
+ // For explicit producers (i.e. when using a producer token), the block is
+ // checked for being empty by iterating through a list of flags, one per element.
+ // For large block sizes, this is too inefficient, and switching to an atomic
+ // counter-based approach is faster. The switch is made for block sizes strictly
+ // larger than this threshold.
+ static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32;
+
+ // How many full blocks can be expected for a single explicit producer? This should
+ // reflect that number's maximum for optimal performance. Must be a power of 2.
+ static const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32;
+
+ // Controls the number of items that an explicit consumer (i.e. one with a token)
+ // must consume before it causes all consumers to rotate and move on to the next
+ // internal queue.
+ static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256;
+
+ // The maximum number of elements (inclusive) that can be enqueued to a sub-queue.
+ // Enqueue operations that would cause this limit to be surpassed will fail. Note
+ // that this limit is enforced at the block level (for performance reasons), i.e.
+ // it's rounded up to the nearest block size.
+ static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;
+
+
+ // Memory allocation can be customized if needed.
+ // malloc should return nullptr on failure, and handle alignment like std::malloc.
+#if defined(malloc) || defined(free)
+ // Gah, this is 2015, stop defining macros that break standard code already!
+ // Work around malloc/free being special macros:
+ static inline void* WORKAROUND_malloc(size_t size) { return malloc(size); }
+ static inline void WORKAROUND_free(void* ptr) { return free(ptr); }
+ static inline void* (malloc)(size_t size) { return WORKAROUND_malloc(size); }
+ static inline void (free)(void* ptr) { return WORKAROUND_free(ptr); }
+#else
+ static inline void* malloc(size_t size) { return tracy::tracy_malloc(size); }
+ static inline void free(void* ptr) { return tracy::tracy_free(ptr); }
+#endif
+};
+
+
+// When producing or consuming many elements, the most efficient way is to:
+// 1) Use one of the bulk-operation methods of the queue with a token
+// 2) Failing that, use the bulk-operation methods without a token
+// 3) Failing that, create a token and use that with the single-item methods
+// 4) Failing that, use the single-parameter methods of the queue
+// Having said that, don't create tokens willy-nilly -- ideally there should be
+// a maximum of one token per thread (of each kind).
+struct ProducerToken;
+struct ConsumerToken;
+
+template<typename T, typename Traits> class ConcurrentQueue;
+
+
+namespace details
+{
+ struct ConcurrentQueueProducerTypelessBase
+ {
+ ConcurrentQueueProducerTypelessBase* next;
+ std::atomic<bool> inactive;
+ ProducerToken* token;
+ uint32_t threadId;
+
+ ConcurrentQueueProducerTypelessBase()
+ : next(nullptr), inactive(false), token(nullptr), threadId(0)
+ {
+ }
+ };
+
+ template<typename T>
+ static inline bool circular_less_than(T a, T b)
+ {
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4554)
+#endif
+ static_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, "circular_less_than is intended to be used only with unsigned integer types");
+ return static_cast<T>(a - b) > static_cast<T>(static_cast<T>(1) << static_cast<T>(sizeof(T) * CHAR_BIT - 1));
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ }
+
+ template<typename U>
+ static inline char* align_for(char* ptr)
+ {
+ const std::size_t alignment = std::alignment_of<U>::value;
+ return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;
+ }
+
+ template<typename T>
+ static inline T ceil_to_pow_2(T x)
+ {
+ static_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, "ceil_to_pow_2 is intended to be used only with unsigned integer types");
+
+ // Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ --x;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ for (std::size_t i = 1; i < sizeof(T); i <<= 1) {
+ x |= x >> (i << 3);
+ }
+ ++x;
+ return x;
+ }
+
+ template<typename T>
+ static inline void swap_relaxed(std::atomic<T>& left, std::atomic<T>& right)
+ {
+ T temp = std::move(left.load(std::memory_order_relaxed));
+ left.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed);
+ right.store(std::move(temp), std::memory_order_relaxed);
+ }
+
+ template<typename T>
+ static inline T const& nomove(T const& x)
+ {
+ return x;
+ }
+
+ template<bool Enable>
+ struct nomove_if
+ {
+ template<typename T>
+ static inline T const& eval(T const& x)
+ {
+ return x;
+ }
+ };
+
+ template<>
+ struct nomove_if<false>
+ {
+ template<typename U>
+ static inline auto eval(U&& x)
+ -> decltype(std::forward<U>(x))
+ {
+ return std::forward<U>(x);
+ }
+ };
+
+ template<typename It>
+ static inline auto deref_noexcept(It& it) noexcept -> decltype(*it)
+ {
+ return *it;
+ }
+
+#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ template<typename T> struct is_trivially_destructible : std::is_trivially_destructible<T> { };
+#else
+ template<typename T> struct is_trivially_destructible : std::has_trivial_destructor<T> { };
+#endif
+
+ template<typename T> struct static_is_lock_free_num { enum { value = 0 }; };
+ template<> struct static_is_lock_free_num<signed char> { enum { value = ATOMIC_CHAR_LOCK_FREE }; };
+ template<> struct static_is_lock_free_num<short> { enum { value = ATOMIC_SHORT_LOCK_FREE }; };
+ template<> struct static_is_lock_free_num<int> { enum { value = ATOMIC_INT_LOCK_FREE }; };
+ template<> struct static_is_lock_free_num<long> { enum { value = ATOMIC_LONG_LOCK_FREE }; };
+ template<> struct static_is_lock_free_num<long long> { enum { value = ATOMIC_LLONG_LOCK_FREE }; };
+ template<typename T> struct static_is_lock_free : static_is_lock_free_num<typename std::make_signed<T>::type> { };
+ template<> struct static_is_lock_free<bool> { enum { value = ATOMIC_BOOL_LOCK_FREE }; };
+ template<typename U> struct static_is_lock_free<U*> { enum { value = ATOMIC_POINTER_LOCK_FREE }; };
+}
+
+
+// Token that binds a caller to one explicit producer slot inside a
+// ConcurrentQueue. The producer object keeps a back-pointer ('token') to the
+// live token, so every move/swap/destroy below must re-point that back-pointer.
+struct ProducerToken
+{
+ template<typename T, typename Traits>
+ explicit ProducerToken(ConcurrentQueue<T, Traits>& queue);
+
+ // Move-construct: steal the producer slot from 'other' (leaving it invalid)
+ // and re-aim the producer's back-pointer at this token.
+ ProducerToken(ProducerToken&& other) noexcept
+ : producer(other.producer)
+ {
+ other.producer = nullptr;
+ if (producer != nullptr) {
+ producer->token = this;
+ }
+ }
+
+ // Move-assignment is implemented as a swap, so 'other' may stay valid.
+ inline ProducerToken& operator=(ProducerToken&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ // Exchange producer slots with 'other' and fix up both back-pointers.
+ void swap(ProducerToken& other) noexcept
+ {
+ std::swap(producer, other.producer);
+ if (producer != nullptr) {
+ producer->token = this;
+ }
+ if (other.producer != nullptr) {
+ other.producer->token = &other;
+ }
+ }
+
+ // A token is always valid unless:
+ // 1) Memory allocation failed during construction
+ // 2) It was moved via the move constructor
+ // (Note: assignment does a swap, leaving both potentially valid)
+ // 3) The associated queue was destroyed
+ // Note that if valid() returns true, that only indicates
+ // that the token is valid for use with a specific queue,
+ // but not which one; that's up to the user to track.
+ inline bool valid() const { return producer != nullptr; }
+
+ // Detach from the producer and mark it inactive; the release store makes
+ // this producer's prior writes visible to whoever later observes 'inactive'.
+ ~ProducerToken()
+ {
+ if (producer != nullptr) {
+ producer->token = nullptr;
+ producer->inactive.store(true, std::memory_order_release);
+ }
+ }
+
+ // Disable copying and assignment
+ ProducerToken(ProducerToken const&) = delete;
+ ProducerToken& operator=(ProducerToken const&) = delete;
+
+private:
+ template<typename T, typename Traits> friend class ConcurrentQueue;
+
+protected:
+ // The producer slot this token owns; nullptr when the token is invalid.
+ details::ConcurrentQueueProducerTypelessBase* producer;
+};
+
+
+// Token carrying per-consumer dequeue state: which producer the consumer is
+// currently draining and how far along the global rotation it is. Unlike
+// ProducerToken it owns nothing (no destructor), so the move constructor may
+// copy the fields without clearing 'other'.
+struct ConsumerToken
+{
+ template<typename T, typename Traits>
+ explicit ConsumerToken(ConcurrentQueue<T, Traits>& q);
+
+ ConsumerToken(ConsumerToken&& other) noexcept
+ : initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer)
+ {
+ }
+
+ // Move-assignment via swap; both tokens remain usable afterwards.
+ inline ConsumerToken& operator=(ConsumerToken&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ void swap(ConsumerToken& other) noexcept
+ {
+ std::swap(initialOffset, other.initialOffset);
+ std::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset);
+ std::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent);
+ std::swap(currentProducer, other.currentProducer);
+ std::swap(desiredProducer, other.desiredProducer);
+ }
+
+ // Disable copying and assignment
+ ConsumerToken(ConsumerToken const&) = delete;
+ ConsumerToken& operator=(ConsumerToken const&) = delete;
+
+private:
+ template<typename T, typename Traits> friend class ConcurrentQueue;
+
+private: // but shared with ConcurrentQueue
+ std::uint32_t initialOffset;             // this consumer's starting slot in the producer rotation
+ std::uint32_t lastKnownGlobalOffset;     // last globalExplicitConsumerOffset this token synced with
+ std::uint32_t itemsConsumedFromCurrent;  // items taken from currentProducer since last rotation
+ details::ConcurrentQueueProducerTypelessBase* currentProducer;
+ details::ConcurrentQueueProducerTypelessBase* desiredProducer;
+};
+
+
+template<typename T, typename Traits = ConcurrentQueueDefaultTraits>
+class ConcurrentQueue
+{
+public:
+ struct ExplicitProducer;
+
+ typedef moodycamel::ProducerToken producer_token_t;
+ typedef moodycamel::ConsumerToken consumer_token_t;
+
+ typedef typename Traits::index_t index_t;
+ typedef typename Traits::size_t size_t;
+
+ static const size_t BLOCK_SIZE = static_cast<size_t>(Traits::BLOCK_SIZE);
+ static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast<size_t>(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD);
+ static const size_t EXPLICIT_INITIAL_INDEX_SIZE = static_cast<size_t>(Traits::EXPLICIT_INITIAL_INDEX_SIZE);
+ static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast<std::uint32_t>(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE);
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4307) // + integral constant overflow (that's what the ternary expression is for!)
+#pragma warning(disable: 4309) // static_cast: Truncation of constant value
+#endif
+ static const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max<size_t>::value - static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? details::const_numeric_max<size_t>::value : ((static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+ static_assert(!std::numeric_limits<size_t>::is_signed && std::is_integral<size_t>::value, "Traits::size_t must be an unsigned integral type");
+ static_assert(!std::numeric_limits<index_t>::is_signed && std::is_integral<index_t>::value, "Traits::index_t must be an unsigned integral type");
+ static_assert(sizeof(index_t) >= sizeof(size_t), "Traits::index_t must be at least as wide as Traits::size_t");
+ static_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), "Traits::BLOCK_SIZE must be a power of 2 (and at least 2)");
+ static_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), "Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)");
+ static_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)");
+
+public:
+ // Creates a queue with at least `capacity` element slots; note that the
+ // actual number of elements that can be inserted without additional memory
+ // allocation depends on the number of producers and the block size (e.g. if
+ // the block size is equal to `capacity`, only a single block will be allocated
+ // up-front, which means only a single producer will be able to enqueue elements
+ // without an extra allocation -- blocks aren't shared between producers).
+ // This method is not thread safe -- it is up to the user to ensure that the
+ // queue is fully constructed before it starts being used by other threads (this
+ // includes making the memory effects of construction visible, possibly with a
+ // memory barrier).
+ explicit ConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE)
+ : producerListTail(nullptr),
+ producerCount(0),
+ initialBlockPoolIndex(0),
+ nextExplicitConsumerId(0),
+ globalExplicitConsumerOffset(0)
+ {
+ // Pre-allocate ceil(capacity / BLOCK_SIZE) blocks (BLOCK_SIZE is a power
+ // of two, so the bitwise AND tests for an exact multiple).
+ populate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 0 : 1));
+ }
+
+ // Computes the correct amount of pre-allocated blocks for you based
+ // on the minimum number of elements you want available at any given
+ // time, and the maximum concurrent number of each type of producer.
+ ConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers)
+ : producerListTail(nullptr),
+ producerCount(0),
+ initialBlockPoolIndex(0),
+ nextExplicitConsumerId(0),
+ globalExplicitConsumerOffset(0)
+ {
+ // Blocks are not shared between producers, so each of the (up to)
+ // maxExplicitProducers producers needs its own share of blocks, plus
+ // slack blocks per producer for partially-filled head/tail blocks.
+ size_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers);
+ populate_initial_block_list(blocks);
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being deleted. It's up to the user to synchronize this.
+ // This method is not thread safe.
+ ~ConcurrentQueue()
+ {
+ // Destroy producers. Any outstanding ProducerToken is detached first so
+ // its destructor won't dereference a destroyed producer.
+ auto ptr = producerListTail.load(std::memory_order_relaxed);
+ while (ptr != nullptr) {
+ auto next = ptr->next_prod();
+ if (ptr->token != nullptr) {
+ ptr->token->producer = nullptr;
+ }
+ destroy(ptr);
+ ptr = next;
+ }
+
+ // Destroy global free list. Blocks from the initial pool are skipped here
+ // (dynamicallyAllocated == false) and freed with the pool below.
+ auto block = freeList.head_unsafe();
+ while (block != nullptr) {
+ auto next = block->freeListNext.load(std::memory_order_relaxed);
+ if (block->dynamicallyAllocated) {
+ destroy(block);
+ }
+ block = next;
+ }
+
+ // Destroy initial free list
+ destroy_array(initialBlockPool, initialBlockPoolSize);
+ }
+
+ // Disable copying and copy assignment
+ ConcurrentQueue(ConcurrentQueue const&) = delete;
+ ConcurrentQueue(ConcurrentQueue&& other) = delete;
+ ConcurrentQueue& operator=(ConcurrentQueue const&) = delete;
+ ConcurrentQueue& operator=(ConcurrentQueue&& other) = delete;
+
+public:
+ // Start an enqueue on the token's explicit producer; returns the slot to
+ // construct the element into and writes its index to currentTailIndex.
+ // The explicit qualification forces a non-virtual, inlinable call.
+ tracy_force_inline T* enqueue_begin(producer_token_t const& token, index_t& currentTailIndex)
+ {
+ return static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::enqueue_begin(currentTailIndex);
+ }
+
+ // Bulk-dequeue from a single producer, preferring the token's current one.
+ // Returns the number of items handed to processData (0 if nothing was
+ // available anywhere).
+ template<class NotifyThread, class ProcessData>
+ size_t try_dequeue_bulk_single(consumer_token_t& token, NotifyThread notifyThread, ProcessData processData )
+ {
+ // Re-sync with the global rotation if we've never dequeued or it moved.
+ if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {
+ if (!update_current_producer_after_rotation(token)) {
+ return 0;
+ }
+ }
+
+ size_t count = static_cast<ProducerBase*>(token.currentProducer)->dequeue_bulk(notifyThread, processData);
+ token.itemsConsumedFromCurrent += static_cast<std::uint32_t>(count);
+
+ // Advance to the next producer, wrapping around to the list tail.
+ auto tail = producerListTail.load(std::memory_order_acquire);
+ auto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();
+ if (ptr == nullptr) {
+ ptr = tail;
+ }
+ if( count == 0 )
+ {
+ // Current producer was empty: walk the whole ring once looking for work.
+ while (ptr != static_cast<ProducerBase*>(token.currentProducer)) {
+ auto dequeued = ptr->dequeue_bulk(notifyThread, processData);
+ if (dequeued != 0) {
+ token.currentProducer = ptr;
+ token.itemsConsumedFromCurrent = static_cast<std::uint32_t>(dequeued);
+ return dequeued;
+ }
+ ptr = ptr->next_prod();
+ if (ptr == nullptr) {
+ ptr = tail;
+ }
+ }
+ return 0;
+ }
+ else
+ {
+ // Got data; rotate to the next producer for fairness on the next call.
+ token.currentProducer = ptr;
+ token.itemsConsumedFromCurrent = 0;
+ return count;
+ }
+ }
+
+
+ // Returns an estimate of the total number of elements currently in the queue. This
+ // estimate is only accurate if the queue has completely stabilized before it is called
+ // (i.e. all enqueue and dequeue operations have completed and their memory effects are
+ // visible on the calling thread, and no further operations start while this method is
+ // being called).
+ // Thread-safe.
+ size_t size_approx() const
+ {
+ // Sum each producer's approximate size; only exact once the queue has
+ // fully stabilized (see comment above).
+ size_t size = 0;
+ for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+ size += ptr->size_approx();
+ }
+ return size;
+ }
+
+
+ // Returns true if the underlying atomic variables used by
+ // the queue are lock-free (they should be on most platforms).
+ // Thread-safe.
+ static bool is_lock_free()
+ {
+ // A value of 2 for the ATOMIC_*_LOCK_FREE macros means "always lock-free"
+ // (1 would mean "sometimes", i.e. only for certain objects at runtime).
+ return
+ details::static_is_lock_free<bool>::value == 2 &&
+ details::static_is_lock_free<size_t>::value == 2 &&
+ details::static_is_lock_free<std::uint32_t>::value == 2 &&
+ details::static_is_lock_free<index_t>::value == 2 &&
+ details::static_is_lock_free<void*>::value == 2;
+ }
+
+
+private:
+ friend struct ProducerToken;
+ friend struct ConsumerToken;
+ friend struct ExplicitProducer;
+
+
+ ///////////////////////////////
+ // Queue methods
+ ///////////////////////////////
+
+ // Re-derive the token's desired/current producer after the global consumer
+ // rotation has advanced. Returns false only when there are no producers yet.
+ inline bool update_current_producer_after_rotation(consumer_token_t& token)
+ {
+ // Ah, there's been a rotation, figure out where we should be!
+ auto tail = producerListTail.load(std::memory_order_acquire);
+ if (token.desiredProducer == nullptr && tail == nullptr) {
+ return false;
+ }
+ auto prodCount = producerCount.load(std::memory_order_relaxed);
+ auto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed);
+ if (details::cqUnlikely(token.desiredProducer == nullptr)) {
+ // Aha, first time we're dequeueing anything.
+ // Figure out our local position
+ // Note: offset is from start, not end, but we're traversing from end -- subtract from count first
+ std::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount);
+ token.desiredProducer = tail;
+ for (std::uint32_t i = 0; i != offset; ++i) {
+ token.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();
+ if (token.desiredProducer == nullptr) {
+ token.desiredProducer = tail;
+ }
+ }
+ }
+
+ // Advance by however many rotations we missed (modulo the ring length).
+ std::uint32_t delta = globalOffset - token.lastKnownGlobalOffset;
+ if (delta >= prodCount) {
+ delta = delta % prodCount;
+ }
+ for (std::uint32_t i = 0; i != delta; ++i) {
+ token.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();
+ if (token.desiredProducer == nullptr) {
+ token.desiredProducer = tail;
+ }
+ }
+
+ token.lastKnownGlobalOffset = globalOffset;
+ token.currentProducer = token.desiredProducer;
+ token.itemsConsumedFromCurrent = 0;
+ return true;
+ }
+
+
+ ///////////////////////////
+ // Free list
+ ///////////////////////////
+
+ // Intrusive hook for FreeList<N>: a refcount (with the high bit doubling as
+ // the should-be-on-freelist flag, see REFS_MASK below) and a next pointer.
+ template <typename N>
+ struct FreeListNode
+ {
+ FreeListNode() : freeListRefs(0), freeListNext(nullptr) { }
+
+ std::atomic<std::uint32_t> freeListRefs;
+ std::atomic<N*> freeListNext;
+ };
+
+ // A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention, but
+ // simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly
+ // speedy under low contention.
+ template<typename N> // N must inherit FreeListNode or have the same fields (and initialization of them)
+ struct FreeList
+ {
+ FreeList() : freeListHead(nullptr) { }
+ FreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); }
+ void swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); }
+
+ FreeList(FreeList const&) = delete;
+ FreeList& operator=(FreeList const&) = delete;
+
+ // Request that 'node' be put on the free list; the actual push happens
+ // only once its refcount drops to zero (possibly here, possibly later in
+ // try_get when the last reference is released).
+ inline void add(N* node)
+ {
+ // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to
+ // set it using a fetch_add
+ if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) {
+ // Oh look! We were the last ones referencing this node, and we know
+ // we want to add it to the free list, so let's do it!
+ add_knowing_refcount_is_zero(node);
+ }
+ }
+
+ // Pop a node, or return nullptr if the list is empty. Uses a refcount to
+ // guard against the ABA problem while reading head->freeListNext.
+ inline N* try_get()
+ {
+ auto head = freeListHead.load(std::memory_order_acquire);
+ while (head != nullptr) {
+ auto prevHead = head;
+ auto refs = head->freeListRefs.load(std::memory_order_relaxed);
+ if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) {
+ head = freeListHead.load(std::memory_order_acquire);
+ continue;
+ }
+
+ // Good, reference count has been incremented (it wasn't at zero), which means we can read the
+ // next and not worry about it changing between now and the time we do the CAS
+ auto next = head->freeListNext.load(std::memory_order_relaxed);
+ if (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) {
+ // Yay, got the node. This means it was on the list, which means shouldBeOnFreeList must be false no
+ // matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on).
+ assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);
+
+ // Decrease refcount twice, once for our ref, and once for the list's ref
+ head->freeListRefs.fetch_sub(2, std::memory_order_release);
+ return head;
+ }
+
+ // OK, the head must have changed on us, but we still need to decrease the refcount we increased.
+ // Note that we don't need to release any memory effects, but we do need to ensure that the reference
+ // count decrement happens-after the CAS on the head.
+ refs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel);
+ if (refs == SHOULD_BE_ON_FREELIST + 1) {
+ // We dropped the last reference and the node is flagged for the
+ // free list -- it's our job to actually push it now.
+ add_knowing_refcount_is_zero(prevHead);
+ }
+ }
+
+ return nullptr;
+ }
+
+ // Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes)
+ N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); }
+
+ private:
+ inline void add_knowing_refcount_is_zero(N* node)
+ {
+ // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run
+ // only one copy of this method per node at a time, i.e. the single thread case), then we know
+ // we can safely change the next pointer of the node; however, once the refcount is back above
+ // zero, then other threads could increase it (happens under heavy contention, when the refcount
+ // goes to zero in between a load and a refcount increment of a node in try_get, then back up to
+ // something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS
+ // to add the node to the actual list fails, decrease the refcount and leave the add operation to
+ // the next thread who puts the refcount back at zero (which could be us, hence the loop).
+ auto head = freeListHead.load(std::memory_order_relaxed);
+ while (true) {
+ node->freeListNext.store(head, std::memory_order_relaxed);
+ node->freeListRefs.store(1, std::memory_order_release);
+ if (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) {
+ // Hmm, the add failed, but we can only try again when the refcount goes back to zero
+ if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) {
+ continue;
+ }
+ }
+ return;
+ }
+ }
+
+ private:
+ // Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention)
+ std::atomic<N*> freeListHead;
+
+ // Low 31 bits of freeListRefs hold the reference count; the top bit is the
+ // "should be on free list" flag consumed by add()/add_knowing_refcount_is_zero().
+ static const std::uint32_t REFS_MASK = 0x7FFFFFFF;
+ static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000;
+ };
+
+
+ ///////////////////////////
+ // Block
+ ///////////////////////////
+
+ // A fixed-size slab of BLOCK_SIZE element slots. Emptiness is tracked with
+ // either per-slot flags (small blocks) or a dequeued-element counter (large
+ // blocks), selected at compile time via EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD.
+ struct Block
+ {
+ Block()
+ : next(nullptr), elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), shouldBeOnFreeList(false), dynamicallyAllocated(true)
+ {
+ }
+
+ // True iff every slot has been fully dequeued.
+ inline bool is_empty() const
+ {
+ if (compile_time_condition<BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD>::value) {
+ // Check flags
+ for (size_t i = 0; i < BLOCK_SIZE; ++i) {
+ if (!emptyFlags[i].load(std::memory_order_relaxed)) {
+ return false;
+ }
+ }
+
+ // Aha, empty; make sure we have all other memory effects that happened before the empty flags were set
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return true;
+ }
+ else {
+ // Check counter
+ if (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return true;
+ }
+ assert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE);
+ return false;
+ }
+ }
+
+ // Returns true if the block is now empty (does not apply in explicit context)
+ inline bool set_empty(index_t i)
+ {
+ if (BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+ // Set flag
+ // Note: flags are stored in reverse slot order (BLOCK_SIZE - 1 - slot).
+ assert(!emptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].load(std::memory_order_relaxed));
+ emptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].store(true, std::memory_order_release);
+ return false;
+ }
+ else {
+ // Increment counter
+ auto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release);
+ assert(prevVal < BLOCK_SIZE);
+ return prevVal == BLOCK_SIZE - 1;
+ }
+ }
+
+ // Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0).
+ // Returns true if the block is now empty (does not apply in explicit context).
+ inline bool set_many_empty(index_t i, size_t count)
+ {
+ if (compile_time_condition<BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD>::value) {
+ // Set flags
+ // Release fence up front so one fence covers all the relaxed stores below.
+ std::atomic_thread_fence(std::memory_order_release);
+ i = BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1)) - count + 1;
+ for (size_t j = 0; j != count; ++j) {
+ assert(!emptyFlags[i + j].load(std::memory_order_relaxed));
+ emptyFlags[i + j].store(true, std::memory_order_relaxed);
+ }
+ return false;
+ }
+ else {
+ // Increment counter
+ auto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release);
+ assert(prevVal + count <= BLOCK_SIZE);
+ return prevVal + count == BLOCK_SIZE;
+ }
+ }
+
+ // Mark every slot empty (single-threaded context only: relaxed stores).
+ inline void set_all_empty()
+ {
+ if (BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+ // Set all flags
+ for (size_t i = 0; i != BLOCK_SIZE; ++i) {
+ emptyFlags[i].store(true, std::memory_order_relaxed);
+ }
+ }
+ else {
+ // Reset counter
+ elementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed);
+ }
+ }
+
+ // Mark every slot occupied, ready for reuse (single-threaded context only).
+ inline void reset_empty()
+ {
+ if (compile_time_condition<BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD>::value) {
+ // Reset flags
+ for (size_t i = 0; i != BLOCK_SIZE; ++i) {
+ emptyFlags[i].store(false, std::memory_order_relaxed);
+ }
+ }
+ else {
+ // Reset counter
+ elementsCompletelyDequeued.store(0, std::memory_order_relaxed);
+ }
+ }
+
+ // Slot access; the index is taken modulo BLOCK_SIZE (power-of-two mask).
+ inline T* operator[](index_t idx) noexcept { return static_cast<T*>(static_cast<void*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }
+ inline T const* operator[](index_t idx) const noexcept { return static_cast<T const*>(static_cast<void const*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }
+
+ private:
+ // IMPORTANT: This must be the first member in Block, so that if T depends on the alignment of
+ // addresses returned by malloc, that alignment will be preserved. Apparently clang actually
+ // generates code that uses this assumption for AVX instructions in some cases. Ideally, we
+ // should also align Block to the alignment of T in case it's higher than malloc's 16-byte
+ // alignment, but this is hard to do in a cross-platform way. Assert for this case:
+ static_assert(std::alignment_of<T>::value <= std::alignment_of<details::max_align_t>::value, "The queue does not support super-aligned types at this time");
+ // Additionally, we need the alignment of Block itself to be a multiple of max_align_t since
+ // otherwise the appropriate padding will not be added at the end of Block in order to make
+ // arrays of Blocks all be properly aligned (not just the first one). We use a union to force
+ // this.
+ union {
+ char elements[sizeof(T) * BLOCK_SIZE];
+ details::max_align_t dummy;
+ };
+ public:
+ Block* next;
+ std::atomic<size_t> elementsCompletelyDequeued;
+ // Size 1 (unused) when the counter strategy is active, to avoid wasting space.
+ std::atomic<bool> emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? BLOCK_SIZE : 1];
+ public:
+ std::atomic<std::uint32_t> freeListRefs;
+ std::atomic<Block*> freeListNext;
+ std::atomic<bool> shouldBeOnFreeList;
+ bool dynamicallyAllocated; // Perhaps a better name for this would be 'isNotPartOfInitialBlockPool'
+ };
+ static_assert(std::alignment_of<Block>::value >= std::alignment_of<details::max_align_t>::value, "Internal error: Blocks must be at least as aligned as the type they are wrapping");
+
+
+ ///////////////////////////
+ // Producer base
+ ///////////////////////////
+
+ // Common state for a producer: head/tail indices, dequeue bookkeeping and
+ // the tail block pointer. dequeue_bulk statically dispatches to
+ // ExplicitProducer (this Tracy variant has no implicit producers).
+ struct ProducerBase : public details::ConcurrentQueueProducerTypelessBase
+ {
+ ProducerBase(ConcurrentQueue* parent_) :
+ tailIndex(0),
+ headIndex(0),
+ dequeueOptimisticCount(0),
+ dequeueOvercommit(0),
+ tailBlock(nullptr),
+ parent(parent_)
+ {
+ }
+
+ virtual ~ProducerBase() { };
+
+ template<class NotifyThread, class ProcessData>
+ inline size_t dequeue_bulk(NotifyThread notifyThread, ProcessData processData)
+ {
+ return static_cast<ExplicitProducer*>(this)->dequeue_bulk(notifyThread, processData);
+ }
+
+ inline ProducerBase* next_prod() const { return static_cast<ProducerBase*>(next); }
+
+ inline size_t size_approx() const
+ {
+ // tail - head, clamped to 0 when head has raced past tail.
+ auto tail = tailIndex.load(std::memory_order_relaxed);
+ auto head = headIndex.load(std::memory_order_relaxed);
+ return details::circular_less_than(head, tail) ? static_cast<size_t>(tail - head) : 0;
+ }
+
+ inline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); }
+ protected:
+ std::atomic<index_t> tailIndex; // Where to enqueue to next
+ std::atomic<index_t> headIndex; // Where to dequeue from next
+
+ // Optimistic count of dequeues attempted; overcommit compensates for
+ // attempts that found fewer elements than requested.
+ std::atomic<index_t> dequeueOptimisticCount;
+ std::atomic<index_t> dequeueOvercommit;
+
+ Block* tailBlock;
+
+ public:
+ ConcurrentQueue* parent;
+ };
+
+
+ public:
+ ///////////////////////////
+ // Explicit queue
+ ///////////////////////////
+ // A token-owned producer: a circular linked list of Blocks plus a
+ // lock-free, growable circular index (BlockIndexHeader/BlockIndexEntry)
+ // that lets consumers map an element index to its block.
+ struct ExplicitProducer : public ProducerBase
+ {
+ explicit ExplicitProducer(ConcurrentQueue* _parent) :
+ ProducerBase(_parent),
+ blockIndex(nullptr),
+ pr_blockIndexSlotsUsed(0),
+ pr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1),
+ pr_blockIndexFront(0),
+ pr_blockIndexEntries(nullptr),
+ pr_blockIndexRaw(nullptr)
+ {
+ // Start with an index big enough for the initial block pool, if that's
+ // larger than the configured minimum (sizes are kept as powers of two).
+ size_t poolBasedIndexSize = details::ceil_to_pow_2(_parent->initialBlockPoolSize) >> 1;
+ if (poolBasedIndexSize > pr_blockIndexSize) {
+ pr_blockIndexSize = poolBasedIndexSize;
+ }
+
+ new_block_index(0); // This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE
+ }
+
+ ~ExplicitProducer()
+ {
+ // Destruct any elements not yet dequeued.
+ // Since we're in the destructor, we can assume all elements
+ // are either completely dequeued or completely not (no halfways).
+ if (this->tailBlock != nullptr) { // Note this means there must be a block index too
+ // First find the block that's partially dequeued, if any
+ Block* halfDequeuedBlock = nullptr;
+ if ((this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) != 0) {
+ // The head's not on a block boundary, meaning a block somewhere is partially dequeued
+ // (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary)
+ size_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1);
+ while (details::circular_less_than<index_t>(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) {
+ i = (i + 1) & (pr_blockIndexSize - 1);
+ }
+ assert(details::circular_less_than<index_t>(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed)));
+ halfDequeuedBlock = pr_blockIndexEntries[i].block;
+ }
+
+ // Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration)
+ auto block = this->tailBlock;
+ do {
+ block = block->next;
+ if (block->ConcurrentQueue::Block::is_empty()) {
+ continue;
+ }
+
+ size_t i = 0; // Offset into block
+ if (block == halfDequeuedBlock) {
+ i = static_cast<size_t>(this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));
+ }
+
+ // Walk through all the items in the block; if this is the tail block, we need to stop when we reach the tail index
+ auto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 ? BLOCK_SIZE : static_cast<size_t>(this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));
+ while (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) {
+ (*block)[i++]->~T();
+ }
+ } while (block != this->tailBlock);
+ }
+
+ // Destroy all blocks that we own
+ if (this->tailBlock != nullptr) {
+ auto block = this->tailBlock;
+ do {
+ auto nextBlock = block->next;
+ if (block->dynamicallyAllocated) {
+ destroy(block);
+ }
+ else {
+ // Pool blocks are returned to the parent, not freed.
+ this->parent->add_block_to_free_list(block);
+ }
+ block = nextBlock;
+ } while (block != this->tailBlock);
+ }
+
+ // Destroy the block indices
+ auto header = static_cast<BlockIndexHeader*>(pr_blockIndexRaw);
+ while (header != nullptr) {
+ auto prev = static_cast<BlockIndexHeader*>(header->prev);
+ header->~BlockIndexHeader();
+ (Traits::free)(header);
+ header = prev;
+ }
+ }
+
+ // Slow path of enqueue_begin: the current block is full, so acquire (or
+ // reuse) a block for currentTailIndex and publish it in the block index.
+ inline void enqueue_begin_alloc(index_t currentTailIndex)
+ {
+ // We reached the end of a block, start a new one
+ if (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::is_empty()) {
+ // We can re-use the block ahead of us, it's empty!
+ this->tailBlock = this->tailBlock->next;
+ this->tailBlock->ConcurrentQueue::Block::reset_empty();
+
+ // We'll put the block on the block index (guaranteed to be room since we're conceptually removing the
+ // last block from it first -- except instead of removing then adding, we can just overwrite).
+ // Note that there must be a valid block index here, since even if allocation failed in the ctor,
+ // it would have been re-attempted when adding the first block to the queue; since there is such
+ // a block, a block index must have been successfully allocated.
+ }
+ else {
+ // We're going to need a new block; check that the block index has room
+ if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) {
+ // Hmm, the circular block index is already full -- we'll need
+ // to allocate a new index. Note pr_blockIndexRaw can only be nullptr if
+ // the initial allocation failed in the constructor.
+ new_block_index(pr_blockIndexSlotsUsed);
+ }
+
+ // Insert a new block in the circular linked list
+ auto newBlock = this->parent->ConcurrentQueue::requisition_block();
+ newBlock->ConcurrentQueue::Block::reset_empty();
+ if (this->tailBlock == nullptr) {
+ newBlock->next = newBlock;
+ }
+ else {
+ newBlock->next = this->tailBlock->next;
+ this->tailBlock->next = newBlock;
+ }
+ this->tailBlock = newBlock;
+ ++pr_blockIndexSlotsUsed;
+ }
+
+ // Add block to block index
+ auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];
+ entry.base = currentTailIndex;
+ entry.block = this->tailBlock;
+ blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release);
+ pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);
+ }
+
+ // Return the slot for the next element; allocates a block only when the
+ // tail crosses a block boundary (index is a multiple of BLOCK_SIZE).
+ tracy_force_inline T* enqueue_begin(index_t& currentTailIndex)
+ {
+ currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);
+ if (details::cqUnlikely((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0)) {
+ this->enqueue_begin_alloc(currentTailIndex);
+ }
+ return (*this->tailBlock)[currentTailIndex];
+ }
+
+ tracy_force_inline std::atomic<index_t>& get_tail_index()
+ {
+ return this->tailIndex;
+ }
+
+ // Dequeue up to 8192 elements in one call, handing each contiguous run to
+ // processData((*block)[index], count). Returns the number dequeued.
+ template<class NotifyThread, class ProcessData>
+ size_t dequeue_bulk(NotifyThread notifyThread, ProcessData processData)
+ {
+ auto tail = this->tailIndex.load(std::memory_order_relaxed);
+ auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);
+ auto desiredCount = static_cast<size_t>(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit));
+ if (details::circular_less_than<size_t>(0, desiredCount)) {
+ desiredCount = desiredCount < 8192 ? desiredCount : 8192;
+ std::atomic_thread_fence(std::memory_order_acquire);
+
+ // Optimistically claim desiredCount elements; the re-check against the
+ // freshly loaded tail below determines how many actually exist.
+ auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);
+ assert(overcommit <= myDequeueCount);
+
+ tail = this->tailIndex.load(std::memory_order_acquire);
+ auto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));
+ if (details::circular_less_than<size_t>(0, actualCount)) {
+ actualCount = desiredCount < actualCount ? desiredCount : actualCount;
+ if (actualCount < desiredCount) {
+ // Claimed more than existed: record the excess so later dequeues
+ // remain consistent.
+ this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);
+ }
+
+ // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this
+ // will never exceed tail.
+ auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);
+
+ // Determine which block the first element is in
+ auto localBlockIndex = blockIndex.load(std::memory_order_acquire);
+ auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);
+
+ auto headBase = localBlockIndex->entries[localBlockIndexHead].base;
+ auto firstBlockBaseIndex = firstIndex & ~static_cast<index_t>(BLOCK_SIZE - 1);
+ auto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(firstBlockBaseIndex - headBase) / BLOCK_SIZE);
+ auto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1);
+
+ // NOTE(review): threadId presumably lives on the typeless producer base
+ // (not visible in this chunk) -- confirm against its declaration.
+ notifyThread( this->threadId );
+
+ // Iterate the blocks and dequeue
+ auto index = firstIndex;
+ do {
+ auto firstIndexInBlock = index;
+ auto endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+ endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
+ auto block = localBlockIndex->entries[indexIndex].block;
+
+ const auto sz = endIndex - index;
+ processData( (*block)[index], sz );
+ index += sz;
+
+ block->ConcurrentQueue::Block::set_many_empty(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));
+ indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);
+ } while (index != firstIndex + actualCount);
+
+ return actualCount;
+ }
+ else {
+ // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent
+ this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);
+ }
+ }
+
+ return 0;
+ }
+
+ private:
+ // Maps the base element index of a block to the block itself.
+ struct BlockIndexEntry
+ {
+ index_t base;
+ Block* block;
+ };
+
+ struct BlockIndexHeader
+ {
+ size_t size;
+ std::atomic<size_t> front; // Current slot (not next, like pr_blockIndexFront)
+ BlockIndexEntry* entries;
+ void* prev;
+ };
+
+
+ // Allocate a block index twice the current size, copy the live entries
+ // over, and publish it. The old index stays linked via 'prev' so in-flight
+ // consumers can keep reading it; all indices are freed in the destructor.
+ bool new_block_index(size_t numberOfFilledSlotsToExpose)
+ {
+ auto prevBlockSizeMask = pr_blockIndexSize - 1;
+
+ // Create the new block
+ pr_blockIndexSize <<= 1;
+ auto newRawPtr = static_cast<char*>((Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize));
+ if (newRawPtr == nullptr) {
+ pr_blockIndexSize >>= 1; // Reset to allow graceful retry
+ return false;
+ }
+
+ auto newBlockIndexEntries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(newRawPtr + sizeof(BlockIndexHeader)));
+
+ // Copy in all the old indices, if any
+ size_t j = 0;
+ if (pr_blockIndexSlotsUsed != 0) {
+ auto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & prevBlockSizeMask;
+ do {
+ newBlockIndexEntries[j++] = pr_blockIndexEntries[i];
+ i = (i + 1) & prevBlockSizeMask;
+ } while (i != pr_blockIndexFront);
+ }
+
+ // Update everything
+ auto header = new (newRawPtr) BlockIndexHeader;
+ header->size = pr_blockIndexSize;
+ header->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed);
+ header->entries = newBlockIndexEntries;
+ header->prev = pr_blockIndexRaw; // we link the new block to the old one so we can free it later
+
+ pr_blockIndexFront = j;
+ pr_blockIndexEntries = newBlockIndexEntries;
+ pr_blockIndexRaw = newRawPtr;
+ blockIndex.store(header, std::memory_order_release);
+
+ return true;
+ }
+
+ private:
+ std::atomic<BlockIndexHeader*> blockIndex;
+
+ // To be used by producer only -- consumer must use the ones in referenced by blockIndex
+ size_t pr_blockIndexSlotsUsed;
+ size_t pr_blockIndexSize;
+ size_t pr_blockIndexFront; // Next slot (not current)
+ BlockIndexEntry* pr_blockIndexEntries;
+ void* pr_blockIndexRaw;
+ };
+
+ ExplicitProducer* get_explicit_producer(producer_token_t const& token)
+ {
+ return static_cast<ExplicitProducer*>(token.producer);
+ }
+
+ private:
+
+ //////////////////////////////////
+ // Block pool manipulation
+ //////////////////////////////////
+
+ void populate_initial_block_list(size_t blockCount)
+ {
+ initialBlockPoolSize = blockCount;
+ if (initialBlockPoolSize == 0) {
+ initialBlockPool = nullptr;
+ return;
+ }
+
+ initialBlockPool = create_array<Block>(blockCount);
+ if (initialBlockPool == nullptr) {
+ initialBlockPoolSize = 0;
+ }
+ for (size_t i = 0; i < initialBlockPoolSize; ++i) {
+ initialBlockPool[i].dynamicallyAllocated = false;
+ }
+ }
+
+ inline Block* try_get_block_from_initial_pool()
+ {
+ if (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) {
+ return nullptr;
+ }
+
+ auto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed);
+
+ return index < initialBlockPoolSize ? (initialBlockPool + index) : nullptr;
+ }
+
+ inline void add_block_to_free_list(Block* block)
+ {
+ freeList.add(block);
+ }
+
+ inline void add_blocks_to_free_list(Block* block)
+ {
+ while (block != nullptr) {
+ auto next = block->next;
+ add_block_to_free_list(block);
+ block = next;
+ }
+ }
+
+ inline Block* try_get_block_from_free_list()
+ {
+ return freeList.try_get();
+ }
+
+ // Gets a free block from one of the memory pools, or allocates a new one (if applicable)
+ Block* requisition_block()
+ {
+ auto block = try_get_block_from_initial_pool();
+ if (block != nullptr) {
+ return block;
+ }
+
+ block = try_get_block_from_free_list();
+ if (block != nullptr) {
+ return block;
+ }
+
+ return create<Block>();
+ }
+
+
+ //////////////////////////////////
+ // Producer list manipulation
+ //////////////////////////////////
+
+ ProducerBase* recycle_or_create_producer()
+ {
+ bool recycled;
+ return recycle_or_create_producer(recycled);
+ }
+
+ ProducerBase* recycle_or_create_producer(bool& recycled)
+ {
+ // Try to re-use one first
+ for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+ if (ptr->inactive.load(std::memory_order_relaxed)) {
+ if( ptr->size_approx() == 0 )
+ {
+ bool expected = true;
+ if (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) {
+ // We caught one! It's been marked as activated, the caller can have it
+ recycled = true;
+ return ptr;
+ }
+ }
+ }
+ }
+
+ recycled = false;
+ return add_producer(static_cast<ProducerBase*>(create<ExplicitProducer>(this)));
+ }
+
+ ProducerBase* add_producer(ProducerBase* producer)
+ {
+ // Handle failed memory allocation
+ if (producer == nullptr) {
+ return nullptr;
+ }
+
+ producerCount.fetch_add(1, std::memory_order_relaxed);
+
+ // Add it to the lock-free list
+ auto prevTail = producerListTail.load(std::memory_order_relaxed);
+ do {
+ producer->next = prevTail;
+ } while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed));
+
+ return producer;
+ }
+
+ void reown_producers()
+ {
+ // After another instance is moved-into/swapped-with this one, all the
+ // producers we stole still think their parents are the other queue.
+ // So fix them up!
+ for (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) {
+ ptr->parent = this;
+ }
+ }
+
+ //////////////////////////////////
+ // Utility functions
+ //////////////////////////////////
+
+ template<typename U>
+ static inline U* create_array(size_t count)
+ {
+ assert(count > 0);
+ return static_cast<U*>((Traits::malloc)(sizeof(U) * count));
+ }
+
+ template<typename U>
+ static inline void destroy_array(U* p, size_t count)
+ {
+ ((void)count);
+ if (p != nullptr) {
+ assert(count > 0);
+ (Traits::free)(p);
+ }
+ }
+
+ template<typename U>
+ static inline U* create()
+ {
+ auto p = (Traits::malloc)(sizeof(U));
+ return new (p) U;
+ }
+
+ template<typename U, typename A1>
+ static inline U* create(A1&& a1)
+ {
+ auto p = (Traits::malloc)(sizeof(U));
+ return new (p) U(std::forward<A1>(a1));
+ }
+
+ template<typename U>
+ static inline void destroy(U* p)
+ {
+ if (p != nullptr) {
+ p->~U();
+ }
+ (Traits::free)(p);
+ }
+
+private:
+ std::atomic<ProducerBase*> producerListTail;
+ std::atomic<std::uint32_t> producerCount;
+
+ std::atomic<size_t> initialBlockPoolIndex;
+ Block* initialBlockPool;
+ size_t initialBlockPoolSize;
+
+ FreeList<Block> freeList;
+
+ std::atomic<std::uint32_t> nextExplicitConsumerId;
+ std::atomic<std::uint32_t> globalExplicitConsumerOffset;
+};
+
+
+template<typename T, typename Traits>
+ProducerToken::ProducerToken(ConcurrentQueue<T, Traits>& queue)
+ : producer(queue.recycle_or_create_producer())
+{
+ if (producer != nullptr) {
+ producer->token = this;
+ producer->threadId = detail::GetThreadHandleImpl();
+ }
+}
+
+template<typename T, typename Traits>
+ConsumerToken::ConsumerToken(ConcurrentQueue<T, Traits>& queue)
+ : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)
+{
+ initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release);
+ lastKnownGlobalOffset = static_cast<std::uint32_t>(-1);
+}
+
+template<typename T, typename Traits>
+inline void swap(ConcurrentQueue<T, Traits>& a, ConcurrentQueue<T, Traits>& b) noexcept
+{
+ a.swap(b);
+}
+
+inline void swap(ProducerToken& a, ProducerToken& b) noexcept
+{
+ a.swap(b);
+}
+
+inline void swap(ConsumerToken& a, ConsumerToken& b) noexcept
+{
+ a.swap(b);
+}
+
+}
+
+} /* namespace tracy */
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
diff --git a/3rdparty/tracy/tracy/client/tracy_rpmalloc.cpp b/3rdparty/tracy/tracy/client/tracy_rpmalloc.cpp
new file mode 100644
index 0000000..fbfd74a
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/tracy_rpmalloc.cpp
@@ -0,0 +1,2500 @@
+#ifdef TRACY_ENABLE
+
+/* rpmalloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson
+ *
+ * This library provides a cross-platform lock free thread caching malloc implementation in C11.
+ * The latest source code is always available at
+ *
+ * https://github.com/mjansson/rpmalloc
+ *
+ * This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
+ *
+ */
+
+#include "tracy_rpmalloc.hpp"
+
+/// Build time configurable limits
+#ifndef HEAP_ARRAY_SIZE
+//! Size of heap hashmap
+#define HEAP_ARRAY_SIZE 47
+#endif
+#ifndef ENABLE_THREAD_CACHE
+//! Enable per-thread cache
+#define ENABLE_THREAD_CACHE 1
+#endif
+#ifndef ENABLE_GLOBAL_CACHE
+//! Enable global cache shared between all threads, requires thread cache
+#define ENABLE_GLOBAL_CACHE 1
+#endif
+#ifndef ENABLE_VALIDATE_ARGS
+//! Enable validation of args to public entry points
+#define ENABLE_VALIDATE_ARGS 0
+#endif
+#ifndef ENABLE_STATISTICS
+//! Enable statistics collection
+#define ENABLE_STATISTICS 0
+#endif
+#ifndef ENABLE_ASSERTS
+//! Enable asserts
+#define ENABLE_ASSERTS 0
+#endif
+#ifndef ENABLE_OVERRIDE
+//! Override standard library malloc/free and new/delete entry points
+#define ENABLE_OVERRIDE 0
+#endif
+#ifndef ENABLE_PRELOAD
+//! Support preloading
+#define ENABLE_PRELOAD 0
+#endif
+#ifndef DISABLE_UNMAP
+//! Disable unmapping memory pages
+#define DISABLE_UNMAP 0
+#endif
+#ifndef DEFAULT_SPAN_MAP_COUNT
+//! Default number of spans to map in call to map more virtual memory (default values yield 4MiB here)
+#define DEFAULT_SPAN_MAP_COUNT 64
+#endif
+
+#if ENABLE_THREAD_CACHE
+#ifndef ENABLE_UNLIMITED_CACHE
+//! Unlimited thread and global cache
+#define ENABLE_UNLIMITED_CACHE 0
+#endif
+#ifndef ENABLE_UNLIMITED_THREAD_CACHE
+//! Unlimited cache disables any thread cache limitations
+#define ENABLE_UNLIMITED_THREAD_CACHE ENABLE_UNLIMITED_CACHE
+#endif
+#if !ENABLE_UNLIMITED_THREAD_CACHE
+#ifndef THREAD_CACHE_MULTIPLIER
+//! Multiplier for thread cache (cache limit will be span release count multiplied by this value)
+#define THREAD_CACHE_MULTIPLIER 16
+#endif
+#ifndef ENABLE_ADAPTIVE_THREAD_CACHE
+//! Enable adaptive size of per-thread cache (still bounded by THREAD_CACHE_MULTIPLIER hard limit)
+#define ENABLE_ADAPTIVE_THREAD_CACHE 0
+#endif
+#endif
+#endif
+
+#if ENABLE_GLOBAL_CACHE && ENABLE_THREAD_CACHE
+#ifndef ENABLE_UNLIMITED_GLOBAL_CACHE
+//! Unlimited cache disables any global cache limitations
+#define ENABLE_UNLIMITED_GLOBAL_CACHE ENABLE_UNLIMITED_CACHE
+#endif
+#if !ENABLE_UNLIMITED_GLOBAL_CACHE
+//! Multiplier for global cache (cache limit will be span release count multiplied by this value)
+#define GLOBAL_CACHE_MULTIPLIER (THREAD_CACHE_MULTIPLIER * 6)
+#endif
+#else
+# undef ENABLE_GLOBAL_CACHE
+# define ENABLE_GLOBAL_CACHE 0
+#endif
+
+#if !ENABLE_THREAD_CACHE || ENABLE_UNLIMITED_THREAD_CACHE
+# undef ENABLE_ADAPTIVE_THREAD_CACHE
+# define ENABLE_ADAPTIVE_THREAD_CACHE 0
+#endif
+
+#if DISABLE_UNMAP && !ENABLE_GLOBAL_CACHE
+# error Must use global cache if unmap is disabled
+#endif
+
+#if defined( _WIN32 ) || defined( __WIN32__ ) || defined( _WIN64 )
+# define PLATFORM_WINDOWS 1
+# define PLATFORM_POSIX 0
+#else
+# define PLATFORM_WINDOWS 0
+# define PLATFORM_POSIX 1
+#endif
+
+#define _Static_assert static_assert
+
+/// Platform and arch specifics
+#ifndef FORCEINLINE
+# if defined(_MSC_VER) && !defined(__clang__)
+# define FORCEINLINE inline __forceinline
+# else
+# define FORCEINLINE inline __attribute__((__always_inline__))
+# endif
+#endif
+#if PLATFORM_WINDOWS
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
+# include <windows.h>
+# if ENABLE_VALIDATE_ARGS
+# include <Intsafe.h>
+# endif
+#else
+# include <unistd.h>
+# include <stdio.h>
+# include <stdlib.h>
+# if defined(__APPLE__)
+# if !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
+# include <mach/mach_vm.h>
+# endif
+# include <mach/vm_statistics.h>
+# include <pthread.h>
+# endif
+# if defined(__HAIKU__)
+# include <OS.h>
+# include <pthread.h>
+# endif
+#endif
+
+#include <stdint.h>
+#include <string.h>
+
+#if ENABLE_ASSERTS
+# undef NDEBUG
+# if defined(_MSC_VER) && !defined(_DEBUG)
+# define _DEBUG
+# endif
+# include <assert.h>
+#else
+# undef assert
+# define assert(x) do {} while(0)
+#endif
+#if ENABLE_STATISTICS
+# include <stdio.h>
+#endif
+
+#include <atomic>
+
+namespace tracy
+{
+
+typedef std::atomic<int32_t> atomic32_t;
+typedef std::atomic<int64_t> atomic64_t;
+typedef std::atomic<void*> atomicptr_t;
+
+#define atomic_thread_fence_acquire() std::atomic_thread_fence(std::memory_order_acquire)
+#define atomic_thread_fence_release() std::atomic_thread_fence(std::memory_order_release)
+
+static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return std::atomic_load_explicit(src, std::memory_order_relaxed); }
+static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { std::atomic_store_explicit(dst, val, std::memory_order_relaxed); }
+static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return std::atomic_fetch_add_explicit(val, 1, std::memory_order_relaxed) + 1; }
+#if ENABLE_STATISTICS || ENABLE_ADAPTIVE_THREAD_CACHE
+static FORCEINLINE int32_t atomic_decr32(atomic32_t* val) { return atomic_fetch_add_explicit(val, -1, memory_order_relaxed) - 1; }
+#endif
+static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return std::atomic_fetch_add_explicit(val, add, std::memory_order_relaxed) + add; }
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return std::atomic_load_explicit(src, std::memory_order_relaxed); }
+static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { std::atomic_store_explicit(dst, val, std::memory_order_relaxed); }
+static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return std::atomic_compare_exchange_weak_explicit(dst, &ref, val, std::memory_order_release, std::memory_order_acquire); }
+
+#if defined(_MSC_VER) && !defined(__clang__)
+# define EXPECTED(x) (x)
+# define UNEXPECTED(x) (x)
+#else
+# define EXPECTED(x) __builtin_expect((x), 1)
+# define UNEXPECTED(x) __builtin_expect((x), 0)
+#endif
+
+/// Preconfigured limits and sizes
+//! Granularity of a small allocation block
+#define SMALL_GRANULARITY 16
+//! Small granularity shift count
+#define SMALL_GRANULARITY_SHIFT 4
+//! Number of small block size classes
+#define SMALL_CLASS_COUNT 65
+//! Maximum size of a small block
+#define SMALL_SIZE_LIMIT (SMALL_GRANULARITY * (SMALL_CLASS_COUNT - 1))
+//! Granularity of a medium allocation block
+#define MEDIUM_GRANULARITY 512
+//! Medium granularity shift count
+#define MEDIUM_GRANULARITY_SHIFT 9
+//! Number of medium block size classes
+#define MEDIUM_CLASS_COUNT 61
+//! Total number of small + medium size classes
+#define SIZE_CLASS_COUNT (SMALL_CLASS_COUNT + MEDIUM_CLASS_COUNT)
+//! Number of large block size classes
+#define LARGE_CLASS_COUNT 32
+//! Maximum size of a medium block
+#define MEDIUM_SIZE_LIMIT (SMALL_SIZE_LIMIT + (MEDIUM_GRANULARITY * MEDIUM_CLASS_COUNT))
+//! Maximum size of a large block
+#define LARGE_SIZE_LIMIT ((LARGE_CLASS_COUNT * _memory_span_size) - SPAN_HEADER_SIZE)
+//! Size of a span header (must be a multiple of SMALL_GRANULARITY)
+#define SPAN_HEADER_SIZE 96
+
+#if ENABLE_VALIDATE_ARGS
+//! Maximum allocation size to avoid integer overflow
+#undef MAX_ALLOC_SIZE
+#define MAX_ALLOC_SIZE (((size_t)-1) - _memory_span_size)
+#endif
+
+#define pointer_offset(ptr, ofs) (void*)((char*)(ptr) + (ptrdiff_t)(ofs))
+#define pointer_diff(first, second) (ptrdiff_t)((const char*)(first) - (const char*)(second))
+
+#define INVALID_POINTER ((void*)((uintptr_t)-1))
+
+/// Data types
+//! A memory heap, per thread
+typedef struct heap_t heap_t;
+//! Heap spans per size class
+typedef struct heap_class_t heap_class_t;
+//! Span of memory pages
+typedef struct span_t span_t;
+//! Span list
+typedef struct span_list_t span_list_t;
+//! Span active data
+typedef struct span_active_t span_active_t;
+//! Size class definition
+typedef struct size_class_t size_class_t;
+//! Global cache
+typedef struct global_cache_t global_cache_t;
+
+//! Flag indicating span is the first (master) span of a split superspan
+#define SPAN_FLAG_MASTER 1U
+//! Flag indicating span is a secondary (sub) span of a split superspan
+#define SPAN_FLAG_SUBSPAN 2U
+//! Flag indicating span has blocks with increased alignment
+#define SPAN_FLAG_ALIGNED_BLOCKS 4U
+
+#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
+struct span_use_t {
+ //! Current number of spans used (actually used, not in cache)
+ atomic32_t current;
+ //! High water mark of spans used
+ uint32_t high;
+#if ENABLE_STATISTICS
+ //! Number of spans transitioned to global cache
+ uint32_t spans_to_global;
+ //! Number of spans transitioned from global cache
+ uint32_t spans_from_global;
+ //! Number of spans transitioned to thread cache
+ uint32_t spans_to_cache;
+ //! Number of spans transitioned from thread cache
+ uint32_t spans_from_cache;
+ //! Number of spans transitioned to reserved state
+ uint32_t spans_to_reserved;
+ //! Number of spans transitioned from reserved state
+ uint32_t spans_from_reserved;
+ //! Number of raw memory map calls
+ uint32_t spans_map_calls;
+#endif
+};
+typedef struct span_use_t span_use_t;
+#endif
+
+#if ENABLE_STATISTICS
+struct size_class_use_t {
+ //! Current number of allocations
+ atomic32_t alloc_current;
+ //! Peak number of allocations
+ int32_t alloc_peak;
+ //! Total number of allocations
+ int32_t alloc_total;
+ //! Total number of frees
+ atomic32_t free_total;
+ //! Number of spans in use
+ uint32_t spans_current;
+ //! Number of spans transitioned to cache
+ uint32_t spans_peak;
+ //! Number of spans transitioned to cache
+ uint32_t spans_to_cache;
+ //! Number of spans transitioned from cache
+ uint32_t spans_from_cache;
+ //! Number of spans transitioned from reserved state
+ uint32_t spans_from_reserved;
+ //! Number of spans mapped
+ uint32_t spans_map_calls;
+};
+typedef struct size_class_use_t size_class_use_t;
+#endif
+
+typedef enum span_state_t {
+ SPAN_STATE_ACTIVE = 0,
+ SPAN_STATE_PARTIAL,
+ SPAN_STATE_FULL
+} span_state_t;
+
+//A span can either represent a single span of memory pages with size declared by span_map_count configuration variable,
+//or a set of spans in a continuous region, a super span. Any reference to the term "span" usually refers to both a single
+//span or a super span. A super span can further be divided into multiple spans (or this, super spans), where the first
+//(super)span is the master and subsequent (super)spans are subspans. The master span keeps track of how many subspans
+//that are still alive and mapped in virtual memory, and once all subspans and master have been unmapped the entire
+//superspan region is released and unmapped (on Windows for example, the entire superspan range has to be released
+//in the same call to release the virtual memory range, but individual subranges can be decommitted individually
+//to reduce physical memory use).
+struct span_t {
+ //! Free list
+ void* free_list;
+ //! State
+ uint32_t state;
+ //! Used count when not active (not including deferred free list)
+ uint32_t used_count;
+ //! Block count
+ uint32_t block_count;
+ //! Size class
+ uint32_t size_class;
+ //! Index of last block initialized in free list
+ uint32_t free_list_limit;
+ //! Span list size when part of a cache list, or size of deferred free list when partial/full
+ uint32_t list_size;
+ //! Deferred free list
+ atomicptr_t free_list_deferred;
+ //! Size of a block
+ uint32_t block_size;
+ //! Flags and counters
+ uint32_t flags;
+ //! Number of spans
+ uint32_t span_count;
+ //! Total span counter for master spans, distance for subspans
+ uint32_t total_spans_or_distance;
+ //! Remaining span counter, for master spans
+ atomic32_t remaining_spans;
+ //! Alignment offset
+ uint32_t align_offset;
+ //! Owning heap
+ heap_t* heap;
+ //! Next span
+ span_t* next;
+ //! Previous span
+ span_t* prev;
+};
+_Static_assert(sizeof(span_t) <= SPAN_HEADER_SIZE, "span size mismatch");
+
+struct heap_class_t {
+ //! Free list of active span
+ void* free_list;
+ //! Double linked list of partially used spans with free blocks for each size class.
+ // Current active span is at head of list. Previous span pointer in head points to tail span of list.
+ span_t* partial_span;
+};
+
+struct heap_t {
+ //! Active and semi-used span data per size class
+ heap_class_t span_class[SIZE_CLASS_COUNT];
+#if ENABLE_THREAD_CACHE
+ //! List of free spans (single linked list)
+ span_t* span_cache[LARGE_CLASS_COUNT];
+ //! List of deferred free spans of class 0 (single linked list)
+ atomicptr_t span_cache_deferred;
+#endif
+#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
+ //! Current and high water mark of spans used per span count
+ span_use_t span_use[LARGE_CLASS_COUNT];
+#endif
+ //! Mapped but unused spans
+ span_t* span_reserve;
+ //! Master span for mapped but unused spans
+ span_t* span_reserve_master;
+ //! Number of mapped but unused spans
+ size_t spans_reserved;
+ //! Next heap in id list
+ heap_t* next_heap;
+ //! Next heap in orphan list
+ heap_t* next_orphan;
+ //! Memory pages alignment offset
+ size_t align_offset;
+ //! Heap ID
+ int32_t id;
+#if ENABLE_STATISTICS
+ //! Number of bytes transitioned thread -> global
+ size_t thread_to_global;
+ //! Number of bytes transitioned global -> thread
+ size_t global_to_thread;
+ //! Allocation stats per size class
+ size_class_use_t size_class_use[SIZE_CLASS_COUNT + 1];
+#endif
+};
+
+struct size_class_t {
+ //! Size of blocks in this class
+ uint32_t block_size;
+ //! Number of blocks in each chunk
+ uint16_t block_count;
+ //! Class index this class is merged with
+ uint16_t class_idx;
+};
+_Static_assert(sizeof(size_class_t) == 8, "Size class size mismatch");
+
+struct global_cache_t {
+ //! Cache list pointer
+ atomicptr_t cache;
+ //! Cache size
+ atomic32_t size;
+ //! ABA counter
+ atomic32_t counter;
+};
+
+/// Global data
+//! Initialized flag
+static int _rpmalloc_initialized;
+//! Configuration
+static rpmalloc_config_t _memory_config;
+//! Memory page size
+static size_t _memory_page_size;
+//! Shift to divide by page size
+static size_t _memory_page_size_shift;
+//! Granularity at which memory pages are mapped by OS
+static size_t _memory_map_granularity;
+#if RPMALLOC_CONFIGURABLE
+//! Size of a span of memory pages
+static size_t _memory_span_size;
+//! Shift to divide by span size
+static size_t _memory_span_size_shift;
+//! Mask to get to start of a memory span
+static uintptr_t _memory_span_mask;
+#else
+//! Hardwired span size (64KiB)
+#define _memory_span_size (64 * 1024)
+#define _memory_span_size_shift 16
+#define _memory_span_mask (~((uintptr_t)(_memory_span_size - 1)))
+#endif
+//! Number of spans to map in each map call
+static size_t _memory_span_map_count;
+//! Number of spans to release from thread cache to global cache (single spans)
+static size_t _memory_span_release_count;
+//! Number of spans to release from thread cache to global cache (large multiple spans)
+static size_t _memory_span_release_count_large;
+//! Global size classes
+static size_class_t _memory_size_class[SIZE_CLASS_COUNT];
+//! Run-time size limit of medium blocks
+static size_t _memory_medium_size_limit;
+//! Heap ID counter
+static atomic32_t _memory_heap_id;
+//! Huge page support
+static int _memory_huge_pages;
+#if ENABLE_GLOBAL_CACHE
+//! Global span cache
+static global_cache_t _memory_span_cache[LARGE_CLASS_COUNT];
+#endif
+//! All heaps
+static atomicptr_t _memory_heaps[HEAP_ARRAY_SIZE];
+//! Orphaned heaps
+static atomicptr_t _memory_orphan_heaps;
+//! Running orphan counter to avoid ABA issues in linked list
+static atomic32_t _memory_orphan_counter;
+#if ENABLE_STATISTICS
+//! Active heap count
+static atomic32_t _memory_active_heaps;
+//! Number of currently mapped memory pages
+static atomic32_t _mapped_pages;
+//! Peak number of concurrently mapped memory pages
+static int32_t _mapped_pages_peak;
+//! Number of currently unused spans
+static atomic32_t _reserved_spans;
+//! Running counter of total number of mapped memory pages since start
+static atomic32_t _mapped_total;
+//! Running counter of total number of unmapped memory pages since start
+static atomic32_t _unmapped_total;
+//! Number of currently mapped memory pages in OS calls
+static atomic32_t _mapped_pages_os;
+//! Number of currently allocated pages in huge allocations
+static atomic32_t _huge_pages_current;
+//! Peak number of currently allocated pages in huge allocations
+static int32_t _huge_pages_peak;
+#endif
+
+//! Current thread heap
+#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+static pthread_key_t _memory_thread_heap;
+#else
+# ifdef _MSC_VER
+# define _Thread_local __declspec(thread)
+# define TLS_MODEL
+# else
+# define TLS_MODEL __attribute__((tls_model("initial-exec")))
+# if !defined(__clang__) && defined(__GNUC__)
+# define _Thread_local __thread
+# endif
+# endif
+static _Thread_local heap_t* _memory_thread_heap TLS_MODEL;
+#endif
+
+static inline heap_t*
+get_thread_heap_raw(void) {
+#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+ return pthread_getspecific(_memory_thread_heap);
+#else
+ return _memory_thread_heap;
+#endif
+}
+
+//! Get the current thread heap
+static inline heap_t*
+get_thread_heap(void) {
+ heap_t* heap = get_thread_heap_raw();
+#if ENABLE_PRELOAD
+ if (EXPECTED(heap != 0))
+ return heap;
+ rpmalloc_initialize();
+ return get_thread_heap_raw();
+#else
+ return heap;
+#endif
+}
+
+//! Set the current thread heap
+static void
+set_thread_heap(heap_t* heap) {
+#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+ pthread_setspecific(_memory_thread_heap, heap);
+#else
+ _memory_thread_heap = heap;
+#endif
+}
+
+//! Default implementation to map more virtual memory
+static void*
+_memory_map_os(size_t size, size_t* offset);
+
+//! Default implementation to unmap virtual memory
+static void
+_memory_unmap_os(void* address, size_t size, size_t offset, size_t release);
+
+//! Lookup a memory heap from heap ID
+static heap_t*
+_memory_heap_lookup(int32_t id) {
+ uint32_t list_idx = id % HEAP_ARRAY_SIZE;
+ heap_t* heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
+ while (heap && (heap->id != id))
+ heap = heap->next_heap;
+ return heap;
+}
+
+#if ENABLE_STATISTICS
+# define _memory_statistics_inc(counter, value) counter += value
+# define _memory_statistics_dec(counter, value) counter -= value
+# define _memory_statistics_add(atomic_counter, value) atomic_add32(atomic_counter, (int32_t)(value))
+# define _memory_statistics_add_peak(atomic_counter, value, peak) do { int32_t _cur_count = atomic_add32(atomic_counter, (int32_t)(value)); if (_cur_count > (peak)) peak = _cur_count; } while (0)
+# define _memory_statistics_sub(atomic_counter, value) atomic_add32(atomic_counter, -(int32_t)(value))
+# define _memory_statistics_inc_alloc(heap, class_idx) do { \
+ int32_t alloc_current = atomic_incr32(&heap->size_class_use[class_idx].alloc_current); \
+ if (alloc_current > heap->size_class_use[class_idx].alloc_peak) \
+ heap->size_class_use[class_idx].alloc_peak = alloc_current; \
+ heap->size_class_use[class_idx].alloc_total++; \
+} while(0)
+# define _memory_statistics_inc_free(heap, class_idx) do { \
+ atomic_decr32(&heap->size_class_use[class_idx].alloc_current); \
+ atomic_incr32(&heap->size_class_use[class_idx].free_total); \
+} while(0)
+#else
+# define _memory_statistics_inc(counter, value) do {} while(0)
+# define _memory_statistics_dec(counter, value) do {} while(0)
+# define _memory_statistics_add(atomic_counter, value) do {} while(0)
+# define _memory_statistics_add_peak(atomic_counter, value, peak) do {} while (0)
+# define _memory_statistics_sub(atomic_counter, value) do {} while(0)
+# define _memory_statistics_inc_alloc(heap, class_idx) do {} while(0)
+# define _memory_statistics_inc_free(heap, class_idx) do {} while(0)
+#endif
+
+static void
+_memory_heap_cache_insert(heap_t* heap, span_t* span);
+
+//! Map more virtual memory
+static void*
+_memory_map(size_t size, size_t* offset) {
+ assert(!(size % _memory_page_size));
+ assert(size >= _memory_page_size);
+ _memory_statistics_add_peak(&_mapped_pages, (size >> _memory_page_size_shift), _mapped_pages_peak);
+ _memory_statistics_add(&_mapped_total, (size >> _memory_page_size_shift));
+ return _memory_config.memory_map(size, offset);
+}
+
+//! Unmap virtual memory
+static void
+_memory_unmap(void* address, size_t size, size_t offset, size_t release) {
+ assert(!release || (release >= size));
+ assert(!release || (release >= _memory_page_size));
+ if (release) {
+ assert(!(release % _memory_page_size));
+ _memory_statistics_sub(&_mapped_pages, (release >> _memory_page_size_shift));
+ _memory_statistics_add(&_unmapped_total, (release >> _memory_page_size_shift));
+ }
+ _memory_config.memory_unmap(address, size, offset, release);
+}
+
+//! Declare the span to be a subspan and store distance from master span and span count
+static void
+_memory_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count) {
+ assert((subspan != master) || (subspan->flags & SPAN_FLAG_MASTER));
+ if (subspan != master) {
+ subspan->flags = SPAN_FLAG_SUBSPAN;
+ subspan->total_spans_or_distance = (uint32_t)((uintptr_t)pointer_diff(subspan, master) >> _memory_span_size_shift);
+ subspan->align_offset = 0;
+ }
+ subspan->span_count = (uint32_t)span_count;
+}
+
+//! Use reserved spans to fulfill a memory map request (reserve size must be checked by caller)
+static span_t*
+_memory_map_from_reserve(heap_t* heap, size_t span_count) {
+ //Update the heap span reserve
+ span_t* span = heap->span_reserve;
+ heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
+ heap->spans_reserved -= span_count;
+
+ _memory_span_mark_as_subspan_unless_master(heap->span_reserve_master, span, span_count);
+ if (span_count <= LARGE_CLASS_COUNT)
+ _memory_statistics_inc(heap->span_use[span_count - 1].spans_from_reserved, 1);
+
+ return span;
+}
+
+//! Get the aligned number of spans to map in based on wanted count, configured mapping granularity and the page size
+static size_t
+_memory_map_align_span_count(size_t span_count) {
+ size_t request_count = (span_count > _memory_span_map_count) ? span_count : _memory_span_map_count;
+ if ((_memory_page_size > _memory_span_size) && ((request_count * _memory_span_size) % _memory_page_size))
+ request_count += _memory_span_map_count - (request_count % _memory_span_map_count);
+ return request_count;
+}
+
+//! Store the given spans as reserve in the given heap
+static void
+_memory_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count) {
+ heap->span_reserve_master = master;
+ heap->span_reserve = reserve;
+ heap->spans_reserved = reserve_span_count;
+}
+
+//! Setup a newly mapped span
+static void
+_memory_span_initialize(span_t* span, size_t total_span_count, size_t span_count, size_t align_offset) {
+ span->total_spans_or_distance = (uint32_t)total_span_count;
+ span->span_count = (uint32_t)span_count;
+ span->align_offset = (uint32_t)align_offset;
+ span->flags = SPAN_FLAG_MASTER;
+ atomic_store32(&span->remaining_spans, (int32_t)total_span_count);
+}
+
+//! Map a akigned set of spans, taking configured mapping granularity and the page size into account
+static span_t*
+_memory_map_aligned_span_count(heap_t* heap, size_t span_count) {
+ //If we already have some, but not enough, reserved spans, release those to heap cache and map a new
+ //full set of spans. Otherwise we would waste memory if page size > span size (huge pages)
+ size_t aligned_span_count = _memory_map_align_span_count(span_count);
+ size_t align_offset = 0;
+ span_t* span = (span_t*)_memory_map(aligned_span_count * _memory_span_size, &align_offset);
+ if (!span)
+ return 0;
+ _memory_span_initialize(span, aligned_span_count, span_count, align_offset);
+ _memory_statistics_add(&_reserved_spans, aligned_span_count);
+ if (span_count <= LARGE_CLASS_COUNT)
+ _memory_statistics_inc(heap->span_use[span_count - 1].spans_map_calls, 1);
+ if (aligned_span_count > span_count) {
+ if (heap->spans_reserved) {
+ _memory_span_mark_as_subspan_unless_master(heap->span_reserve_master, heap->span_reserve, heap->spans_reserved);
+ _memory_heap_cache_insert(heap, heap->span_reserve);
+ }
+ _memory_heap_set_reserved_spans(heap, span, (span_t*)pointer_offset(span, span_count * _memory_span_size), aligned_span_count - span_count);
+ }
+ return span;
+}
+
+//! Map in memory pages for the given number of spans (or use previously reserved pages).
+//! The reserve is preferred; a new aligned mapping is only made when it cannot satisfy the request.
+static span_t*
+_memory_map_spans(heap_t* heap, size_t span_count) {
+ if (span_count <= heap->spans_reserved)
+ return _memory_map_from_reserve(heap, span_count);
+ return _memory_map_aligned_span_count(heap, span_count);
+}
+
+//! Unmap memory pages for the given number of spans (or mark as unused if no partial unmappings)
+static void
+_memory_unmap_span(span_t* span) {
+ //A span must be flagged as exactly one of master/subspan here; the combined
+ //MASTER|SUBSPAN state is reserved for an already-released master (set below)
+ assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN));
+ assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN));
+
+ int is_master = !!(span->flags & SPAN_FLAG_MASTER);
+ //For a subspan, total_spans_or_distance is the distance in spans back to the master span
+ span_t* master = is_master ? span : (span_t*)(pointer_offset(span, -(int32_t)(span->total_spans_or_distance * _memory_span_size)));
+ assert(is_master || (span->flags & SPAN_FLAG_SUBSPAN));
+ assert(master->flags & SPAN_FLAG_MASTER);
+
+ size_t span_count = span->span_count;
+ if (!is_master) {
+ //Directly unmap subspans (unless huge pages, in which case we defer and unmap entire page range with master)
+ assert(span->align_offset == 0);
+ if (_memory_span_size >= _memory_page_size) {
+ _memory_unmap(span, span_count * _memory_span_size, 0, 0);
+ _memory_statistics_sub(&_reserved_spans, span_count);
+ }
+ } else {
+ //Special double flag to denote an unmapped master
+ //It must be kept in memory since span header must be used
+ span->flags |= SPAN_FLAG_MASTER | SPAN_FLAG_SUBSPAN;
+ }
+
+ //Last unmapper of the range (counter reaches zero) releases the whole super span
+ if (atomic_add32(&master->remaining_spans, -(int32_t)span_count) <= 0) {
+ //Everything unmapped, unmap the master span with release flag to unmap the entire range of the super span
+ assert(!!(master->flags & SPAN_FLAG_MASTER) && !!(master->flags & SPAN_FLAG_SUBSPAN));
+ size_t unmap_count = master->span_count;
+ if (_memory_span_size < _memory_page_size)
+ unmap_count = master->total_spans_or_distance;
+ _memory_statistics_sub(&_reserved_spans, unmap_count);
+ _memory_unmap(master, unmap_count * _memory_span_size, master->align_offset, master->total_spans_or_distance * _memory_span_size);
+ }
+}
+
+#if ENABLE_THREAD_CACHE
+
+//! Unmap a single linked list of spans. The list length is taken from the
+//! head span's list_size; the list is expected to end after exactly that many spans.
+static void
+_memory_unmap_span_list(span_t* span) {
+ size_t list_size = span->list_size;
+ for (size_t ispan = 0; ispan < list_size; ++ispan) {
+ //Read next before unmapping, since unmapping may release the span's memory
+ span_t* next_span = span->next;
+ _memory_unmap_span(span);
+ span = next_span;
+ }
+ assert(!span);
+}
+
+//! Add span to head of single linked span list, returning the new list size.
+//! Only the head span's list_size is kept up to date.
+static size_t
+_memory_span_list_push(span_t** head, span_t* span) {
+ span->next = *head;
+ if (*head)
+ span->list_size = (*head)->list_size + 1;
+ else
+ span->list_size = 1;
+ *head = span;
+ return span->list_size;
+}
+
+//! Remove span from head of single linked span list, returns the popped span.
+//! The new head (if any) inherits the decremented list size; caller must not pass an empty list.
+static span_t*
+_memory_span_list_pop(span_t** head) {
+ span_t* span = *head;
+ span_t* next_span = 0;
+ if (span->list_size > 1) {
+ assert(span->next);
+ next_span = span->next;
+ assert(next_span);
+ next_span->list_size = span->list_size - 1;
+ }
+ *head = next_span;
+ return span;
+}
+
+//! Split a single linked span list after at most limit spans (minimum 2).
+//! The first list (of size limit) stays rooted at span; the remainder is returned
+//! as a new list with its own list_size, or 0 if the list was within the limit.
+static span_t*
+_memory_span_list_split(span_t* span, size_t limit) {
+ span_t* next = 0;
+ if (limit < 2)
+ limit = 2;
+ if (span->list_size > limit) {
+ uint32_t list_size = 1;
+ span_t* last = span;
+ next = span->next;
+ //Walk to the split point, counting spans kept in the first list
+ while (list_size < limit) {
+ last = next;
+ next = next->next;
+ ++list_size;
+ }
+ last->next = 0;
+ assert(next);
+ next->list_size = span->list_size - list_size;
+ span->list_size = list_size;
+ span->prev = 0;
+ }
+ return next;
+}
+
+#endif
+
+//! Add a span to partial span double linked list at the head.
+//! The head span's prev pointer always refers to the list tail (the list is not circular
++//! in the forward direction; the last span's next is 0).
+static void
+_memory_span_partial_list_add(span_t** head, span_t* span) {
+ if (*head) {
+ span->next = *head;
+ //Maintain pointer to tail span
+ span->prev = (*head)->prev;
+ (*head)->prev = span;
+ } else {
+ span->next = 0;
+ span->prev = span;
+ }
+ *head = span;
+}
+
+//! Add a span to partial span double linked list at the tail,
+//! using the head span's prev pointer as the cached tail reference
+static void
+_memory_span_partial_list_add_tail(span_t** head, span_t* span) {
+ span->next = 0;
+ if (*head) {
+ span_t* tail = (*head)->prev;
+ tail->next = span;
+ span->prev = tail;
+ //Maintain pointer to tail span
+ (*head)->prev = span;
+ } else {
+ span->prev = span;
+ *head = span;
+ }
+}
+
+//! Pop head span from partial span double linked list; caller must not pass an empty list
+static void
+_memory_span_partial_list_pop_head(span_t** head) {
+ span_t* span = *head;
+ *head = span->next;
+ if (*head) {
+ //Maintain pointer to tail span
+ (*head)->prev = span->prev;
+ }
+}
+
+//! Remove a span from partial span double linked list.
+//! The span must currently be a member of the list rooted at *head.
+static void
+_memory_span_partial_list_remove(span_t** head, span_t* span) {
+ if (UNEXPECTED(*head == span)) {
+ _memory_span_partial_list_pop_head(head);
+ } else {
+ span_t* next_span = span->next;
+ span_t* prev_span = span->prev;
+ prev_span->next = next_span;
+ if (EXPECTED(next_span != 0)) {
+ next_span->prev = prev_span;
+ } else {
+ //Update pointer to tail span
+ (*head)->prev = prev_span;
+ }
+ }
+}
+
+#if ENABLE_GLOBAL_CACHE
+
+//! Insert the given list of memory page spans in the global cache.
+//! Lock-free: the cache head pointer packs an ABA counter into the low bits
+//! not covered by _memory_span_mask, and is swapped with compare-and-swap.
+static void
+_memory_cache_insert(global_cache_t* cache, span_t* span, size_t cache_limit) {
+ assert((span->list_size == 1) || (span->next != 0));
+ int32_t list_size = (int32_t)span->list_size;
+ //Unmap if cache has reached the limit
+ if (atomic_add32(&cache->size, list_size) > (int32_t)cache_limit) {
+#if !ENABLE_UNLIMITED_GLOBAL_CACHE
+ _memory_unmap_span_list(span);
+ atomic_add32(&cache->size, -list_size);
+ return;
+#endif
+ }
+ void* current_cache, *new_cache;
+ do {
+ current_cache = atomic_load_ptr(&cache->cache);
+ //Link previous cache head through span->prev; strip the ABA counter bits first
+ span->prev = (span_t*)((uintptr_t)current_cache & _memory_span_mask);
+ new_cache = (void*)((uintptr_t)span | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
+ } while (!atomic_cas_ptr(&cache->cache, new_cache, current_cache));
+}
+
+//! Extract a number of memory page spans from the global cache.
+//! Returns the span list at the cache head, or 0 if the cache is empty.
+static span_t*
+_memory_cache_extract(global_cache_t* cache) {
+ uintptr_t span_ptr;
+ do {
+ void* global_span = atomic_load_ptr(&cache->cache);
+ //Mask off the packed ABA counter bits to recover the span pointer
+ span_ptr = (uintptr_t)global_span & _memory_span_mask;
+ if (span_ptr) {
+ span_t* span = (span_t*)span_ptr;
+ //By accessing the span ptr before it is swapped out of list we assume that a contending thread
+ //does not manage to traverse the span to being unmapped before we access it
+ void* new_cache = (void*)((uintptr_t)span->prev | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
+ if (atomic_cas_ptr(&cache->cache, new_cache, global_span)) {
+ atomic_add32(&cache->size, -(int32_t)span->list_size);
+ return span;
+ }
+ }
+ } while (span_ptr);
+ return 0;
+}
+
+//! Finalize a global cache, only valid from allocator finalization (not thread safe).
+//! Unmaps every cached span list and resets the cache to empty.
+static void
+_memory_cache_finalize(global_cache_t* cache) {
+ void* current_cache = atomic_load_ptr(&cache->cache);
+ span_t* span = (span_t*)((uintptr_t)current_cache & _memory_span_mask);
+ while (span) {
+ //Each cache entry is itself a span list; entries are chained through prev (with ABA bits)
+ span_t* skip_span = (span_t*)((uintptr_t)span->prev & _memory_span_mask);
+ atomic_add32(&cache->size, -(int32_t)span->list_size);
+ _memory_unmap_span_list(span);
+ span = skip_span;
+ }
+ assert(!atomic_load32(&cache->size));
+ atomic_store_ptr(&cache->cache, 0);
+ atomic_store32(&cache->size, 0);
+}
+
+//! Insert the given list of memory page spans in the global cache bucket
+//! matching the spans' span count
+static void
+_memory_global_cache_insert(span_t* span) {
+ size_t span_count = span->span_count;
+#if ENABLE_UNLIMITED_GLOBAL_CACHE
+ _memory_cache_insert(&_memory_span_cache[span_count - 1], span, 0);
+#else
+ //Limit scales with the per-release batch size for this span class
+ const size_t cache_limit = (GLOBAL_CACHE_MULTIPLIER * ((span_count == 1) ? _memory_span_release_count : _memory_span_release_count_large));
+ _memory_cache_insert(&_memory_span_cache[span_count - 1], span, cache_limit);
+#endif
+}
+
+//! Extract a number of memory page spans from the global cache for large blocks,
+//! from the bucket matching the requested span count (0 if that bucket is empty)
+static span_t*
+_memory_global_cache_extract(size_t span_count) {
+ span_t* span = _memory_cache_extract(&_memory_span_cache[span_count - 1]);
+ assert(!span || (span->span_count == span_count));
+ return span;
+}
+
+#endif
+
+#if ENABLE_THREAD_CACHE
+//! Adopt the deferred span cache list: atomically take ownership of the spans that
+//! other threads freed to this heap, and push them onto the single-span thread cache
+static void
+_memory_heap_cache_adopt_deferred(heap_t* heap) {
+ atomic_thread_fence_acquire();
+ //Cheap early-out load before attempting the CAS loop
+ span_t* span = (span_t*)atomic_load_ptr(&heap->span_cache_deferred);
+ if (!span)
+ return;
+ do {
+ span = (span_t*)atomic_load_ptr(&heap->span_cache_deferred);
+ } while (!atomic_cas_ptr(&heap->span_cache_deferred, 0, span));
+ while (span) {
+ span_t* next_span = span->next;
+ //Deferred spans are single spans, so they go into cache bucket 0
+ _memory_span_list_push(&heap->span_cache[0], span);
+#if ENABLE_STATISTICS
+ atomic_decr32(&heap->span_use[span->span_count - 1].current);
+ ++heap->size_class_use[span->size_class].spans_to_cache;
+ --heap->size_class_use[span->size_class].spans_current;
+#endif
+ span = next_span;
+ }
+}
+#endif
+
+//! Insert a single span into thread heap cache, releasing to global cache if overflow.
+//! With thread cache disabled the span is unmapped immediately.
+static void
+_memory_heap_cache_insert(heap_t* heap, span_t* span) {
+#if ENABLE_THREAD_CACHE
+ size_t span_count = span->span_count;
+ size_t idx = span_count - 1;
+ _memory_statistics_inc(heap->span_use[idx].spans_to_cache, 1);
+ //Single-span inserts first fold in any cross-thread deferred frees
+ if (!idx)
+ _memory_heap_cache_adopt_deferred(heap);
+#if ENABLE_UNLIMITED_THREAD_CACHE
+ _memory_span_list_push(&heap->span_cache[idx], span);
+#else
+ const size_t release_count = (!idx ? _memory_span_release_count : _memory_span_release_count_large);
+ size_t current_cache_size = _memory_span_list_push(&heap->span_cache[idx], span);
+ if (current_cache_size <= release_count)
+ return;
+ const size_t hard_limit = release_count * THREAD_CACHE_MULTIPLIER;
+ if (current_cache_size <= hard_limit) {
+#if ENABLE_ADAPTIVE_THREAD_CACHE
+ //Require 25% of high water mark to remain in cache (and at least 1, if use is 0)
+ const size_t high_mark = heap->span_use[idx].high;
+ const size_t min_limit = (high_mark >> 2) + release_count + 1;
+ if (current_cache_size < min_limit)
+ return;
+#else
+ return;
+#endif
+ }
+ //Overflow: keep release_count spans in the thread cache, hand the rest to the global cache
+ heap->span_cache[idx] = _memory_span_list_split(span, release_count);
+ assert(span->list_size == release_count);
+#if ENABLE_STATISTICS
+ heap->thread_to_global += (size_t)span->list_size * span_count * _memory_span_size;
+ heap->span_use[idx].spans_to_global += span->list_size;
+#endif
+#if ENABLE_GLOBAL_CACHE
+ _memory_global_cache_insert(span);
+#else
+ _memory_unmap_span_list(span);
+#endif
+#endif
+#else
+ (void)sizeof(heap);
+ _memory_unmap_span(span);
+#endif
+}
+
+//! Extract a span of the given size from the thread cache,
+//! adopting deferred frees first for single spans; returns 0 on miss
+static span_t*
+_memory_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
+#if ENABLE_THREAD_CACHE
+ size_t idx = span_count - 1;
+ if (!idx)
+ _memory_heap_cache_adopt_deferred(heap);
+ if (heap->span_cache[idx]) {
+#if ENABLE_STATISTICS
+ heap->span_use[idx].spans_from_cache++;
+#endif
+ return _memory_span_list_pop(&heap->span_cache[idx]);
+ }
+#endif
+ return 0;
+}
+
+//! Extract a span from the heap's reserved span range if it is large enough,
+//! otherwise return 0 (never maps new memory)
+static span_t*
+_memory_heap_reserved_extract(heap_t* heap, size_t span_count) {
+ if (heap->spans_reserved >= span_count)
+ return _memory_map_spans(heap, span_count);
+ return 0;
+}
+
+//! Extract a span from the global cache. The extracted list is parked in the heap's
+//! thread cache bucket and its head span is popped and returned (0 on miss).
+static span_t*
+_memory_heap_global_cache_extract(heap_t* heap, size_t span_count) {
+#if ENABLE_GLOBAL_CACHE
+ size_t idx = span_count - 1;
+ heap->span_cache[idx] = _memory_global_cache_extract(span_count);
+ if (heap->span_cache[idx]) {
+#if ENABLE_STATISTICS
+ heap->global_to_thread += (size_t)heap->span_cache[idx]->list_size * span_count * _memory_span_size;
+ heap->span_use[idx].spans_from_global += heap->span_cache[idx]->list_size;
+#endif
+ return _memory_span_list_pop(&heap->span_cache[idx]);
+ }
+#endif
+ return 0;
+}
+
+//! Get a span from one of the cache levels (thread cache, reserved, global cache) or fallback to mapping more memory.
+//! class_idx is only used for statistics bookkeeping.
+static span_t*
+_memory_heap_extract_new_span(heap_t* heap, size_t span_count, uint32_t class_idx) {
+ (void)sizeof(class_idx);
+#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
+ //Track current/high-water span usage for the adaptive cache heuristics
+ uint32_t idx = (uint32_t)span_count - 1;
+ uint32_t current_count = (uint32_t)atomic_incr32(&heap->span_use[idx].current);
+ if (current_count > heap->span_use[idx].high)
+ heap->span_use[idx].high = current_count;
+#if ENABLE_STATISTICS
+ uint32_t spans_current = ++heap->size_class_use[class_idx].spans_current;
+ if (spans_current > heap->size_class_use[class_idx].spans_peak)
+ heap->size_class_use[class_idx].spans_peak = spans_current;
+#endif
+#endif
+ //Cache levels in order of cost: thread cache, heap reserve, global cache
+ span_t* span = _memory_heap_thread_cache_extract(heap, span_count);
+ if (EXPECTED(span != 0)) {
+ _memory_statistics_inc(heap->size_class_use[class_idx].spans_from_cache, 1);
+ return span;
+ }
+ span = _memory_heap_reserved_extract(heap, span_count);
+ if (EXPECTED(span != 0)) {
+ _memory_statistics_inc(heap->size_class_use[class_idx].spans_from_reserved, 1);
+ return span;
+ }
+ span = _memory_heap_global_cache_extract(heap, span_count);
+ if (EXPECTED(span != 0)) {
+ _memory_statistics_inc(heap->size_class_use[class_idx].spans_from_cache, 1);
+ return span;
+ }
+ //Final fallback, map in more virtual memory
+ span = _memory_map_spans(heap, span_count);
+ _memory_statistics_inc(heap->size_class_use[class_idx].spans_map_calls, 1);
+ return span;
+}
+
+//! Move the span (used for small or medium allocations) to the heap thread cache.
+//! The span must not be the active span of its size class.
+static void
+_memory_span_release_to_cache(heap_t* heap, span_t* span) {
+ heap_class_t* heap_class = heap->span_class + span->size_class;
+ assert(heap_class->partial_span != span);
+ //A partial span must first be unlinked from the size class partial list
+ if (span->state == SPAN_STATE_PARTIAL)
+ _memory_span_partial_list_remove(&heap_class->partial_span, span);
+#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
+ atomic_decr32(&heap->span_use[0].current);
+#endif
+ _memory_statistics_inc(heap->span_use[0].spans_to_cache, 1);
+ _memory_statistics_inc(heap->size_class_use[span->size_class].spans_to_cache, 1);
+ _memory_statistics_dec(heap->size_class_use[span->size_class].spans_current, 1);
+ _memory_heap_cache_insert(heap, span);
+}
+
+//! Initialize a (partial) free list up to next system memory page, while reserving the first block
+//! as allocated, returning number of blocks in list. The first block is written to *first_block
+//! and the list of the remaining linked blocks to *list (0 if only one block fits).
+static uint32_t
+free_list_partial_init(void** list, void** first_block, void* page_start, void* block_start,
+ uint32_t block_count, uint32_t block_size) {
+ assert(block_count);
+ *first_block = block_start;
+ if (block_count > 1) {
+ void* free_block = pointer_offset(block_start, block_size);
+ void* block_end = pointer_offset(block_start, block_size * block_count);
+ //If block size is less than half a memory page, bound init to next memory page boundary
+ if (block_size < (_memory_page_size >> 1)) {
+ void* page_end = pointer_offset(page_start, _memory_page_size);
+ if (page_end < block_end)
+ block_end = page_end;
+ }
+ *list = free_block;
+ //Count starts at 2: the reserved first block plus the head of the free list
+ block_count = 2;
+ void* next_block = pointer_offset(free_block, block_size);
+ while (next_block < block_end) {
+ //Each free block stores a pointer to the next free block in its first bytes
+ *((void**)free_block) = next_block;
+ free_block = next_block;
+ ++block_count;
+ next_block = pointer_offset(next_block, block_size);
+ }
+ *((void**)free_block) = 0;
+ } else {
+ *list = 0;
+ }
+ return block_count;
+}
+
+//! Initialize an unused span (from cache or mapped) to be new active span for the given
+//! size class, and return the first (already reserved) block of the new free list
+static void*
+_memory_span_set_new_active(heap_t* heap, heap_class_t* heap_class, span_t* span, uint32_t class_idx) {
+ assert(span->span_count == 1);
+ size_class_t* size_class = _memory_size_class + class_idx;
+ span->size_class = class_idx;
+ span->heap = heap;
+ span->flags &= ~SPAN_FLAG_ALIGNED_BLOCKS;
+ span->block_count = size_class->block_count;
+ span->block_size = size_class->block_size;
+ span->state = SPAN_STATE_ACTIVE;
+ span->free_list = 0;
+
+ //Setup free list. Only initialize one system page worth of free blocks in list
+ void* block;
+ //Blocks start immediately after the span header
+ span->free_list_limit = free_list_partial_init(&heap_class->free_list, &block,
+ span, pointer_offset(span, SPAN_HEADER_SIZE), size_class->block_count, size_class->block_size);
+ atomic_store_ptr(&span->free_list_deferred, 0);
+ span->list_size = 0;
+ //Publish the initialized span state before linking it where other threads can find it
+ atomic_thread_fence_release();
+
+ _memory_span_partial_list_add(&heap_class->partial_span, span);
+ return block;
+}
+
+//! Promote a partially used span (from heap used list) to be new active span,
+//! transferring its free list to the heap size class free list
+static void
+_memory_span_set_partial_active(heap_class_t* heap_class, span_t* span) {
+ assert(span->state == SPAN_STATE_PARTIAL);
+ assert(span->block_count == _memory_size_class[span->size_class].block_count);
+ //Move data to heap size class and set span as active
+ heap_class->free_list = span->free_list;
+ span->state = SPAN_STATE_ACTIVE;
+ span->free_list = 0;
+ assert(heap_class->free_list);
+}
+
+//! Mark span as full (from active): remove it from the partial list head and
+//! treat all blocks as in use. A full span is free floating, not part of any list.
+static void
+_memory_span_set_active_full(heap_class_t* heap_class, span_t* span) {
+ assert(span->state == SPAN_STATE_ACTIVE);
+ assert(span == heap_class->partial_span);
+ _memory_span_partial_list_pop_head(&heap_class->partial_span);
+ span->used_count = span->block_count;
+ span->state = SPAN_STATE_FULL;
+ span->free_list = 0;
+}
+
+//! Move span from full to partial state and append it to the tail of the
+//! size class partial span list
+static void
+_memory_span_set_full_partial(heap_t* heap, span_t* span) {
+ assert(span->state == SPAN_STATE_FULL);
+ heap_class_t* heap_class = &heap->span_class[span->size_class];
+ span->state = SPAN_STATE_PARTIAL;
+ _memory_span_partial_list_add_tail(&heap_class->partial_span, span);
+}
+
+//! Atomically take ownership of the span's deferred free list (blocks freed by
+//! other threads) and return it; INVALID_POINTER acts as an in-progress sentinel
+static void*
+_memory_span_extract_deferred(span_t* span) {
+ void* free_list;
+ do {
+ free_list = atomic_load_ptr(&span->free_list_deferred);
+ //Retry while another thread holds the sentinel or the swap races
+ } while ((free_list == INVALID_POINTER) || !atomic_cas_ptr(&span->free_list_deferred, INVALID_POINTER, free_list));
+ span->list_size = 0;
+ atomic_store_ptr(&span->free_list_deferred, 0);
+ atomic_thread_fence_release();
+ return free_list;
+}
+
+//! Pop first block from a free list; the next-block pointer is stored
+//! in the first bytes of each free block. Caller must not pass an empty list.
+static void*
+free_list_pop(void** list) {
+ void* block = *list;
+ *list = *((void**)block);
+ return block;
+}
+
+//! Allocate a small/medium sized memory block from the given heap when the heap
+//! class free list is empty, refilling from (in order): the active span's own free
+//! list, its uninitialized tail, its deferred frees, a partial span, or a new span
+static void*
+_memory_allocate_from_heap_fallback(heap_t* heap, uint32_t class_idx) {
+ heap_class_t* heap_class = &heap->span_class[class_idx];
+ void* block;
+
+ span_t* active_span = heap_class->partial_span;
+ if (EXPECTED(active_span != 0)) {
+ assert(active_span->state == SPAN_STATE_ACTIVE);
+ assert(active_span->block_count == _memory_size_class[active_span->size_class].block_count);
+ //Swap in free list if not empty
+ if (active_span->free_list) {
+ heap_class->free_list = active_span->free_list;
+ active_span->free_list = 0;
+ return free_list_pop(&heap_class->free_list);
+ }
+ //If the span did not fully initialize free list, link up another page worth of blocks
+ if (active_span->free_list_limit < active_span->block_count) {
+ void* block_start = pointer_offset(active_span, SPAN_HEADER_SIZE + (active_span->free_list_limit * active_span->block_size));
+ active_span->free_list_limit += free_list_partial_init(&heap_class->free_list, &block,
+ (void*)((uintptr_t)block_start & ~(_memory_page_size - 1)), block_start,
+ active_span->block_count - active_span->free_list_limit, active_span->block_size);
+ return block;
+ }
+ //Swap in deferred free list
+ atomic_thread_fence_acquire();
+ if (atomic_load_ptr(&active_span->free_list_deferred)) {
+ heap_class->free_list = _memory_span_extract_deferred(active_span);
+ return free_list_pop(&heap_class->free_list);
+ }
+
+ //If the active span is fully allocated, mark span as free floating (fully allocated and not part of any list)
+ assert(!heap_class->free_list);
+ assert(active_span->free_list_limit >= active_span->block_count);
+ _memory_span_set_active_full(heap_class, active_span);
+ }
+ assert(!heap_class->free_list);
+
+ //Try promoting a semi-used span to active
+ active_span = heap_class->partial_span;
+ if (EXPECTED(active_span != 0)) {
+ _memory_span_set_partial_active(heap_class, active_span);
+ return free_list_pop(&heap_class->free_list);
+ }
+ assert(!heap_class->free_list);
+ assert(!heap_class->partial_span);
+
+ //Find a span in one of the cache levels
+ active_span = _memory_heap_extract_new_span(heap, 1, class_idx);
+
+ //Mark span as owned by this heap and set base data, return first block
+ return _memory_span_set_new_active(heap, heap_class, active_span, class_idx);
+}
+
+//! Allocate a small sized memory block from the given heap.
+//! Fast path is a single pop from the size class free list.
+static void*
+_memory_allocate_small(heap_t* heap, size_t size) {
+ //Small sizes have unique size classes: round size up to the next granularity step
+ const uint32_t class_idx = (uint32_t)((size + (SMALL_GRANULARITY - 1)) >> SMALL_GRANULARITY_SHIFT);
+ _memory_statistics_inc_alloc(heap, class_idx);
+ if (EXPECTED(heap->span_class[class_idx].free_list != 0))
+ return free_list_pop(&heap->span_class[class_idx].free_list);
+ return _memory_allocate_from_heap_fallback(heap, class_idx);
+}
+
+//! Allocate a medium sized memory block from the given heap.
+//! Fast path is a single pop from the size class free list.
+static void*
+_memory_allocate_medium(heap_t* heap, size_t size) {
+ //Calculate the size class index and do a dependent lookup of the final class index (in case of merged classes)
+ const uint32_t base_idx = (uint32_t)(SMALL_CLASS_COUNT + ((size - (SMALL_SIZE_LIMIT + 1)) >> MEDIUM_GRANULARITY_SHIFT));
+ const uint32_t class_idx = _memory_size_class[base_idx].class_idx;
+ _memory_statistics_inc_alloc(heap, class_idx);
+ if (EXPECTED(heap->span_class[class_idx].free_list != 0))
+ return free_list_pop(&heap->span_class[class_idx].free_list);
+ return _memory_allocate_from_heap_fallback(heap, class_idx);
+}
+
+//! Allocate a large sized memory block from the given heap
+static void*
+_memory_allocate_large(heap_t* heap, size_t size) {
+ //Calculate number of needed max sized spans (including header)
+ //Since this function is never called if size > LARGE_SIZE_LIMIT
+ //the span_count is guaranteed to be <= LARGE_CLASS_COUNT
+ size += SPAN_HEADER_SIZE;
+ size_t span_count = size >> _memory_span_size_shift;
+ //Round up if the size is not an exact multiple of the span size
+ if (size & (_memory_span_size - 1))
+ ++span_count;
+ size_t idx = span_count - 1;
+
+ //Find a span in one of the cache levels
+ span_t* span = _memory_heap_extract_new_span(heap, span_count, SIZE_CLASS_COUNT);
+
+ //Mark span as owned by this heap and set base data
+ assert(span->span_count == span_count);
+ //Large spans encode their span count in size_class as SIZE_CLASS_COUNT + (count - 1)
+ span->size_class = (uint32_t)(SIZE_CLASS_COUNT + idx);
+ span->heap = heap;
+ atomic_thread_fence_release();
+
+ return pointer_offset(span, SPAN_HEADER_SIZE);
+}
+
+//! Allocate a huge block by mapping memory pages directly.
+//! Returns 0 if the mapping fails.
+static void*
+_memory_allocate_huge(size_t size) {
+ size += SPAN_HEADER_SIZE;
+ size_t num_pages = size >> _memory_page_size_shift;
+ if (size & (_memory_page_size - 1))
+ ++num_pages;
+ size_t align_offset = 0;
+ span_t* span = (span_t*)_memory_map(num_pages * _memory_page_size, &align_offset);
+ if (!span)
+ return span;
+ //Store page count in span_count; size_class of (uint32_t)-1 marks a huge span
+ span->size_class = (uint32_t)-1;
+ span->span_count = (uint32_t)num_pages;
+ span->align_offset = (uint32_t)align_offset;
+ _memory_statistics_add_peak(&_huge_pages_current, num_pages, _huge_pages_peak);
+
+ return pointer_offset(span, SPAN_HEADER_SIZE);
+}
+
+//! Allocate a block larger than medium size, dispatching to the large span
+//! allocator or to direct page mapping for huge sizes
+static void*
+_memory_allocate_oversized(heap_t* heap, size_t size) {
+ if (size <= LARGE_SIZE_LIMIT)
+ return _memory_allocate_large(heap, size);
+ return _memory_allocate_huge(size);
+}
+
+//! Allocate a block of the given size from the given heap,
+//! dispatching on size to the small/medium/oversized paths
+static void*
+_memory_allocate(heap_t* heap, size_t size) {
+ if (EXPECTED(size <= SMALL_SIZE_LIMIT))
+ return _memory_allocate_small(heap, size);
+ else if (size <= _memory_medium_size_limit)
+ return _memory_allocate_medium(heap, size);
+ return _memory_allocate_oversized(heap, size);
+}
+
+//! Allocate a new heap, preferring to reuse an orphaned heap left behind by a
+//! terminated thread; otherwise map fresh pages, assign a unique heap ID and
+//! link the heap into the global heap ID map. Returns 0 on mapping failure.
+static heap_t*
+_memory_allocate_heap(void) {
+ void* raw_heap;
+ void* next_raw_heap;
+ uintptr_t orphan_counter;
+ heap_t* heap;
+ heap_t* next_heap;
+ //Try getting an orphaned heap
+ atomic_thread_fence_acquire();
+ do {
+ raw_heap = atomic_load_ptr(&_memory_orphan_heaps);
+ //Low 9 bits of the orphan list pointer hold an ABA counter; mask to get the heap
+ heap = (heap_t*)((uintptr_t)raw_heap & ~(uintptr_t)0x1FF);
+ if (!heap)
+ break;
+ next_heap = heap->next_orphan;
+ orphan_counter = (uintptr_t)atomic_incr32(&_memory_orphan_counter);
+ next_raw_heap = (void*)((uintptr_t)next_heap | (orphan_counter & (uintptr_t)0x1FF));
+ } while (!atomic_cas_ptr(&_memory_orphan_heaps, next_raw_heap, raw_heap));
+
+ if (!heap) {
+ //Map in pages for a new heap
+ size_t align_offset = 0;
+ heap = (heap_t*)_memory_map((1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size, &align_offset);
+ if (!heap)
+ return heap;
+ memset((char*)heap, 0, sizeof(heap_t));
+ heap->align_offset = align_offset;
+
+ //Get a new heap ID (retry on the unlikely wraparound collision with an existing ID)
+ do {
+ heap->id = atomic_incr32(&_memory_heap_id);
+ if (_memory_heap_lookup(heap->id))
+ heap->id = 0;
+ } while (!heap->id);
+
+ //Link in heap in heap ID map
+ size_t list_idx = heap->id % HEAP_ARRAY_SIZE;
+ do {
+ next_heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
+ heap->next_heap = next_heap;
+ } while (!atomic_cas_ptr(&_memory_heaps[list_idx], heap, next_heap));
+ }
+
+ return heap;
+}
+
+//! Deallocate the given small/medium memory block in the current thread local heap.
+//! Must only be called from the thread owning the span's heap.
+static void
+_memory_deallocate_direct(span_t* span, void* block) {
+ assert(span->heap == get_thread_heap_raw());
+ uint32_t state = span->state;
+ //Add block to free list
+ *((void**)block) = span->free_list;
+ span->free_list = block;
+ //Active spans only track their local free list; no further state transitions needed
+ if (UNEXPECTED(state == SPAN_STATE_ACTIVE))
+ return;
+ uint32_t used = --span->used_count;
+ //list_size here counts blocks freed through the deferred list (see _memory_deallocate_defer)
+ uint32_t free = span->list_size;
+ if (UNEXPECTED(used == free))
+ _memory_span_release_to_cache(span->heap, span);
+ else if (UNEXPECTED(state == SPAN_STATE_FULL))
+ _memory_span_set_full_partial(span->heap, span);
+}
+
+//! Put the block in the deferred free list of the owning span (cross-thread free path)
+static void
+_memory_deallocate_defer(span_t* span, void* block) {
+ atomic_thread_fence_acquire();
+ if (span->state == SPAN_STATE_FULL) {
+ if ((span->list_size + 1) == span->block_count) {
+ //Span will be completely freed by deferred deallocations, no other thread can
+ //currently touch it. Safe to move to owner heap deferred cache
+ span_t* last_head;
+ heap_t* heap = span->heap;
+ do {
+ last_head = (span_t*)atomic_load_ptr(&heap->span_cache_deferred);
+ span->next = last_head;
+ } while (!atomic_cas_ptr(&heap->span_cache_deferred, span, last_head));
+ return;
+ }
+ }
+
+ //Lock the deferred list by swapping in the INVALID_POINTER sentinel, then
+ //link the block in front of the previous list head
+ void* free_list;
+ do {
+ atomic_thread_fence_acquire();
+ free_list = atomic_load_ptr(&span->free_list_deferred);
+ *((void**)block) = free_list;
+ } while ((free_list == INVALID_POINTER) || !atomic_cas_ptr(&span->free_list_deferred, INVALID_POINTER, free_list));
+ ++span->list_size;
+ //Publishing the new head also releases the sentinel lock
+ atomic_store_ptr(&span->free_list_deferred, block);
+}
+
+//! Deallocate the given small/medium block, realigning the pointer to the block
+//! start if the span holds aligned blocks, and choosing the direct (same-thread)
+//! or deferred (cross-thread) deallocation path
+static void
+_memory_deallocate_small_or_medium(span_t* span, void* p) {
+ _memory_statistics_inc_free(span->heap, span->size_class);
+ if (span->flags & SPAN_FLAG_ALIGNED_BLOCKS) {
+ //Realign pointer to block start
+ void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
+ uint32_t block_offset = (uint32_t)pointer_diff(p, blocks_start);
+ p = pointer_offset(p, -(int32_t)(block_offset % span->block_size));
+ }
+ //Check if block belongs to this heap or if deallocation should be deferred
+ if (span->heap == get_thread_heap_raw())
+ _memory_deallocate_direct(span, p);
+ else
+ _memory_deallocate_defer(span, p);
+}
+
+//! Deallocate the given large memory block to the current heap,
+//! preferring to keep the span as the heap's reserve when none is held
+static void
+_memory_deallocate_large(span_t* span) {
+ //Decrease counter
+ assert(span->span_count == ((size_t)span->size_class - SIZE_CLASS_COUNT + 1));
+ assert(span->size_class >= SIZE_CLASS_COUNT);
+ assert(span->size_class - SIZE_CLASS_COUNT < LARGE_CLASS_COUNT);
+ assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN));
+ assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN));
+ //Large blocks can always be deallocated and transferred between heaps
+ //Investigate if it is better to defer large spans as well through span_cache_deferred,
+ //possibly with some heuristics to pick either scheme at runtime per deallocation
+ heap_t* heap = get_thread_heap();
+ if (!heap) return;
+#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
+ size_t idx = span->span_count - 1;
+ atomic_decr32(&span->heap->span_use[idx].current);
+#endif
+ if ((span->span_count > 1) && !heap->spans_reserved) {
+ //Keep this multi-span as the heap reserve instead of caching it
+ heap->span_reserve = span;
+ heap->spans_reserved = span->span_count;
+ if (span->flags & SPAN_FLAG_MASTER) {
+ heap->span_reserve_master = span;
+ } else { //SPAN_FLAG_SUBSPAN
+ uint32_t distance = span->total_spans_or_distance;
+ span_t* master = (span_t*)pointer_offset(span, -(int32_t)(distance * _memory_span_size));
+ heap->span_reserve_master = master;
+ assert(master->flags & SPAN_FLAG_MASTER);
+ assert(atomic_load32(&master->remaining_spans) >= (int32_t)span->span_count);
+ }
+ _memory_statistics_inc(heap->span_use[idx].spans_to_reserved, 1);
+ } else {
+ //Insert into cache list
+ _memory_heap_cache_insert(heap, span);
+ }
+}
+
+//! Deallocate the given huge span by unmapping its page range directly
+static void
+_memory_deallocate_huge(span_t* span) {
+ //Oversized allocation, page count is stored in span_count
+ size_t num_pages = span->span_count;
+ _memory_unmap(span, num_pages * _memory_page_size, span->align_offset, num_pages * _memory_page_size);
+ _memory_statistics_sub(&_huge_pages_current, num_pages);
+}
+
+//! Deallocate the given block, dispatching on the owning span's size class:
+//! small/medium, large, or huge ((uint32_t)-1). Null pointers are ignored.
+static void
+_memory_deallocate(void* p) {
+ //Grab the span (always at start of span, using span alignment)
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
+ if (UNEXPECTED(!span))
+ return;
+ if (EXPECTED(span->size_class < SIZE_CLASS_COUNT))
+ _memory_deallocate_small_or_medium(span, p);
+ else if (span->size_class != (uint32_t)-1)
+ _memory_deallocate_large(span);
+ else
+ _memory_deallocate_huge(span);
+}
+
+//! Reallocate the given block to the given size. If the new size still fits the
+//! existing block (and is not excessively smaller), the block is reused in place,
+//! preserving data if the block base differs from p (unless RPMALLOC_NO_PRESERVE).
+//! Otherwise a new block is allocated, data copied, and the old block freed.
+static void*
+_memory_reallocate(void* p, size_t size, size_t oldsize, unsigned int flags) {
+ if (p) {
+ //Grab the span using guaranteed span alignment
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
+ if (span->heap) {
+ if (span->size_class < SIZE_CLASS_COUNT) {
+ //Small/medium sized block
+ assert(span->span_count == 1);
+ void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
+ uint32_t block_offset = (uint32_t)pointer_diff(p, blocks_start);
+ uint32_t block_idx = block_offset / span->block_size;
+ void* block = pointer_offset(blocks_start, block_idx * span->block_size);
+ if (!oldsize)
+ oldsize = span->block_size - (uint32_t)pointer_diff(p, block);
+ if ((size_t)span->block_size >= size) {
+ //Still fits in block, never mind trying to save memory, but preserve data if alignment changed
+ if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
+ memmove(block, p, oldsize);
+ return block;
+ }
+ } else {
+ //Large block
+ size_t total_size = size + SPAN_HEADER_SIZE;
+ size_t num_spans = total_size >> _memory_span_size_shift;
+ //Round up on a partial span; test against the span size remainder bits
+ //(was erroneously _memory_span_mask - 1, which is an address mask, not a size mask)
+ if (total_size & (_memory_span_size - 1))
+ ++num_spans;
+ size_t current_spans = span->span_count;
+ assert(current_spans == ((span->size_class - SIZE_CLASS_COUNT) + 1));
+ void* block = pointer_offset(span, SPAN_HEADER_SIZE);
+ if (!oldsize)
+ oldsize = (current_spans * _memory_span_size) - (size_t)pointer_diff(p, block) - SPAN_HEADER_SIZE;
+ //Reuse if it fits and the new size is at least half the current capacity
+ if ((current_spans >= num_spans) && (num_spans >= (current_spans / 2))) {
+ //Still fits in block, never mind trying to save memory, but preserve data if alignment changed
+ if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
+ memmove(block, p, oldsize);
+ return block;
+ }
+ }
+ } else {
+ //Oversized block
+ size_t total_size = size + SPAN_HEADER_SIZE;
+ size_t num_pages = total_size >> _memory_page_size_shift;
+ if (total_size & (_memory_page_size - 1))
+ ++num_pages;
+ //Page count is stored in span_count
+ size_t current_pages = span->span_count;
+ void* block = pointer_offset(span, SPAN_HEADER_SIZE);
+ if (!oldsize)
+ oldsize = (current_pages * _memory_page_size) - (size_t)pointer_diff(p, block) - SPAN_HEADER_SIZE;
+ if ((current_pages >= num_pages) && (num_pages >= (current_pages / 2))) {
+ //Still fits in block, never mind trying to save memory, but preserve data if alignment changed
+ if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
+ memmove(block, p, oldsize);
+ return block;
+ }
+ }
+ } else {
+ oldsize = 0;
+ }
+
+ //Size is greater than block size, need to allocate a new block and deallocate the old
+ heap_t* heap = get_thread_heap();
+ //Avoid hysteresis by overallocating if increase is small (below 37%)
+ size_t lower_bound = oldsize + (oldsize >> 2) + (oldsize >> 3);
+ size_t new_size = (size > lower_bound) ? size : ((size > oldsize) ? lower_bound : size);
+ void* block = _memory_allocate(heap, new_size);
+ if (p && block) {
+ if (!(flags & RPMALLOC_NO_PRESERVE))
+ memcpy(block, p, oldsize < new_size ? oldsize : new_size);
+ _memory_deallocate(p);
+ }
+
+ return block;
+}
+
+//! Get the usable size of the given block
+//  Returns the number of bytes from p to the end of the block that
+//  contains it (at least the size originally requested). p must be a
+//  pointer into a live allocation; spans are located by masking the
+//  address with the guaranteed span alignment.
+static size_t
+_memory_usable_size(void* p) {
+	//Grab the span using guaranteed span alignment
+	span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
+	if (span->heap) {
+		//Small/medium block
+		if (span->size_class < SIZE_CLASS_COUNT) {
+			//Usable size is from p to the end of its fixed-size block
+			void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
+			return span->block_size - ((size_t)pointer_diff(p, blocks_start) % span->block_size);
+		}
+
+		//Large block - span count is encoded in size_class
+		size_t current_spans = (span->size_class - SIZE_CLASS_COUNT) + 1;
+		return (current_spans * _memory_span_size) - (size_t)pointer_diff(p, span);
+	}
+
+	//Oversized block, page count is stored in span_count
+	size_t current_pages = span->span_count;
+	return (current_pages * _memory_page_size) - (size_t)pointer_diff(p, span);
+}
+
+//! Adjust and optimize the size class properties for the given class
+//  Computes how many blocks of this class fit in one span, then merges
+//  smaller neighbouring classes that yield the same block count per
+//  span (they would waste the same span space, so the larger block
+//  size wins and serves both).
+static void
+_memory_adjust_size_class(size_t iclass) {
+	size_t block_size = _memory_size_class[iclass].block_size;
+	size_t block_count = (_memory_span_size - SPAN_HEADER_SIZE) / block_size;
+
+	_memory_size_class[iclass].block_count = (uint16_t)block_count;
+	_memory_size_class[iclass].class_idx = (uint16_t)iclass;
+
+	//Check if previous size classes can be merged
+	size_t prevclass = iclass;
+	while (prevclass > 0) {
+		--prevclass;
+		//A class can be merged if it stores the same number of blocks per span
+		if (_memory_size_class[prevclass].block_count == _memory_size_class[iclass].block_count)
+			memcpy(_memory_size_class + prevclass, _memory_size_class + iclass, sizeof(_memory_size_class[iclass]));
+		else
+			break;
+	}
+}
+
+extern thread_local bool RpThreadShutdown;
+
+//! Finalize a heap when its owning thread exits: flush the thread
+//  cache back to the global cache (or unmap it) and push the heap
+//  onto the global orphan list so another thread can adopt it.
+static void
+_memory_heap_finalize(void* heapptr) {
+	heap_t* heap = (heap_t*)heapptr;
+	if (!heap)
+		return;
+	RpThreadShutdown = true;
+	//Release thread cache spans back to global cache
+#if ENABLE_THREAD_CACHE
+	_memory_heap_cache_adopt_deferred(heap);
+	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
+		span_t* span = heap->span_cache[iclass];
+#if ENABLE_GLOBAL_CACHE
+		//Move cached spans to the global cache in release_count sized chunks
+		while (span) {
+			assert(span->span_count == (iclass + 1));
+			size_t release_count = (!iclass ? _memory_span_release_count : _memory_span_release_count_large);
+			span_t* next = _memory_span_list_split(span, (uint32_t)release_count);
+#if ENABLE_STATISTICS
+			heap->thread_to_global += (size_t)span->list_size * span->span_count * _memory_span_size;
+			heap->span_use[iclass].spans_to_global += span->list_size;
+#endif
+			_memory_global_cache_insert(span);
+			span = next;
+		}
+#else
+		//No global cache - simply unmap the cached spans
+		if (span)
+			_memory_unmap_span_list(span);
+#endif
+		heap->span_cache[iclass] = 0;
+	}
+#endif
+
+	//Orphan the heap: CAS it onto the orphan list, tagging the pointer
+	//with a counter in the low 9 bits to avoid ABA (see the 512 byte
+	//minimum page size enforced in rpmalloc_initialize_config)
+	void* raw_heap;
+	uintptr_t orphan_counter;
+	heap_t* last_heap;
+	do {
+		last_heap = (heap_t*)atomic_load_ptr(&_memory_orphan_heaps);
+		heap->next_orphan = (heap_t*)((uintptr_t)last_heap & ~(uintptr_t)0x1FF);
+		orphan_counter = (uintptr_t)atomic_incr32(&_memory_orphan_counter);
+		raw_heap = (void*)((uintptr_t)heap | (orphan_counter & (uintptr_t)0x1FF));
+	} while (!atomic_cas_ptr(&_memory_orphan_heaps, raw_heap, last_heap));
+
+	set_thread_heap(0);
+
+#if ENABLE_STATISTICS
+	atomic_decr32(&_memory_active_heaps);
+	assert(atomic_load32(&_memory_active_heaps) >= 0);
+#endif
+}
+
+#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
+#include <fibersapi.h>
+//Fiber-local storage key; its callback fires at thread exit and is
+//used to finalize the thread heap (the value stored is the heap
+//pointer set in rpmalloc_thread_initialize)
+static DWORD fls_key;
+static void NTAPI
+rp_thread_destructor(void* value) {
+	//Non-zero value means the thread actually used rpmalloc
+	if (value)
+		rpmalloc_thread_finalize();
+}
+#endif
+
+#if PLATFORM_POSIX
+# include <sys/mman.h>
+# include <sched.h>
+# ifdef __FreeBSD__
+# include <sys/sysctl.h>
+# define MAP_HUGETLB MAP_ALIGNED_SUPER
+# endif
+# ifndef MAP_UNINITIALIZED
+# define MAP_UNINITIALIZED 0
+# endif
+#endif
+#include <errno.h>
+
+//! Initialize the allocator with default configuration and set up
+//  global data. Safe to call more than once; repeated calls only
+//  (re)initialize the calling thread.
+TRACY_API int
+rpmalloc_initialize(void) {
+	if (!_rpmalloc_initialized) {
+		//Zero the config and delegate to the configurable entry point
+		memset(&_memory_config, 0, sizeof(rpmalloc_config_t));
+		return rpmalloc_initialize_config(0);
+	}
+	rpmalloc_thread_initialize();
+	return 0;
+}
+
+//! Initialize the allocator with the given configuration (or defaults
+//  when config is null) and set up all global data: page/span
+//  geometry, size class tables and thread exit hooks. The adjusted
+//  values are written back into _memory_config so rpmalloc_config()
+//  reflects what is actually used. Returns 0 on success.
+int
+rpmalloc_initialize_config(const rpmalloc_config_t* config) {
+	if (_rpmalloc_initialized) {
+		rpmalloc_thread_initialize();
+		return 0;
+	}
+	_rpmalloc_initialized = 1;
+
+	if (config)
+		memcpy(&_memory_config, config, sizeof(rpmalloc_config_t));
+
+	//Unless both map and unmap callbacks were supplied, use the OS
+	//implementations for both
+	if (!_memory_config.memory_map || !_memory_config.memory_unmap) {
+		_memory_config.memory_map = _memory_map_os;
+		_memory_config.memory_unmap = _memory_unmap_os;
+	}
+
+#if RPMALLOC_CONFIGURABLE
+	_memory_page_size = _memory_config.page_size;
+#else
+	_memory_page_size = 0;
+#endif
+	_memory_huge_pages = 0;
+	_memory_map_granularity = _memory_page_size;
+	//Zero page size means query the OS (and optionally enable huge pages)
+	if (!_memory_page_size) {
+#if PLATFORM_WINDOWS
+		SYSTEM_INFO system_info;
+		memset(&system_info, 0, sizeof(system_info));
+		GetSystemInfo(&system_info);
+		_memory_page_size = system_info.dwPageSize;
+		_memory_map_granularity = system_info.dwAllocationGranularity;
+		if (config && config->enable_huge_pages) {
+			//Large pages on Windows require SeLockMemoryPrivilege
+			HANDLE token = 0;
+			size_t large_page_minimum = GetLargePageMinimum();
+			if (large_page_minimum)
+				OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
+			if (token) {
+				LUID luid;
+				if (LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &luid)) {
+					TOKEN_PRIVILEGES token_privileges;
+					memset(&token_privileges, 0, sizeof(token_privileges));
+					token_privileges.PrivilegeCount = 1;
+					token_privileges.Privileges[0].Luid = luid;
+					token_privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+					if (AdjustTokenPrivileges(token, FALSE, &token_privileges, 0, 0, 0)) {
+						//AdjustTokenPrivileges can succeed without granting
+						//the privilege - check GetLastError for the result
+						DWORD err = GetLastError();
+						if (err == ERROR_SUCCESS) {
+							_memory_huge_pages = 1;
+							_memory_page_size = large_page_minimum;
+							_memory_map_granularity = large_page_minimum;
+						}
+					}
+				}
+				CloseHandle(token);
+			}
+		}
+#else
+		_memory_page_size = (size_t)sysconf(_SC_PAGESIZE);
+		_memory_map_granularity = _memory_page_size;
+		if (config && config->enable_huge_pages) {
+#if defined(__linux__)
+			//Parse the huge page size from /proc/meminfo ("Hugepagesize: N kB")
+			size_t huge_page_size = 0;
+			FILE* meminfo = fopen("/proc/meminfo", "r");
+			if (meminfo) {
+				char line[128];
+				while (!huge_page_size && fgets(line, sizeof(line) - 1, meminfo)) {
+					line[sizeof(line) - 1] = 0;
+					if (strstr(line, "Hugepagesize:"))
+						huge_page_size = (size_t)strtol(line + 13, 0, 10) * 1024;
+				}
+				fclose(meminfo);
+			}
+			if (huge_page_size) {
+				_memory_huge_pages = 1;
+				_memory_page_size = huge_page_size;
+				_memory_map_granularity = huge_page_size;
+			}
+#elif defined(__FreeBSD__)
+			int rc;
+			size_t sz = sizeof(rc);
+
+			//Superpages are only usable when enabled in the kernel
+			if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 && rc == 1) {
+				_memory_huge_pages = 1;
+				_memory_page_size = 2 * 1024 * 1024;
+				_memory_map_granularity = _memory_page_size;
+			}
+#elif defined(__APPLE__)
+			_memory_huge_pages = 1;
+			_memory_page_size = 2 * 1024 * 1024;
+			_memory_map_granularity = _memory_page_size;
+#endif
+		}
+#endif
+	} else {
+		if (config && config->enable_huge_pages)
+			_memory_huge_pages = 1;
+	}
+
+	//The ABA counter in heap orphan list is tied to using 512 (bitmask 0x1FF)
+	if (_memory_page_size < 512)
+		_memory_page_size = 512;
+	if (_memory_page_size > (64 * 1024 * 1024))
+		_memory_page_size = (64 * 1024 * 1024);
+	//Round the page size down to a power of two and compute its shift
+	_memory_page_size_shift = 0;
+	size_t page_size_bit = _memory_page_size;
+	while (page_size_bit != 1) {
+		++_memory_page_size_shift;
+		page_size_bit >>= 1;
+	}
+	_memory_page_size = ((size_t)1 << _memory_page_size_shift);
+
+#if RPMALLOC_CONFIGURABLE
+	//Clamp the span size to [4KiB, 256KiB] and round up to a power of two
+	size_t span_size = _memory_config.span_size;
+	if (!span_size)
+		span_size = (64 * 1024);
+	if (span_size > (256 * 1024))
+		span_size = (256 * 1024);
+	_memory_span_size = 4096;
+	_memory_span_size_shift = 12;
+	while (_memory_span_size < span_size) {
+		_memory_span_size <<= 1;
+		++_memory_span_size_shift;
+	}
+	_memory_span_mask = ~(uintptr_t)(_memory_span_size - 1);
+#endif
+
+	//Spans mapped per call must cover at least one page and keep page alignment
+	_memory_span_map_count = ( _memory_config.span_map_count ? _memory_config.span_map_count : DEFAULT_SPAN_MAP_COUNT);
+	if ((_memory_span_size * _memory_span_map_count) < _memory_page_size)
+		_memory_span_map_count = (_memory_page_size / _memory_span_size);
+	if ((_memory_page_size >= _memory_span_size) && ((_memory_span_map_count * _memory_span_size) % _memory_page_size))
+		_memory_span_map_count = (_memory_page_size / _memory_span_size);
+
+	//Write the adjusted values back so rpmalloc_config() reflects reality
+	_memory_config.page_size = _memory_page_size;
+	_memory_config.span_size = _memory_span_size;
+	_memory_config.span_map_count = _memory_span_map_count;
+	_memory_config.enable_huge_pages = _memory_huge_pages;
+
+	_memory_span_release_count = (_memory_span_map_count > 4 ? ((_memory_span_map_count < 64) ? _memory_span_map_count : 64) : 4);
+	_memory_span_release_count_large = (_memory_span_release_count > 8 ? (_memory_span_release_count / 4) : 2);
+
+#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+	if (pthread_key_create(&_memory_thread_heap, _memory_heap_finalize))
+		return -1;
+#endif
+#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
+	//Fiber local storage callback fires at thread exit for heap cleanup
+	fls_key = FlsAlloc(&rp_thread_destructor);
+#endif
+
+	atomic_store32(&_memory_heap_id, 0);
+	atomic_store32(&_memory_orphan_counter, 0);
+#if ENABLE_STATISTICS
+	atomic_store32(&_memory_active_heaps, 0);
+	atomic_store32(&_reserved_spans, 0);
+	atomic_store32(&_mapped_pages, 0);
+	_mapped_pages_peak = 0;
+	atomic_store32(&_mapped_total, 0);
+	atomic_store32(&_unmapped_total, 0);
+	atomic_store32(&_mapped_pages_os, 0);
+	atomic_store32(&_huge_pages_current, 0);
+	_huge_pages_peak = 0;
+#endif
+
+	//Setup all small and medium size classes
+	size_t iclass = 0;
+	_memory_size_class[iclass].block_size = SMALL_GRANULARITY;
+	_memory_adjust_size_class(iclass);
+	for (iclass = 1; iclass < SMALL_CLASS_COUNT; ++iclass) {
+		size_t size = iclass * SMALL_GRANULARITY;
+		_memory_size_class[iclass].block_size = (uint32_t)size;
+		_memory_adjust_size_class(iclass);
+	}
+	//At least two blocks per span, then fall back to large allocations
+	_memory_medium_size_limit = (_memory_span_size - SPAN_HEADER_SIZE) >> 1;
+	if (_memory_medium_size_limit > MEDIUM_SIZE_LIMIT)
+		_memory_medium_size_limit = MEDIUM_SIZE_LIMIT;
+	for (iclass = 0; iclass < MEDIUM_CLASS_COUNT; ++iclass) {
+		size_t size = SMALL_SIZE_LIMIT + ((iclass + 1) * MEDIUM_GRANULARITY);
+		if (size > _memory_medium_size_limit)
+			break;
+		_memory_size_class[SMALL_CLASS_COUNT + iclass].block_size = (uint32_t)size;
+		_memory_adjust_size_class(SMALL_CLASS_COUNT + iclass);
+	}
+
+	for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx)
+		atomic_store_ptr(&_memory_heaps[list_idx], 0);
+
+	//Initialize this thread
+	rpmalloc_thread_initialize();
+	return 0;
+}
+
+//! Finalize the allocator: release all heaps, thread/global caches and
+//  OS mappings. Under ENABLE_STATISTICS, asserts that every mapping
+//  was returned (i.e. no leaks or double frees in the application).
+TRACY_API void
+rpmalloc_finalize(void) {
+	atomic_thread_fence_acquire();
+
+	rpmalloc_thread_finalize();
+	//rpmalloc_dump_statistics(stderr);
+
+	//Free all thread caches
+	for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
+		heap_t* heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
+		while (heap) {
+			if (heap->spans_reserved) {
+				//Consume remaining reserved spans so they get unmapped
+				span_t* span = _memory_map_spans(heap, heap->spans_reserved);
+				_memory_unmap_span(span);
+			}
+
+			for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
+				heap_class_t* heap_class = heap->span_class + iclass;
+				span_t* span = heap_class->partial_span;
+				while (span) {
+					span_t* next = span->next;
+					if (span->state == SPAN_STATE_ACTIVE) {
+						//Count blocks on the heap local and span local free
+						//lists; the span is fully free (and can be cached)
+						//when they account for all blocks handed out
+						uint32_t used_blocks = span->block_count;
+						if (span->free_list_limit < span->block_count)
+							used_blocks = span->free_list_limit;
+						uint32_t free_blocks = 0;
+						void* block = heap_class->free_list;
+						while (block) {
+							++free_blocks;
+							block = *((void**)block);
+						}
+						block = span->free_list;
+						while (block) {
+							++free_blocks;
+							block = *((void**)block);
+						}
+						if (used_blocks == (free_blocks + span->list_size))
+							_memory_heap_cache_insert(heap, span);
+					} else {
+						if (span->used_count == span->list_size)
+							_memory_heap_cache_insert(heap, span);
+					}
+					span = next;
+				}
+			}
+
+#if ENABLE_THREAD_CACHE
+			//Free span caches (other thread might have deferred after the thread using this heap finalized)
+			_memory_heap_cache_adopt_deferred(heap);
+			for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
+				if (heap->span_cache[iclass])
+					_memory_unmap_span_list(heap->span_cache[iclass]);
+			}
+#endif
+			heap_t* next_heap = heap->next_heap;
+			//Heap structures are mapped as whole pages - release them
+			size_t heap_size = (1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size;
+			_memory_unmap(heap, heap_size, heap->align_offset, heap_size);
+			heap = next_heap;
+		}
+	}
+
+#if ENABLE_GLOBAL_CACHE
+	//Free global caches
+	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass)
+		_memory_cache_finalize(&_memory_span_cache[iclass]);
+#endif
+
+	atomic_store_ptr(&_memory_orphan_heaps, 0);
+	atomic_thread_fence_release();
+
+#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+	pthread_key_delete(_memory_thread_heap);
+#endif
+#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
+	FlsFree(fls_key);
+#endif
+
+#if ENABLE_STATISTICS
+	//If you hit these asserts you probably have memory leaks or double frees in your code
+	assert(!atomic_load32(&_mapped_pages));
+	assert(!atomic_load32(&_reserved_spans));
+	assert(!atomic_load32(&_mapped_pages_os));
+#endif
+
+	_rpmalloc_initialized = 0;
+}
+
+//! Initialize the calling thread: allocate and assign a heap unless
+//  the thread already has one. No-op on allocation failure.
+TRACY_API void
+rpmalloc_thread_initialize(void) {
+	if (get_thread_heap_raw())
+		return;
+	heap_t* heap = _memory_allocate_heap();
+	if (!heap)
+		return;
+	atomic_thread_fence_acquire();
+#if ENABLE_STATISTICS
+	atomic_incr32(&_memory_active_heaps);
+#endif
+	set_thread_heap(heap);
+#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
+	//Store the heap in fiber local storage so the destructor callback
+	//fires at thread exit
+	FlsSetValue(fls_key, heap);
+#endif
+}
+
+//! Finalize the calling thread: release its heap to the orphan list.
+//  Safe to call on a thread that was never initialized.
+TRACY_API void
+rpmalloc_thread_finalize(void) {
+	heap_t* current_heap = get_thread_heap_raw();
+	if (!current_heap)
+		return;
+	_memory_heap_finalize(current_heap);
+}
+
+//! Query whether the calling thread has an assigned heap (1 if so)
+int
+rpmalloc_is_thread_initialized(void) {
+	return get_thread_heap_raw() ? 1 : 0;
+}
+
+//! Get the current runtime configuration. Values reflect what the
+//  allocator actually uses (rpmalloc_initialize_config writes the
+//  adjusted page/span sizes back), not necessarily what was passed in.
+const rpmalloc_config_t*
+rpmalloc_config(void) {
+	return &_memory_config;
+}
+
+//! Map new pages to virtual memory
+//  size must be a multiple of the page size. *offset receives the
+//  span-alignment padding that was skipped (stored in 8 byte units);
+//  it must be handed back to _memory_unmap_os when releasing.
+static void*
+_memory_map_os(size_t size, size_t* offset) {
+	//Either size is a heap (a single page) or a (multiple) span - we only need to align spans, and only if larger than map granularity
+	size_t padding = ((size >= _memory_span_size) && (_memory_span_size > _memory_map_granularity)) ? _memory_span_size : 0;
+	assert(size >= _memory_page_size);
+#if PLATFORM_WINDOWS
+	//Ok to MEM_COMMIT - according to MSDN, "actual physical pages are not allocated unless/until the virtual addresses are actually accessed"
+	void* ptr = VirtualAlloc(0, size + padding, (_memory_huge_pages ? MEM_LARGE_PAGES : 0) | MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+	if (!ptr) {
+		assert(!"Failed to map virtual memory block");
+		return 0;
+	}
+#else
+	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED;
+# if defined(__APPLE__)
+	//Tag the mapping so it is identifiable in vmmap tooling
+	int fd = (int)VM_MAKE_TAG(240U);
+	if (_memory_huge_pages)
+		fd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
+	void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, fd, 0);
+# elif defined(MAP_HUGETLB)
+	void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_HUGETLB : 0) | flags, -1, 0);
+# else
+	void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
+# endif
+	if ((ptr == MAP_FAILED) || !ptr) {
+		assert("Failed to map virtual memory block" == 0);
+		return 0;
+	}
+#endif
+#if ENABLE_STATISTICS
+	atomic_add32(&_mapped_pages_os, (int32_t)((size + padding) >> _memory_page_size_shift));
+#endif
+	if (padding) {
+		//Advance into the mapping so the returned pointer is span aligned,
+		//and remember the skipped amount for the matching unmap
+		size_t final_padding = padding - ((uintptr_t)ptr & ~_memory_span_mask);
+		assert(final_padding <= _memory_span_size);
+		assert(final_padding <= padding);
+		assert(!(final_padding % 8));
+		ptr = pointer_offset(ptr, final_padding);
+		*offset = final_padding >> 3;
+	}
+	assert((size < _memory_span_size) || !((uintptr_t)ptr & ~_memory_span_mask));
+	return ptr;
+}
+
+//! Unmap pages from virtual memory
+//  If release is non-zero the whole mapping (release bytes) is
+//  returned to the OS; otherwise the range is only decommitted
+//  (Windows) or madvised away (POSIX). offset is the span-alignment
+//  padding (in 8 byte units) produced by _memory_map_os.
+static void
+_memory_unmap_os(void* address, size_t size, size_t offset, size_t release) {
+	assert(release || (offset == 0));
+	assert(!release || (release >= _memory_page_size));
+	assert(size >= _memory_page_size);
+	if (release && offset) {
+		//Rewind to the actual start of the OS mapping
+		offset <<= 3;
+		address = pointer_offset(address, -(int32_t)offset);
+#if PLATFORM_POSIX
+		//Padding is always one span size
+		release += _memory_span_size;
+#endif
+	}
+#if !DISABLE_UNMAP
+#if PLATFORM_WINDOWS
+	if (!VirtualFree(address, release ? 0 : size, release ? MEM_RELEASE : MEM_DECOMMIT)) {
+		assert(!"Failed to unmap virtual memory block");
+	}
+#else
+	if (release) {
+		if (munmap(address, release)) {
+			assert("Failed to unmap virtual memory block" == 0);
+		}
+	}
+	else {
+		//Note the braceless first if: when POSIX_MADV_FREE is available,
+		//the DONTNEED call below is its body and only runs if FREE fails
+#if defined(POSIX_MADV_FREE)
+		if (posix_madvise(address, size, POSIX_MADV_FREE))
+#endif
+#if defined(POSIX_MADV_DONTNEED)
+		if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
+			assert("Failed to madvise virtual memory block as free" == 0);
+		}
+#endif
+	}
+#endif
+#endif
+#if ENABLE_STATISTICS
+	if (release)
+		atomic_add32(&_mapped_pages_os, -(int32_t)(release >> _memory_page_size_shift));
+#endif
+}
+
+// Extern interface
+
+//! Allocate a block of at least the given size from the calling
+//  thread's heap. Returns null (with errno = EINVAL when argument
+//  validation is enabled) for oversized requests.
+TRACY_API RPMALLOC_ALLOCATOR void*
+rpmalloc(size_t size) {
+#if ENABLE_VALIDATE_ARGS
+	if (size >= MAX_ALLOC_SIZE) {
+		errno = EINVAL;
+		return 0;
+	}
+#endif
+	return _memory_allocate(get_thread_heap(), size);
+}
+
+//! Free a block previously returned by one of the allocation entry points
+TRACY_API void
+rpfree(void* ptr) {
+	_memory_deallocate(ptr);
+}
+
+//! Allocate and zero-initialize an array of num elements of the given
+//  size. Returns null (errno = EINVAL) on multiplication overflow when
+//  argument validation is enabled, or null when allocation fails.
+extern inline RPMALLOC_ALLOCATOR void*
+rpcalloc(size_t num, size_t size) {
+	size_t total;
+#if ENABLE_VALIDATE_ARGS
+#if PLATFORM_WINDOWS
+	int err = SizeTMult(num, size, &total);
+	if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
+		errno = EINVAL;
+		return 0;
+	}
+#else
+	int err = __builtin_umull_overflow(num, size, &total);
+	if (err || (total >= MAX_ALLOC_SIZE)) {
+		errno = EINVAL;
+		return 0;
+	}
+#endif
+#else
+	total = num * size;
+#endif
+	heap_t* heap = get_thread_heap();
+	void* block = _memory_allocate(heap, total);
+	//Guard against allocation failure - zeroing through a null pointer
+	//is undefined behavior
+	if (block)
+		memset(block, 0, total);
+	return block;
+}
+
+//! Reallocate the given block to at least the given size. When
+//  argument validation is enabled, oversized requests return the
+//  original pointer unchanged with errno = EINVAL.
+TRACY_API RPMALLOC_ALLOCATOR void*
+rprealloc(void* ptr, size_t size) {
+#if ENABLE_VALIDATE_ARGS
+	if (size >= MAX_ALLOC_SIZE) {
+		errno = EINVAL;
+		return ptr;
+	}
+#endif
+	return _memory_reallocate(ptr, size, 0, 0);
+}
+
+//! Reallocate to the given size and alignment. If oldsize is zero it
+//  is deduced from the usable size of ptr. On allocation failure,
+//  returns null and leaves the original block valid (the previous code
+//  dereferenced the null result and also crashed when ptr was null in
+//  the alignment > 32 path).
+extern RPMALLOC_ALLOCATOR void*
+rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize,
+	unsigned int flags) {
+#if ENABLE_VALIDATE_ARGS
+	if ((size + alignment < size) || (alignment > _memory_page_size)) {
+		errno = EINVAL;
+		return 0;
+	}
+#endif
+	void* block;
+	if (alignment > 32) {
+		//Guard the usable-size query: _memory_usable_size derefs the span
+		size_t usablesize = (ptr ? _memory_usable_size(ptr) : 0);
+		if (ptr && (usablesize >= size) && (size >= (usablesize / 2)) && !((uintptr_t)ptr & (alignment - 1)))
+			return ptr;
+
+		block = rpaligned_alloc(alignment, size);
+		if (ptr) {
+			if (!oldsize)
+				oldsize = usablesize;
+			//Only copy and free the old block when the new allocation
+			//succeeded, preserving realloc semantics on failure
+			if (block) {
+				if (!(flags & RPMALLOC_NO_PRESERVE))
+					memcpy(block, ptr, oldsize < size ? oldsize : size);
+				rpfree(ptr);
+			}
+		}
+		if (block) {
+			//Mark as having aligned blocks
+			span_t* span = (span_t*)((uintptr_t)block & _memory_span_mask);
+			span->flags |= SPAN_FLAG_ALIGNED_BLOCKS;
+		}
+	} else {
+		block = _memory_reallocate(ptr, size, oldsize, flags);
+	}
+	return block;
+}
+
+//! Allocate a block of at least the given size, aligned to the given
+//  alignment. Alignment must be a power of two smaller than the span
+//  size; returns null with errno set (EINVAL/ENOMEM) on failure. The
+//  previous code dereferenced the span of a null pointer when the
+//  underlying allocation failed in the sub-page-alignment path.
+extern RPMALLOC_ALLOCATOR void*
+rpaligned_alloc(size_t alignment, size_t size) {
+	if (alignment <= 16)
+		return rpmalloc(size);
+
+#if ENABLE_VALIDATE_ARGS
+	if ((size + alignment) < size) {
+		errno = EINVAL;
+		return 0;
+	}
+	if (alignment & (alignment - 1)) {
+		errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	void* ptr = 0;
+	size_t align_mask = alignment - 1;
+	if (alignment < _memory_page_size) {
+		ptr = rpmalloc(size + alignment);
+		//Guard against allocation failure before touching the span header
+		if (ptr) {
+			if ((uintptr_t)ptr & align_mask)
+				ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
+			//Mark as having aligned blocks
+			span_t* span = (span_t*)((uintptr_t)ptr & _memory_span_mask);
+			span->flags |= SPAN_FLAG_ALIGNED_BLOCKS;
+		}
+		return ptr;
+	}
+
+	// Fallback to mapping new pages for this request. Since pointers passed
+	// to rpfree must be able to reach the start of the span by bitmasking of
+	// the address with the span size, the returned aligned pointer from this
+	// function must be within a span size of the start of the mapped area.
+	// In worst case this requires us to loop and map pages until we get a
+	// suitable memory address. It also means we can never align to span size
+	// or greater, since the span header will push alignment more than one
+	// span size away from span start (thus causing pointer mask to give us
+	// an invalid span start on free)
+	if (alignment & align_mask) {
+		errno = EINVAL;
+		return 0;
+	}
+	if (alignment >= _memory_span_size) {
+		errno = EINVAL;
+		return 0;
+	}
+
+	size_t extra_pages = alignment / _memory_page_size;
+
+	// Since each span has a header, we will at least need one extra memory page
+	size_t num_pages = 1 + (size / _memory_page_size);
+	if (size & (_memory_page_size - 1))
+		++num_pages;
+
+	if (extra_pages > num_pages)
+		num_pages = 1 + extra_pages;
+
+	size_t original_pages = num_pages;
+	size_t limit_pages = (_memory_span_size / _memory_page_size) * 2;
+	if (limit_pages < (original_pages * 2))
+		limit_pages = original_pages * 2;
+
+	size_t mapped_size, align_offset;
+	span_t* span;
+
+retry:
+	align_offset = 0;
+	mapped_size = num_pages * _memory_page_size;
+
+	span = (span_t*)_memory_map(mapped_size, &align_offset);
+	if (!span) {
+		errno = ENOMEM;
+		return 0;
+	}
+	ptr = pointer_offset(span, SPAN_HEADER_SIZE);
+
+	if ((uintptr_t)ptr & align_mask)
+		ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
+
+	//Retry with a larger mapping if the aligned pointer strays outside
+	//the span, past the mapped region, or can no longer reach the span
+	//start by masking
+	if (((size_t)pointer_diff(ptr, span) >= _memory_span_size) ||
+	    (pointer_offset(ptr, size) > pointer_offset(span, mapped_size)) ||
+	    (((uintptr_t)ptr & _memory_span_mask) != (uintptr_t)span)) {
+		_memory_unmap(span, mapped_size, align_offset, mapped_size);
+		++num_pages;
+		if (num_pages > limit_pages) {
+			errno = EINVAL;
+			return 0;
+		}
+		goto retry;
+	}
+
+	//Store page count in span_count
+	span->size_class = (uint32_t)-1;
+	span->span_count = (uint32_t)num_pages;
+	span->align_offset = (uint32_t)align_offset;
+	_memory_statistics_add_peak(&_huge_pages_current, num_pages, _huge_pages_peak);
+
+	return ptr;
+}
+
+//! memalign-compatible wrapper; forwards directly to rpaligned_alloc
+extern inline RPMALLOC_ALLOCATOR void*
+rpmemalign(size_t alignment, size_t size) {
+	return rpaligned_alloc(alignment, size);
+}
+
+//! posix_memalign-compatible entry point. POSIX requires alignment to
+//  be a power of two multiple of sizeof(void*); invalid alignments now
+//  return EINVAL instead of silently misbehaving (alignment 0 is still
+//  accepted and falls through to a plain allocation, as before).
+//  Returns 0 on success, EINVAL on bad arguments, ENOMEM on failure.
+extern inline int
+rpposix_memalign(void **memptr, size_t alignment, size_t size) {
+	if (!memptr)
+		return EINVAL;
+	//Power-of-two check also rejects alignments below sizeof(void*)
+	//except zero, which the modulo test lets through intentionally
+	if ((alignment % sizeof(void*)) || (alignment & (alignment - 1)))
+		return EINVAL;
+	*memptr = rpaligned_alloc(alignment, size);
+	return *memptr ? 0 : ENOMEM;
+}
+
+//! Query the usable size of the given block (0 for a null pointer)
+extern inline size_t
+rpmalloc_usable_size(void* ptr) {
+	if (!ptr)
+		return 0;
+	return _memory_usable_size(ptr);
+}
+
+//! No-op; presumably retained for interface compatibility — confirm
+//  against the public header before removing
+extern inline void
+rpmalloc_thread_collect(void) {
+}
+
+//! Collect statistics for the calling thread's heap. Cache counters
+//  are now accumulated across all size classes and spans — the
+//  previous code assigned with '=' inside the loops, so only the last
+//  visited class/span was reported. stats is zeroed first, so a thread
+//  without a heap reports all zeros.
+void
+rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
+	memset(stats, 0, sizeof(rpmalloc_thread_statistics_t));
+	heap_t* heap = get_thread_heap_raw();
+	if (!heap)
+		return;
+
+	for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
+		size_class_t* size_class = _memory_size_class + iclass;
+		heap_class_t* heap_class = heap->span_class + iclass;
+		span_t* span = heap_class->partial_span;
+		while (span) {
+			atomic_thread_fence_acquire();
+			size_t free_count = span->list_size;
+			if (span->state == SPAN_STATE_PARTIAL)
+				free_count += (size_class->block_count - span->used_count);
+			stats->sizecache += free_count * size_class->block_size;
+			span = span->next;
+		}
+	}
+
+#if ENABLE_THREAD_CACHE
+	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
+		if (heap->span_cache[iclass])
+			stats->spancache += (size_t)heap->span_cache[iclass]->list_size * (iclass + 1) * _memory_span_size;
+		span_t* deferred_list = !iclass ? (span_t*)atomic_load_ptr(&heap->span_cache_deferred) : 0;
+		//TODO: Incorrect, for deferred lists the size is NOT stored in list_size
+		if (deferred_list)
+			stats->spancache += (size_t)deferred_list->list_size * (iclass + 1) * _memory_span_size;
+	}
+#endif
+#if ENABLE_STATISTICS
+	stats->thread_to_global = heap->thread_to_global;
+	stats->global_to_thread = heap->global_to_thread;
+
+	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
+		stats->span_use[iclass].current = (size_t)atomic_load32(&heap->span_use[iclass].current);
+		stats->span_use[iclass].peak = (size_t)heap->span_use[iclass].high;
+		stats->span_use[iclass].to_global = (size_t)heap->span_use[iclass].spans_to_global;
+		stats->span_use[iclass].from_global = (size_t)heap->span_use[iclass].spans_from_global;
+		stats->span_use[iclass].to_cache = (size_t)heap->span_use[iclass].spans_to_cache;
+		stats->span_use[iclass].from_cache = (size_t)heap->span_use[iclass].spans_from_cache;
+		stats->span_use[iclass].to_reserved = (size_t)heap->span_use[iclass].spans_to_reserved;
+		stats->span_use[iclass].from_reserved = (size_t)heap->span_use[iclass].spans_from_reserved;
+		stats->span_use[iclass].map_calls = (size_t)heap->span_use[iclass].spans_map_calls;
+	}
+	for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
+		stats->size_use[iclass].alloc_current = (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_current);
+		stats->size_use[iclass].alloc_peak = (size_t)heap->size_class_use[iclass].alloc_peak;
+		stats->size_use[iclass].alloc_total = (size_t)heap->size_class_use[iclass].alloc_total;
+		stats->size_use[iclass].free_total = (size_t)atomic_load32(&heap->size_class_use[iclass].free_total);
+		stats->size_use[iclass].spans_to_cache = (size_t)heap->size_class_use[iclass].spans_to_cache;
+		stats->size_use[iclass].spans_from_cache = (size_t)heap->size_class_use[iclass].spans_from_cache;
+		stats->size_use[iclass].spans_from_reserved = (size_t)heap->size_class_use[iclass].spans_from_reserved;
+		stats->size_use[iclass].map_calls = (size_t)heap->size_class_use[iclass].spans_map_calls;
+	}
+#endif
+}
+
+//! Collect global statistics. Mapping counters require
+//  ENABLE_STATISTICS, the global cache totals require
+//  ENABLE_GLOBAL_CACHE; everything else reads as zero.
+void
+rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
+	memset(stats, 0, sizeof(rpmalloc_global_statistics_t));
+#if ENABLE_STATISTICS
+	//Counters are tracked in pages, convert to bytes
+	stats->mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
+	stats->mapped_peak = (size_t)_mapped_pages_peak * _memory_page_size;
+	stats->mapped_total = (size_t)atomic_load32(&_mapped_total) * _memory_page_size;
+	stats->unmapped_total = (size_t)atomic_load32(&_unmapped_total) * _memory_page_size;
+	stats->huge_alloc = (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;
+	stats->huge_alloc_peak = (size_t)_huge_pages_peak * _memory_page_size;
+#endif
+#if ENABLE_GLOBAL_CACHE
+	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
+		stats->cached += (size_t)atomic_load32(&_memory_span_cache[iclass].size) * (iclass + 1) * _memory_span_size;
+	}
+#endif
+}
+
+//! Dump human-readable statistics tables for every heap plus global
+//  totals. Only compiled in with ENABLE_STATISTICS; otherwise a no-op.
+//  NOTE(review): file is passed straight to fprintf, so callers must
+//  supply a FILE* despite the void* signature.
+void
+rpmalloc_dump_statistics(void* file) {
+#if ENABLE_STATISTICS
+	//If you hit this assert, you still have active threads or forgot to finalize some thread(s)
+	assert(atomic_load32(&_memory_active_heaps) == 0);
+
+	for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
+		heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
+		while (heap) {
+			fprintf(file, "Heap %d stats:\n", heap->id);
+			fprintf(file, "Class   CurAlloc  PeakAlloc   TotAlloc    TotFree  BlkSize BlkCount SpansCur SpansPeak  PeakAllocMiB  ToCacheMiB FromCacheMiB FromReserveMiB MmapCalls\n");
+			for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
+				//Skip classes that were never used (and sanity check them)
+				if (!heap->size_class_use[iclass].alloc_total) {
+					assert(!atomic_load32(&heap->size_class_use[iclass].free_total));
+					assert(!heap->size_class_use[iclass].spans_map_calls);
+					continue;
+				}
+				fprintf(file, "%3u:  %10u %10u %10u %10u %8u %8u %8d %9d %13zu %11zu %12zu %14zu %9u\n", (uint32_t)iclass,
+					atomic_load32(&heap->size_class_use[iclass].alloc_current),
+					heap->size_class_use[iclass].alloc_peak,
+					heap->size_class_use[iclass].alloc_total,
+					atomic_load32(&heap->size_class_use[iclass].free_total),
+					_memory_size_class[iclass].block_size,
+					_memory_size_class[iclass].block_count,
+					heap->size_class_use[iclass].spans_current,
+					heap->size_class_use[iclass].spans_peak,
+					((size_t)heap->size_class_use[iclass].alloc_peak * (size_t)_memory_size_class[iclass].block_size) / (size_t)(1024 * 1024),
+					((size_t)heap->size_class_use[iclass].spans_to_cache * _memory_span_size) / (size_t)(1024 * 1024),
+					((size_t)heap->size_class_use[iclass].spans_from_cache * _memory_span_size) / (size_t)(1024 * 1024),
+					((size_t)heap->size_class_use[iclass].spans_from_reserved * _memory_span_size) / (size_t)(1024 * 1024),
+					heap->size_class_use[iclass].spans_map_calls);
+			}
+			fprintf(file, "Spans  Current     Peak  PeakMiB  Cached  ToCacheMiB FromCacheMiB ToReserveMiB FromReserveMiB ToGlobalMiB FromGlobalMiB  MmapCalls\n");
+			for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
+				if (!heap->span_use[iclass].high && !heap->span_use[iclass].spans_map_calls)
+					continue;
+				fprintf(file, "%4u: %8d %8u %8zu %7u %11zu %12zu %12zu %14zu %11zu %13zu %10u\n", (uint32_t)(iclass + 1),
+					atomic_load32(&heap->span_use[iclass].current),
+					heap->span_use[iclass].high,
+					((size_t)heap->span_use[iclass].high * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
+					heap->span_cache[iclass] ? heap->span_cache[iclass]->list_size : 0,
+					((size_t)heap->span_use[iclass].spans_to_cache * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
+					((size_t)heap->span_use[iclass].spans_from_cache * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
+					((size_t)heap->span_use[iclass].spans_to_reserved * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
+					((size_t)heap->span_use[iclass].spans_from_reserved * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
+					((size_t)heap->span_use[iclass].spans_to_global * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
+					((size_t)heap->span_use[iclass].spans_from_global * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
+					heap->span_use[iclass].spans_map_calls);
+			}
+			fprintf(file, "ThreadToGlobalMiB GlobalToThreadMiB\n");
+			fprintf(file, "%17zu %17zu\n", (size_t)heap->thread_to_global / (size_t)(1024 * 1024), (size_t)heap->global_to_thread / (size_t)(1024 * 1024));
+			heap = heap->next_heap;
+		}
+	}
+
+	fprintf(file, "Global stats:\n");
+	size_t huge_current = (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;
+	size_t huge_peak = (size_t)_huge_pages_peak * _memory_page_size;
+	fprintf(file, "HugeCurrentMiB HugePeakMiB\n");
+	fprintf(file, "%14zu %11zu\n", huge_current / (size_t)(1024 * 1024), huge_peak / (size_t)(1024 * 1024));
+
+	size_t mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
+	size_t mapped_os = (size_t)atomic_load32(&_mapped_pages_os) * _memory_page_size;
+	size_t mapped_peak = (size_t)_mapped_pages_peak * _memory_page_size;
+	size_t mapped_total = (size_t)atomic_load32(&_mapped_total) * _memory_page_size;
+	size_t unmapped_total = (size_t)atomic_load32(&_unmapped_total) * _memory_page_size;
+	size_t reserved_total = (size_t)atomic_load32(&_reserved_spans) * _memory_span_size;
+	fprintf(file, "MappedMiB MappedOSMiB MappedPeakMiB MappedTotalMiB UnmappedTotalMiB ReservedTotalMiB\n");
+	fprintf(file, "%9zu %11zu %13zu %14zu %16zu %16zu\n",
+		mapped / (size_t)(1024 * 1024),
+		mapped_os / (size_t)(1024 * 1024),
+		mapped_peak / (size_t)(1024 * 1024),
+		mapped_total / (size_t)(1024 * 1024),
+		unmapped_total / (size_t)(1024 * 1024),
+		reserved_total / (size_t)(1024 * 1024));
+
+	fprintf(file, "\n");
+#else
+	//Silence the unused-parameter warning when statistics are disabled
+	(void)sizeof(file);
+#endif
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/client/tracy_rpmalloc.hpp b/3rdparty/tracy/tracy/client/tracy_rpmalloc.hpp
new file mode 100644
index 0000000..ef92db1
--- /dev/null
+++ b/3rdparty/tracy/tracy/client/tracy_rpmalloc.hpp
@@ -0,0 +1,261 @@
+/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
+ *
+ * This library provides a cross-platform lock free thread caching malloc implementation in C11.
+ * The latest source code is always available at
+ *
+ * https://github.com/mjansson/rpmalloc
+ *
+ * This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
+ *
+ */
+
+#pragma once
+
+#include <stddef.h>
+#include "../common/TracyApi.h"
+
+namespace tracy
+{
+
+#if defined(__clang__) || defined(__GNUC__)
+# define RPMALLOC_EXPORT __attribute__((visibility("default")))
+# define RPMALLOC_ALLOCATOR
+# define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
+# if defined(__clang_major__) && (__clang_major__ < 4)
+# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
+# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
+# else
+# define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
+# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) __attribute__((alloc_size(count, size)))
+# endif
+# define RPMALLOC_CDECL
+#elif defined(_MSC_VER)
+# define RPMALLOC_EXPORT
+# define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
+# define RPMALLOC_ATTRIB_MALLOC
+# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
+# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
+# define RPMALLOC_CDECL __cdecl
+#else
+# define RPMALLOC_EXPORT
+# define RPMALLOC_ALLOCATOR
+# define RPMALLOC_ATTRIB_MALLOC
+# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
+# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
+# define RPMALLOC_CDECL
+#endif
+
+//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes
+#ifndef RPMALLOC_CONFIGURABLE
+#define RPMALLOC_CONFIGURABLE 0
+#endif
+
+//! Flag to rpaligned_realloc to not preserve content in reallocation
+#define RPMALLOC_NO_PRESERVE 1
+
+typedef struct rpmalloc_global_statistics_t {
+ //! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
+ size_t mapped;
+ //! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
+ size_t mapped_peak;
+ //! Current amount of memory in global caches for small and medium sizes (<32KiB)
+ size_t cached;
+ //! Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
+ size_t huge_alloc;
+ //! Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
+ size_t huge_alloc_peak;
+ //! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
+ size_t mapped_total;
+ //! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
+ size_t unmapped_total;
+} rpmalloc_global_statistics_t;
+
+typedef struct rpmalloc_thread_statistics_t {
+ //! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
+ size_t sizecache;
+ //! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
+ size_t spancache;
+ //! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
+ size_t thread_to_global;
+ //! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
+ size_t global_to_thread;
+ //! Per span count statistics (only if ENABLE_STATISTICS=1)
+ struct {
+ //! Currently used number of spans
+ size_t current;
+ //! High water mark of spans used
+ size_t peak;
+ //! Number of spans transitioned to global cache
+ size_t to_global;
+ //! Number of spans transitioned from global cache
+ size_t from_global;
+ //! Number of spans transitioned to thread cache
+ size_t to_cache;
+ //! Number of spans transitioned from thread cache
+ size_t from_cache;
+ //! Number of spans transitioned to reserved state
+ size_t to_reserved;
+ //! Number of spans transitioned from reserved state
+ size_t from_reserved;
+ //! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
+ size_t map_calls;
+ } span_use[32];
+ //! Per size class statistics (only if ENABLE_STATISTICS=1)
+ struct {
+ //! Current number of allocations
+ size_t alloc_current;
+ //! Peak number of allocations
+ size_t alloc_peak;
+ //! Total number of allocations
+ size_t alloc_total;
+ //! Total number of frees
+ size_t free_total;
+ //! Number of spans transitioned to cache
+ size_t spans_to_cache;
+ //! Number of spans transitioned from cache
+ size_t spans_from_cache;
+ //! Number of spans transitioned from reserved state
+ size_t spans_from_reserved;
+ //! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
+ size_t map_calls;
+ } size_use[128];
+} rpmalloc_thread_statistics_t;
+
+typedef struct rpmalloc_config_t {
+ //! Map memory pages for the given number of bytes. The returned address MUST be
+ // aligned to the rpmalloc span size, which will always be a power of two.
+ // Optionally the function can store an alignment offset in the offset variable
+ // in case it performs alignment and the returned pointer is offset from the
+ // actual start of the memory region due to this alignment. The alignment offset
+ // will be passed to the memory unmap function. The alignment offset MUST NOT be
+ // larger than 65535 (storable in an uint16_t), if it is you must use natural
+ // alignment to shift it into 16 bits. If you set a memory_map function, you
+ // must also set a memory_unmap function or else the default implementation will
+ // be used for both.
+ void* (*memory_map)(size_t size, size_t* offset);
+ //! Unmap the memory pages starting at address and spanning the given number of bytes.
+ // If release is set to non-zero, the unmap is for an entire span range as returned by
+ // a previous call to memory_map and that the entire range should be released. The
+ // release argument holds the size of the entire span range. If release is set to 0,
+ // the unmap is a partial decommit of a subset of the mapped memory range.
+ // If you set a memory_unmap function, you must also set a memory_map function or
+ // else the default implementation will be used for both.
+ void (*memory_unmap)(void* address, size_t size, size_t offset, size_t release);
+ //! Size of memory pages. The page size MUST be a power of two. All memory mapping
+ // requests to memory_map will be made with size set to a multiple of the page size.
+ // Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
+ size_t page_size;
+ //! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
+ // range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
+ // is defined to 1.
+ size_t span_size;
+ //! Number of spans to map at each request to map new virtual memory blocks. This can
+ // be used to minimize the system call overhead at the cost of virtual memory address
+ // space. The extra mapped pages will not be written until actually used, so physical
+ // committed memory should not be affected in the default implementation. Will be
+ // aligned to a multiple of spans that match memory page size in case of huge pages.
+ size_t span_map_count;
+ //! Enable use of large/huge pages. If this flag is set to non-zero and page size is
+ // zero, the allocator will try to enable huge pages and auto detect the configuration.
+ // If this is set to non-zero and page_size is also non-zero, the allocator will
+ // assume huge pages have been configured and enabled prior to initializing the
+ // allocator.
+ // For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
+ // For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
+ int enable_huge_pages;
+} rpmalloc_config_t;
+
+//! Initialize allocator with default configuration
+TRACY_API int
+rpmalloc_initialize(void);
+
+//! Initialize allocator with given configuration
+RPMALLOC_EXPORT int
+rpmalloc_initialize_config(const rpmalloc_config_t* config);
+
+//! Get allocator configuration
+RPMALLOC_EXPORT const rpmalloc_config_t*
+rpmalloc_config(void);
+
+//! Finalize allocator
+TRACY_API void
+rpmalloc_finalize(void);
+
+//! Initialize allocator for calling thread
+TRACY_API void
+rpmalloc_thread_initialize(void);
+
+//! Finalize allocator for calling thread
+TRACY_API void
+rpmalloc_thread_finalize(void);
+
+//! Perform deferred deallocations pending for the calling thread heap
+RPMALLOC_EXPORT void
+rpmalloc_thread_collect(void);
+
+//! Query if allocator is initialized for calling thread
+RPMALLOC_EXPORT int
+rpmalloc_is_thread_initialized(void);
+
+//! Get per-thread statistics
+RPMALLOC_EXPORT void
+rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats);
+
+//! Get global statistics
+RPMALLOC_EXPORT void
+rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats);
+
+//! Dump all statistics in human readable format to file (should be a FILE*)
+RPMALLOC_EXPORT void
+rpmalloc_dump_statistics(void* file);
+
+//! Allocate a memory block of at least the given size
+TRACY_API RPMALLOC_ALLOCATOR void*
+rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);
+
+//! Free the given memory block
+TRACY_API void
+rpfree(void* ptr);
+
+//! Allocate a memory block of at least the given size and zero initialize it
+RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
+rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);
+
+//! Reallocate the given block to at least the given size
+TRACY_API RPMALLOC_ALLOCATOR void*
+rprealloc(void* ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
+
+//! Reallocate the given block to at least the given size and alignment,
+// with optional control flags (see RPMALLOC_NO_PRESERVE).
+// Alignment must be a power of two and a multiple of sizeof(void*),
+// and should ideally be less than memory page size. A caveat of rpmalloc
+// internals is that this must also be strictly less than the span size (default 64KiB)
+RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
+rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
+
+//! Allocate a memory block of at least the given size and alignment.
+// Alignment must be a power of two and a multiple of sizeof(void*),
+// and should ideally be less than memory page size. A caveat of rpmalloc
+// internals is that this must also be strictly less than the span size (default 64KiB)
+RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
+rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
+
+//! Allocate a memory block of at least the given size and alignment.
+// Alignment must be a power of two and a multiple of sizeof(void*),
+// and should ideally be less than memory page size. A caveat of rpmalloc
+// internals is that this must also be strictly less than the span size (default 64KiB)
+RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
+rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
+
+//! Allocate a memory block of at least the given size and alignment.
+// Alignment must be a power of two and a multiple of sizeof(void*),
+// and should ideally be less than memory page size. A caveat of rpmalloc
+// internals is that this must also be strictly less than the span size (default 64KiB)
+RPMALLOC_EXPORT int
+rpposix_memalign(void **memptr, size_t alignment, size_t size);
+
+//! Query the usable size of the given memory block (from given pointer to the end of block)
+RPMALLOC_EXPORT size_t
+rpmalloc_usable_size(void* ptr);
+
+}
diff --git a/3rdparty/tracy/tracy/common/TracyAlign.hpp b/3rdparty/tracy/tracy/common/TracyAlign.hpp
new file mode 100644
index 0000000..c3531ba
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyAlign.hpp
@@ -0,0 +1,27 @@
+#ifndef __TRACYALIGN_HPP__
+#define __TRACYALIGN_HPP__
+
+#include <string.h>
+
+#include "TracyForceInline.hpp"
+
+namespace tracy
+{
+
+template<typename T>
+tracy_force_inline T MemRead( const void* ptr )
+{
+ T val;
+ memcpy( &val, ptr, sizeof( T ) );
+ return val;
+}
+
+template<typename T>
+tracy_force_inline void MemWrite( void* ptr, T val )
+{
+ memcpy( ptr, &val, sizeof( T ) );
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyAlloc.hpp b/3rdparty/tracy/tracy/common/TracyAlloc.hpp
new file mode 100644
index 0000000..d3dec12
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyAlloc.hpp
@@ -0,0 +1,69 @@
+#ifndef __TRACYALLOC_HPP__
+#define __TRACYALLOC_HPP__
+
+#include <stdlib.h>
+
+#ifdef TRACY_ENABLE
+# include "TracyApi.h"
+# include "TracyForceInline.hpp"
+# include "../client/tracy_rpmalloc.hpp"
+#endif
+
+namespace tracy
+{
+
+#ifdef TRACY_ENABLE
+TRACY_API void InitRpmalloc();
+#endif
+
+static inline void* tracy_malloc( size_t size )
+{
+#ifdef TRACY_ENABLE
+ InitRpmalloc();
+ return rpmalloc( size );
+#else
+ return malloc( size );
+#endif
+}
+
+static inline void* tracy_malloc_fast( size_t size )
+{
+#ifdef TRACY_ENABLE
+ return rpmalloc( size );
+#else
+ return malloc( size );
+#endif
+}
+
+static inline void tracy_free( void* ptr )
+{
+#ifdef TRACY_ENABLE
+ InitRpmalloc();
+ rpfree( ptr );
+#else
+ free( ptr );
+#endif
+}
+
+static inline void tracy_free_fast( void* ptr )
+{
+#ifdef TRACY_ENABLE
+ rpfree( ptr );
+#else
+ free( ptr );
+#endif
+}
+
+static inline void* tracy_realloc( void* ptr, size_t size )
+{
+#ifdef TRACY_ENABLE
+ InitRpmalloc();
+ return rprealloc( ptr, size );
+#else
+ return realloc( ptr, size );
+#endif
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyApi.h b/3rdparty/tracy/tracy/common/TracyApi.h
new file mode 100644
index 0000000..f396ce0
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyApi.h
@@ -0,0 +1,16 @@
+#ifndef __TRACYAPI_H__
+#define __TRACYAPI_H__
+
+#if defined _WIN32
+# if defined TRACY_EXPORTS
+# define TRACY_API __declspec(dllexport)
+# elif defined TRACY_IMPORTS
+# define TRACY_API __declspec(dllimport)
+# else
+# define TRACY_API
+# endif
+#else
+# define TRACY_API __attribute__((visibility("default")))
+#endif
+
+#endif // __TRACYAPI_H__
diff --git a/3rdparty/tracy/tracy/common/TracyColor.hpp b/3rdparty/tracy/tracy/common/TracyColor.hpp
new file mode 100644
index 0000000..4825c0f
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyColor.hpp
@@ -0,0 +1,690 @@
+#ifndef __TRACYCOLOR_HPP__
+#define __TRACYCOLOR_HPP__
+
+namespace tracy
+{
+struct Color
+{
+enum ColorType
+{
+ Snow = 0xfffafa,
+ GhostWhite = 0xf8f8ff,
+ WhiteSmoke = 0xf5f5f5,
+ Gainsboro = 0xdcdcdc,
+ FloralWhite = 0xfffaf0,
+ OldLace = 0xfdf5e6,
+ Linen = 0xfaf0e6,
+ AntiqueWhite = 0xfaebd7,
+ PapayaWhip = 0xffefd5,
+ BlanchedAlmond = 0xffebcd,
+ Bisque = 0xffe4c4,
+ PeachPuff = 0xffdab9,
+ NavajoWhite = 0xffdead,
+ Moccasin = 0xffe4b5,
+ Cornsilk = 0xfff8dc,
+ Ivory = 0xfffff0,
+ LemonChiffon = 0xfffacd,
+ Seashell = 0xfff5ee,
+ Honeydew = 0xf0fff0,
+ MintCream = 0xf5fffa,
+ Azure = 0xf0ffff,
+ AliceBlue = 0xf0f8ff,
+ Lavender = 0xe6e6fa,
+ LavenderBlush = 0xfff0f5,
+ MistyRose = 0xffe4e1,
+ White = 0xffffff,
+ Black = 0x000000,
+ DarkSlateGray = 0x2f4f4f,
+ DarkSlateGrey = 0x2f4f4f,
+ DimGray = 0x696969,
+ DimGrey = 0x696969,
+ SlateGray = 0x708090,
+ SlateGrey = 0x708090,
+ LightSlateGray = 0x778899,
+ LightSlateGrey = 0x778899,
+ Gray = 0xbebebe,
+ Grey = 0xbebebe,
+ X11Gray = 0xbebebe,
+ X11Grey = 0xbebebe,
+ WebGray = 0x808080,
+ WebGrey = 0x808080,
+ LightGrey = 0xd3d3d3,
+ LightGray = 0xd3d3d3,
+ MidnightBlue = 0x191970,
+ Navy = 0x000080,
+ NavyBlue = 0x000080,
+ CornflowerBlue = 0x6495ed,
+ DarkSlateBlue = 0x483d8b,
+ SlateBlue = 0x6a5acd,
+ MediumSlateBlue = 0x7b68ee,
+ LightSlateBlue = 0x8470ff,
+ MediumBlue = 0x0000cd,
+ RoyalBlue = 0x4169e1,
+ Blue = 0x0000ff,
+ DodgerBlue = 0x1e90ff,
+ DeepSkyBlue = 0x00bfff,
+ SkyBlue = 0x87ceeb,
+ LightSkyBlue = 0x87cefa,
+ SteelBlue = 0x4682b4,
+ LightSteelBlue = 0xb0c4de,
+ LightBlue = 0xadd8e6,
+ PowderBlue = 0xb0e0e6,
+ PaleTurquoise = 0xafeeee,
+ DarkTurquoise = 0x00ced1,
+ MediumTurquoise = 0x48d1cc,
+ Turquoise = 0x40e0d0,
+ Cyan = 0x00ffff,
+ Aqua = 0x00ffff,
+ LightCyan = 0xe0ffff,
+ CadetBlue = 0x5f9ea0,
+ MediumAquamarine = 0x66cdaa,
+ Aquamarine = 0x7fffd4,
+ DarkGreen = 0x006400,
+ DarkOliveGreen = 0x556b2f,
+ DarkSeaGreen = 0x8fbc8f,
+ SeaGreen = 0x2e8b57,
+ MediumSeaGreen = 0x3cb371,
+ LightSeaGreen = 0x20b2aa,
+ PaleGreen = 0x98fb98,
+ SpringGreen = 0x00ff7f,
+ LawnGreen = 0x7cfc00,
+ Green = 0x00ff00,
+ Lime = 0x00ff00,
+ X11Green = 0x00ff00,
+ WebGreen = 0x008000,
+ Chartreuse = 0x7fff00,
+ MediumSpringGreen = 0x00fa9a,
+ GreenYellow = 0xadff2f,
+ LimeGreen = 0x32cd32,
+ YellowGreen = 0x9acd32,
+ ForestGreen = 0x228b22,
+ OliveDrab = 0x6b8e23,
+ DarkKhaki = 0xbdb76b,
+ Khaki = 0xf0e68c,
+ PaleGoldenrod = 0xeee8aa,
+ LightGoldenrodYellow = 0xfafad2,
+ LightYellow = 0xffffe0,
+ Yellow = 0xffff00,
+ Gold = 0xffd700,
+ LightGoldenrod = 0xeedd82,
+ Goldenrod = 0xdaa520,
+ DarkGoldenrod = 0xb8860b,
+ RosyBrown = 0xbc8f8f,
+ IndianRed = 0xcd5c5c,
+ SaddleBrown = 0x8b4513,
+ Sienna = 0xa0522d,
+ Peru = 0xcd853f,
+ Burlywood = 0xdeb887,
+ Beige = 0xf5f5dc,
+ Wheat = 0xf5deb3,
+ SandyBrown = 0xf4a460,
+ Tan = 0xd2b48c,
+ Chocolate = 0xd2691e,
+ Firebrick = 0xb22222,
+ Brown = 0xa52a2a,
+ DarkSalmon = 0xe9967a,
+ Salmon = 0xfa8072,
+ LightSalmon = 0xffa07a,
+ Orange = 0xffa500,
+ DarkOrange = 0xff8c00,
+ Coral = 0xff7f50,
+ LightCoral = 0xf08080,
+ Tomato = 0xff6347,
+ OrangeRed = 0xff4500,
+ Red = 0xff0000,
+ HotPink = 0xff69b4,
+ DeepPink = 0xff1493,
+ Pink = 0xffc0cb,
+ LightPink = 0xffb6c1,
+ PaleVioletRed = 0xdb7093,
+ Maroon = 0xb03060,
+ X11Maroon = 0xb03060,
+ WebMaroon = 0x800000,
+ MediumVioletRed = 0xc71585,
+ VioletRed = 0xd02090,
+ Magenta = 0xff00ff,
+ Fuchsia = 0xff00ff,
+ Violet = 0xee82ee,
+ Plum = 0xdda0dd,
+ Orchid = 0xda70d6,
+ MediumOrchid = 0xba55d3,
+ DarkOrchid = 0x9932cc,
+ DarkViolet = 0x9400d3,
+ BlueViolet = 0x8a2be2,
+ Purple = 0xa020f0,
+ X11Purple = 0xa020f0,
+ WebPurple = 0x800080,
+ MediumPurple = 0x9370db,
+ Thistle = 0xd8bfd8,
+ Snow1 = 0xfffafa,
+ Snow2 = 0xeee9e9,
+ Snow3 = 0xcdc9c9,
+ Snow4 = 0x8b8989,
+ Seashell1 = 0xfff5ee,
+ Seashell2 = 0xeee5de,
+ Seashell3 = 0xcdc5bf,
+ Seashell4 = 0x8b8682,
+ AntiqueWhite1 = 0xffefdb,
+ AntiqueWhite2 = 0xeedfcc,
+ AntiqueWhite3 = 0xcdc0b0,
+ AntiqueWhite4 = 0x8b8378,
+ Bisque1 = 0xffe4c4,
+ Bisque2 = 0xeed5b7,
+ Bisque3 = 0xcdb79e,
+ Bisque4 = 0x8b7d6b,
+ PeachPuff1 = 0xffdab9,
+ PeachPuff2 = 0xeecbad,
+ PeachPuff3 = 0xcdaf95,
+ PeachPuff4 = 0x8b7765,
+ NavajoWhite1 = 0xffdead,
+ NavajoWhite2 = 0xeecfa1,
+ NavajoWhite3 = 0xcdb38b,
+ NavajoWhite4 = 0x8b795e,
+ LemonChiffon1 = 0xfffacd,
+ LemonChiffon2 = 0xeee9bf,
+ LemonChiffon3 = 0xcdc9a5,
+ LemonChiffon4 = 0x8b8970,
+ Cornsilk1 = 0xfff8dc,
+ Cornsilk2 = 0xeee8cd,
+ Cornsilk3 = 0xcdc8b1,
+ Cornsilk4 = 0x8b8878,
+ Ivory1 = 0xfffff0,
+ Ivory2 = 0xeeeee0,
+ Ivory3 = 0xcdcdc1,
+ Ivory4 = 0x8b8b83,
+ Honeydew1 = 0xf0fff0,
+ Honeydew2 = 0xe0eee0,
+ Honeydew3 = 0xc1cdc1,
+ Honeydew4 = 0x838b83,
+ LavenderBlush1 = 0xfff0f5,
+ LavenderBlush2 = 0xeee0e5,
+ LavenderBlush3 = 0xcdc1c5,
+ LavenderBlush4 = 0x8b8386,
+ MistyRose1 = 0xffe4e1,
+ MistyRose2 = 0xeed5d2,
+ MistyRose3 = 0xcdb7b5,
+ MistyRose4 = 0x8b7d7b,
+ Azure1 = 0xf0ffff,
+ Azure2 = 0xe0eeee,
+ Azure3 = 0xc1cdcd,
+ Azure4 = 0x838b8b,
+ SlateBlue1 = 0x836fff,
+ SlateBlue2 = 0x7a67ee,
+ SlateBlue3 = 0x6959cd,
+ SlateBlue4 = 0x473c8b,
+ RoyalBlue1 = 0x4876ff,
+ RoyalBlue2 = 0x436eee,
+ RoyalBlue3 = 0x3a5fcd,
+ RoyalBlue4 = 0x27408b,
+ Blue1 = 0x0000ff,
+ Blue2 = 0x0000ee,
+ Blue3 = 0x0000cd,
+ Blue4 = 0x00008b,
+ DodgerBlue1 = 0x1e90ff,
+ DodgerBlue2 = 0x1c86ee,
+ DodgerBlue3 = 0x1874cd,
+ DodgerBlue4 = 0x104e8b,
+ SteelBlue1 = 0x63b8ff,
+ SteelBlue2 = 0x5cacee,
+ SteelBlue3 = 0x4f94cd,
+ SteelBlue4 = 0x36648b,
+ DeepSkyBlue1 = 0x00bfff,
+ DeepSkyBlue2 = 0x00b2ee,
+ DeepSkyBlue3 = 0x009acd,
+ DeepSkyBlue4 = 0x00688b,
+ SkyBlue1 = 0x87ceff,
+ SkyBlue2 = 0x7ec0ee,
+ SkyBlue3 = 0x6ca6cd,
+ SkyBlue4 = 0x4a708b,
+ LightSkyBlue1 = 0xb0e2ff,
+ LightSkyBlue2 = 0xa4d3ee,
+ LightSkyBlue3 = 0x8db6cd,
+ LightSkyBlue4 = 0x607b8b,
+ SlateGray1 = 0xc6e2ff,
+ SlateGray2 = 0xb9d3ee,
+ SlateGray3 = 0x9fb6cd,
+ SlateGray4 = 0x6c7b8b,
+ LightSteelBlue1 = 0xcae1ff,
+ LightSteelBlue2 = 0xbcd2ee,
+ LightSteelBlue3 = 0xa2b5cd,
+ LightSteelBlue4 = 0x6e7b8b,
+ LightBlue1 = 0xbfefff,
+ LightBlue2 = 0xb2dfee,
+ LightBlue3 = 0x9ac0cd,
+ LightBlue4 = 0x68838b,
+ LightCyan1 = 0xe0ffff,
+ LightCyan2 = 0xd1eeee,
+ LightCyan3 = 0xb4cdcd,
+ LightCyan4 = 0x7a8b8b,
+ PaleTurquoise1 = 0xbbffff,
+ PaleTurquoise2 = 0xaeeeee,
+ PaleTurquoise3 = 0x96cdcd,
+ PaleTurquoise4 = 0x668b8b,
+ CadetBlue1 = 0x98f5ff,
+ CadetBlue2 = 0x8ee5ee,
+ CadetBlue3 = 0x7ac5cd,
+ CadetBlue4 = 0x53868b,
+ Turquoise1 = 0x00f5ff,
+ Turquoise2 = 0x00e5ee,
+ Turquoise3 = 0x00c5cd,
+ Turquoise4 = 0x00868b,
+ Cyan1 = 0x00ffff,
+ Cyan2 = 0x00eeee,
+ Cyan3 = 0x00cdcd,
+ Cyan4 = 0x008b8b,
+ DarkSlateGray1 = 0x97ffff,
+ DarkSlateGray2 = 0x8deeee,
+ DarkSlateGray3 = 0x79cdcd,
+ DarkSlateGray4 = 0x528b8b,
+ Aquamarine1 = 0x7fffd4,
+ Aquamarine2 = 0x76eec6,
+ Aquamarine3 = 0x66cdaa,
+ Aquamarine4 = 0x458b74,
+ DarkSeaGreen1 = 0xc1ffc1,
+ DarkSeaGreen2 = 0xb4eeb4,
+ DarkSeaGreen3 = 0x9bcd9b,
+ DarkSeaGreen4 = 0x698b69,
+ SeaGreen1 = 0x54ff9f,
+ SeaGreen2 = 0x4eee94,
+ SeaGreen3 = 0x43cd80,
+ SeaGreen4 = 0x2e8b57,
+ PaleGreen1 = 0x9aff9a,
+ PaleGreen2 = 0x90ee90,
+ PaleGreen3 = 0x7ccd7c,
+ PaleGreen4 = 0x548b54,
+ SpringGreen1 = 0x00ff7f,
+ SpringGreen2 = 0x00ee76,
+ SpringGreen3 = 0x00cd66,
+ SpringGreen4 = 0x008b45,
+ Green1 = 0x00ff00,
+ Green2 = 0x00ee00,
+ Green3 = 0x00cd00,
+ Green4 = 0x008b00,
+ Chartreuse1 = 0x7fff00,
+ Chartreuse2 = 0x76ee00,
+ Chartreuse3 = 0x66cd00,
+ Chartreuse4 = 0x458b00,
+ OliveDrab1 = 0xc0ff3e,
+ OliveDrab2 = 0xb3ee3a,
+ OliveDrab3 = 0x9acd32,
+ OliveDrab4 = 0x698b22,
+ DarkOliveGreen1 = 0xcaff70,
+ DarkOliveGreen2 = 0xbcee68,
+ DarkOliveGreen3 = 0xa2cd5a,
+ DarkOliveGreen4 = 0x6e8b3d,
+ Khaki1 = 0xfff68f,
+ Khaki2 = 0xeee685,
+ Khaki3 = 0xcdc673,
+ Khaki4 = 0x8b864e,
+ LightGoldenrod1 = 0xffec8b,
+ LightGoldenrod2 = 0xeedc82,
+ LightGoldenrod3 = 0xcdbe70,
+ LightGoldenrod4 = 0x8b814c,
+ LightYellow1 = 0xffffe0,
+ LightYellow2 = 0xeeeed1,
+ LightYellow3 = 0xcdcdb4,
+ LightYellow4 = 0x8b8b7a,
+ Yellow1 = 0xffff00,
+ Yellow2 = 0xeeee00,
+ Yellow3 = 0xcdcd00,
+ Yellow4 = 0x8b8b00,
+ Gold1 = 0xffd700,
+ Gold2 = 0xeec900,
+ Gold3 = 0xcdad00,
+ Gold4 = 0x8b7500,
+ Goldenrod1 = 0xffc125,
+ Goldenrod2 = 0xeeb422,
+ Goldenrod3 = 0xcd9b1d,
+ Goldenrod4 = 0x8b6914,
+ DarkGoldenrod1 = 0xffb90f,
+ DarkGoldenrod2 = 0xeead0e,
+ DarkGoldenrod3 = 0xcd950c,
+ DarkGoldenrod4 = 0x8b6508,
+ RosyBrown1 = 0xffc1c1,
+ RosyBrown2 = 0xeeb4b4,
+ RosyBrown3 = 0xcd9b9b,
+ RosyBrown4 = 0x8b6969,
+ IndianRed1 = 0xff6a6a,
+ IndianRed2 = 0xee6363,
+ IndianRed3 = 0xcd5555,
+ IndianRed4 = 0x8b3a3a,
+ Sienna1 = 0xff8247,
+ Sienna2 = 0xee7942,
+ Sienna3 = 0xcd6839,
+ Sienna4 = 0x8b4726,
+ Burlywood1 = 0xffd39b,
+ Burlywood2 = 0xeec591,
+ Burlywood3 = 0xcdaa7d,
+ Burlywood4 = 0x8b7355,
+ Wheat1 = 0xffe7ba,
+ Wheat2 = 0xeed8ae,
+ Wheat3 = 0xcdba96,
+ Wheat4 = 0x8b7e66,
+ Tan1 = 0xffa54f,
+ Tan2 = 0xee9a49,
+ Tan3 = 0xcd853f,
+ Tan4 = 0x8b5a2b,
+ Chocolate1 = 0xff7f24,
+ Chocolate2 = 0xee7621,
+ Chocolate3 = 0xcd661d,
+ Chocolate4 = 0x8b4513,
+ Firebrick1 = 0xff3030,
+ Firebrick2 = 0xee2c2c,
+ Firebrick3 = 0xcd2626,
+ Firebrick4 = 0x8b1a1a,
+ Brown1 = 0xff4040,
+ Brown2 = 0xee3b3b,
+ Brown3 = 0xcd3333,
+ Brown4 = 0x8b2323,
+ Salmon1 = 0xff8c69,
+ Salmon2 = 0xee8262,
+ Salmon3 = 0xcd7054,
+ Salmon4 = 0x8b4c39,
+ LightSalmon1 = 0xffa07a,
+ LightSalmon2 = 0xee9572,
+ LightSalmon3 = 0xcd8162,
+ LightSalmon4 = 0x8b5742,
+ Orange1 = 0xffa500,
+ Orange2 = 0xee9a00,
+ Orange3 = 0xcd8500,
+ Orange4 = 0x8b5a00,
+ DarkOrange1 = 0xff7f00,
+ DarkOrange2 = 0xee7600,
+ DarkOrange3 = 0xcd6600,
+ DarkOrange4 = 0x8b4500,
+ Coral1 = 0xff7256,
+ Coral2 = 0xee6a50,
+ Coral3 = 0xcd5b45,
+ Coral4 = 0x8b3e2f,
+ Tomato1 = 0xff6347,
+ Tomato2 = 0xee5c42,
+ Tomato3 = 0xcd4f39,
+ Tomato4 = 0x8b3626,
+ OrangeRed1 = 0xff4500,
+ OrangeRed2 = 0xee4000,
+ OrangeRed3 = 0xcd3700,
+ OrangeRed4 = 0x8b2500,
+ Red1 = 0xff0000,
+ Red2 = 0xee0000,
+ Red3 = 0xcd0000,
+ Red4 = 0x8b0000,
+ DeepPink1 = 0xff1493,
+ DeepPink2 = 0xee1289,
+ DeepPink3 = 0xcd1076,
+ DeepPink4 = 0x8b0a50,
+ HotPink1 = 0xff6eb4,
+ HotPink2 = 0xee6aa7,
+ HotPink3 = 0xcd6090,
+ HotPink4 = 0x8b3a62,
+ Pink1 = 0xffb5c5,
+ Pink2 = 0xeea9b8,
+ Pink3 = 0xcd919e,
+ Pink4 = 0x8b636c,
+ LightPink1 = 0xffaeb9,
+ LightPink2 = 0xeea2ad,
+ LightPink3 = 0xcd8c95,
+ LightPink4 = 0x8b5f65,
+ PaleVioletRed1 = 0xff82ab,
+ PaleVioletRed2 = 0xee799f,
+ PaleVioletRed3 = 0xcd6889,
+ PaleVioletRed4 = 0x8b475d,
+ Maroon1 = 0xff34b3,
+ Maroon2 = 0xee30a7,
+ Maroon3 = 0xcd2990,
+ Maroon4 = 0x8b1c62,
+ VioletRed1 = 0xff3e96,
+ VioletRed2 = 0xee3a8c,
+ VioletRed3 = 0xcd3278,
+ VioletRed4 = 0x8b2252,
+ Magenta1 = 0xff00ff,
+ Magenta2 = 0xee00ee,
+ Magenta3 = 0xcd00cd,
+ Magenta4 = 0x8b008b,
+ Orchid1 = 0xff83fa,
+ Orchid2 = 0xee7ae9,
+ Orchid3 = 0xcd69c9,
+ Orchid4 = 0x8b4789,
+ Plum1 = 0xffbbff,
+ Plum2 = 0xeeaeee,
+ Plum3 = 0xcd96cd,
+ Plum4 = 0x8b668b,
+ MediumOrchid1 = 0xe066ff,
+ MediumOrchid2 = 0xd15fee,
+ MediumOrchid3 = 0xb452cd,
+ MediumOrchid4 = 0x7a378b,
+ DarkOrchid1 = 0xbf3eff,
+ DarkOrchid2 = 0xb23aee,
+ DarkOrchid3 = 0x9a32cd,
+ DarkOrchid4 = 0x68228b,
+ Purple1 = 0x9b30ff,
+ Purple2 = 0x912cee,
+ Purple3 = 0x7d26cd,
+ Purple4 = 0x551a8b,
+ MediumPurple1 = 0xab82ff,
+ MediumPurple2 = 0x9f79ee,
+ MediumPurple3 = 0x8968cd,
+ MediumPurple4 = 0x5d478b,
+ Thistle1 = 0xffe1ff,
+ Thistle2 = 0xeed2ee,
+ Thistle3 = 0xcdb5cd,
+ Thistle4 = 0x8b7b8b,
+ Gray0 = 0x000000,
+ Grey0 = 0x000000,
+ Gray1 = 0x030303,
+ Grey1 = 0x030303,
+ Gray2 = 0x050505,
+ Grey2 = 0x050505,
+ Gray3 = 0x080808,
+ Grey3 = 0x080808,
+ Gray4 = 0x0a0a0a,
+ Grey4 = 0x0a0a0a,
+ Gray5 = 0x0d0d0d,
+ Grey5 = 0x0d0d0d,
+ Gray6 = 0x0f0f0f,
+ Grey6 = 0x0f0f0f,
+ Gray7 = 0x121212,
+ Grey7 = 0x121212,
+ Gray8 = 0x141414,
+ Grey8 = 0x141414,
+ Gray9 = 0x171717,
+ Grey9 = 0x171717,
+ Gray10 = 0x1a1a1a,
+ Grey10 = 0x1a1a1a,
+ Gray11 = 0x1c1c1c,
+ Grey11 = 0x1c1c1c,
+ Gray12 = 0x1f1f1f,
+ Grey12 = 0x1f1f1f,
+ Gray13 = 0x212121,
+ Grey13 = 0x212121,
+ Gray14 = 0x242424,
+ Grey14 = 0x242424,
+ Gray15 = 0x262626,
+ Grey15 = 0x262626,
+ Gray16 = 0x292929,
+ Grey16 = 0x292929,
+ Gray17 = 0x2b2b2b,
+ Grey17 = 0x2b2b2b,
+ Gray18 = 0x2e2e2e,
+ Grey18 = 0x2e2e2e,
+ Gray19 = 0x303030,
+ Grey19 = 0x303030,
+ Gray20 = 0x333333,
+ Grey20 = 0x333333,
+ Gray21 = 0x363636,
+ Grey21 = 0x363636,
+ Gray22 = 0x383838,
+ Grey22 = 0x383838,
+ Gray23 = 0x3b3b3b,
+ Grey23 = 0x3b3b3b,
+ Gray24 = 0x3d3d3d,
+ Grey24 = 0x3d3d3d,
+ Gray25 = 0x404040,
+ Grey25 = 0x404040,
+ Gray26 = 0x424242,
+ Grey26 = 0x424242,
+ Gray27 = 0x454545,
+ Grey27 = 0x454545,
+ Gray28 = 0x474747,
+ Grey28 = 0x474747,
+ Gray29 = 0x4a4a4a,
+ Grey29 = 0x4a4a4a,
+ Gray30 = 0x4d4d4d,
+ Grey30 = 0x4d4d4d,
+ Gray31 = 0x4f4f4f,
+ Grey31 = 0x4f4f4f,
+ Gray32 = 0x525252,
+ Grey32 = 0x525252,
+ Gray33 = 0x545454,
+ Grey33 = 0x545454,
+ Gray34 = 0x575757,
+ Grey34 = 0x575757,
+ Gray35 = 0x595959,
+ Grey35 = 0x595959,
+ Gray36 = 0x5c5c5c,
+ Grey36 = 0x5c5c5c,
+ Gray37 = 0x5e5e5e,
+ Grey37 = 0x5e5e5e,
+ Gray38 = 0x616161,
+ Grey38 = 0x616161,
+ Gray39 = 0x636363,
+ Grey39 = 0x636363,
+ Gray40 = 0x666666,
+ Grey40 = 0x666666,
+ Gray41 = 0x696969,
+ Grey41 = 0x696969,
+ Gray42 = 0x6b6b6b,
+ Grey42 = 0x6b6b6b,
+ Gray43 = 0x6e6e6e,
+ Grey43 = 0x6e6e6e,
+ Gray44 = 0x707070,
+ Grey44 = 0x707070,
+ Gray45 = 0x737373,
+ Grey45 = 0x737373,
+ Gray46 = 0x757575,
+ Grey46 = 0x757575,
+ Gray47 = 0x787878,
+ Grey47 = 0x787878,
+ Gray48 = 0x7a7a7a,
+ Grey48 = 0x7a7a7a,
+ Gray49 = 0x7d7d7d,
+ Grey49 = 0x7d7d7d,
+ Gray50 = 0x7f7f7f,
+ Grey50 = 0x7f7f7f,
+ Gray51 = 0x828282,
+ Grey51 = 0x828282,
+ Gray52 = 0x858585,
+ Grey52 = 0x858585,
+ Gray53 = 0x878787,
+ Grey53 = 0x878787,
+ Gray54 = 0x8a8a8a,
+ Grey54 = 0x8a8a8a,
+ Gray55 = 0x8c8c8c,
+ Grey55 = 0x8c8c8c,
+ Gray56 = 0x8f8f8f,
+ Grey56 = 0x8f8f8f,
+ Gray57 = 0x919191,
+ Grey57 = 0x919191,
+ Gray58 = 0x949494,
+ Grey58 = 0x949494,
+ Gray59 = 0x969696,
+ Grey59 = 0x969696,
+ Gray60 = 0x999999,
+ Grey60 = 0x999999,
+ Gray61 = 0x9c9c9c,
+ Grey61 = 0x9c9c9c,
+ Gray62 = 0x9e9e9e,
+ Grey62 = 0x9e9e9e,
+ Gray63 = 0xa1a1a1,
+ Grey63 = 0xa1a1a1,
+ Gray64 = 0xa3a3a3,
+ Grey64 = 0xa3a3a3,
+ Gray65 = 0xa6a6a6,
+ Grey65 = 0xa6a6a6,
+ Gray66 = 0xa8a8a8,
+ Grey66 = 0xa8a8a8,
+ Gray67 = 0xababab,
+ Grey67 = 0xababab,
+ Gray68 = 0xadadad,
+ Grey68 = 0xadadad,
+ Gray69 = 0xb0b0b0,
+ Grey69 = 0xb0b0b0,
+ Gray70 = 0xb3b3b3,
+ Grey70 = 0xb3b3b3,
+ Gray71 = 0xb5b5b5,
+ Grey71 = 0xb5b5b5,
+ Gray72 = 0xb8b8b8,
+ Grey72 = 0xb8b8b8,
+ Gray73 = 0xbababa,
+ Grey73 = 0xbababa,
+ Gray74 = 0xbdbdbd,
+ Grey74 = 0xbdbdbd,
+ Gray75 = 0xbfbfbf,
+ Grey75 = 0xbfbfbf,
+ Gray76 = 0xc2c2c2,
+ Grey76 = 0xc2c2c2,
+ Gray77 = 0xc4c4c4,
+ Grey77 = 0xc4c4c4,
+ Gray78 = 0xc7c7c7,
+ Grey78 = 0xc7c7c7,
+ Gray79 = 0xc9c9c9,
+ Grey79 = 0xc9c9c9,
+ Gray80 = 0xcccccc,
+ Grey80 = 0xcccccc,
+ Gray81 = 0xcfcfcf,
+ Grey81 = 0xcfcfcf,
+ Gray82 = 0xd1d1d1,
+ Grey82 = 0xd1d1d1,
+ Gray83 = 0xd4d4d4,
+ Grey83 = 0xd4d4d4,
+ Gray84 = 0xd6d6d6,
+ Grey84 = 0xd6d6d6,
+ Gray85 = 0xd9d9d9,
+ Grey85 = 0xd9d9d9,
+ Gray86 = 0xdbdbdb,
+ Grey86 = 0xdbdbdb,
+ Gray87 = 0xdedede,
+ Grey87 = 0xdedede,
+ Gray88 = 0xe0e0e0,
+ Grey88 = 0xe0e0e0,
+ Gray89 = 0xe3e3e3,
+ Grey89 = 0xe3e3e3,
+ Gray90 = 0xe5e5e5,
+ Grey90 = 0xe5e5e5,
+ Gray91 = 0xe8e8e8,
+ Grey91 = 0xe8e8e8,
+ Gray92 = 0xebebeb,
+ Grey92 = 0xebebeb,
+ Gray93 = 0xededed,
+ Grey93 = 0xededed,
+ Gray94 = 0xf0f0f0,
+ Grey94 = 0xf0f0f0,
+ Gray95 = 0xf2f2f2,
+ Grey95 = 0xf2f2f2,
+ Gray96 = 0xf5f5f5,
+ Grey96 = 0xf5f5f5,
+ Gray97 = 0xf7f7f7,
+ Grey97 = 0xf7f7f7,
+ Gray98 = 0xfafafa,
+ Grey98 = 0xfafafa,
+ Gray99 = 0xfcfcfc,
+ Grey99 = 0xfcfcfc,
+ Gray100 = 0xffffff,
+ Grey100 = 0xffffff,
+ DarkGrey = 0xa9a9a9,
+ DarkGray = 0xa9a9a9,
+ DarkBlue = 0x00008b,
+ DarkCyan = 0x008b8b,
+ DarkMagenta = 0x8b008b,
+ DarkRed = 0x8b0000,
+ LightGreen = 0x90ee90,
+ Crimson = 0xdc143c,
+ Indigo = 0x4b0082,
+ Olive = 0x808000,
+ RebeccaPurple = 0x663399,
+ Silver = 0xc0c0c0,
+ Teal = 0x008080,
+};
+};
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyForceInline.hpp b/3rdparty/tracy/tracy/common/TracyForceInline.hpp
new file mode 100644
index 0000000..b6a5833
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyForceInline.hpp
@@ -0,0 +1,20 @@
+#ifndef __TRACYFORCEINLINE_HPP__
+#define __TRACYFORCEINLINE_HPP__
+
+#if defined(__GNUC__)
+# define tracy_force_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define tracy_force_inline __forceinline
+#else
+# define tracy_force_inline inline
+#endif
+
+#if defined(__GNUC__)
+# define tracy_no_inline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define tracy_no_inline __declspec(noinline)
+#else
+# define tracy_no_inline
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyMutex.hpp b/3rdparty/tracy/tracy/common/TracyMutex.hpp
new file mode 100644
index 0000000..57fb01a
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyMutex.hpp
@@ -0,0 +1,24 @@
+#ifndef __TRACYMUTEX_HPP__
+#define __TRACYMUTEX_HPP__
+
+#if defined _MSC_VER
+
+# include <shared_mutex>
+
+namespace tracy
+{
+using TracyMutex = std::shared_mutex;
+}
+
+#else
+
+#include <mutex>
+
+namespace tracy
+{
+using TracyMutex = std::mutex;
+}
+
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyProtocol.hpp b/3rdparty/tracy/tracy/common/TracyProtocol.hpp
new file mode 100644
index 0000000..a647b03
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyProtocol.hpp
@@ -0,0 +1,139 @@
+#ifndef __TRACYPROTOCOL_HPP__
+#define __TRACYPROTOCOL_HPP__
+
+#include <limits>
+#include <stdint.h>
+
+namespace tracy
+{
+
+constexpr unsigned Lz4CompressBound( unsigned isize ) { return isize + ( isize / 255 ) + 16; }
+
+enum : uint32_t { ProtocolVersion = 55 };
+enum : uint16_t { BroadcastVersion = 2 };
+
+using lz4sz_t = uint32_t;
+
+enum { TargetFrameSize = 256 * 1024 };
+enum { LZ4Size = Lz4CompressBound( TargetFrameSize ) };
+static_assert( LZ4Size <= std::numeric_limits<lz4sz_t>::max(), "LZ4Size greater than lz4sz_t" );
+static_assert( TargetFrameSize * 2 >= 64 * 1024, "Not enough space for LZ4 stream buffer" );
+
+enum { HandshakeShibbolethSize = 8 };
+static const char HandshakeShibboleth[HandshakeShibbolethSize] = { 'T', 'r', 'a', 'c', 'y', 'P', 'r', 'f' };
+
+enum HandshakeStatus : uint8_t
+{
+ HandshakePending,
+ HandshakeWelcome,
+ HandshakeProtocolMismatch,
+ HandshakeNotAvailable,
+ HandshakeDropped
+};
+
+enum { WelcomeMessageProgramNameSize = 64 };
+enum { WelcomeMessageHostInfoSize = 1024 };
+
+#pragma pack( 1 )
+
+// Must increase left query space after handling!
+enum ServerQuery : uint8_t
+{
+ ServerQueryTerminate,
+ ServerQueryString,
+ ServerQueryThreadString,
+ ServerQuerySourceLocation,
+ ServerQueryPlotName,
+ ServerQueryCallstackFrame,
+ ServerQueryFrameName,
+ ServerQueryDisconnect,
+ ServerQueryExternalName,
+ ServerQueryParameter,
+ ServerQuerySymbol,
+ ServerQuerySymbolCode,
+ ServerQueryCodeLocation,
+ ServerQuerySourceCode,
+ ServerQueryDataTransfer,
+ ServerQueryDataTransferPart,
+ ServerQueryFiberName
+};
+
+struct ServerQueryPacket
+{
+ ServerQuery type;
+ uint64_t ptr;
+ uint32_t extra;
+};
+
+enum { ServerQueryPacketSize = sizeof( ServerQueryPacket ) };
+
+
+enum CpuArchitecture : uint8_t
+{
+ CpuArchUnknown,
+ CpuArchX86,
+ CpuArchX64,
+ CpuArchArm32,
+ CpuArchArm64
+};
+
+
+struct WelcomeFlag
+{
+ enum _t : uint8_t
+ {
+ OnDemand = 1 << 0,
+ IsApple = 1 << 1,
+ CodeTransfer = 1 << 2,
+ CombineSamples = 1 << 3,
+ IdentifySamples = 1 << 4,
+ };
+};
+
+struct WelcomeMessage
+{
+ double timerMul;
+ int64_t initBegin;
+ int64_t initEnd;
+ uint64_t delay;
+ uint64_t resolution;
+ uint64_t epoch;
+ uint64_t exectime;
+ uint64_t pid;
+ int64_t samplingPeriod;
+ uint8_t flags;
+ uint8_t cpuArch;
+ char cpuManufacturer[12];
+ uint32_t cpuId;
+ char programName[WelcomeMessageProgramNameSize];
+ char hostInfo[WelcomeMessageHostInfoSize];
+};
+
+enum { WelcomeMessageSize = sizeof( WelcomeMessage ) };
+
+
+struct OnDemandPayloadMessage
+{
+ uint64_t frames;
+ uint64_t currentTime;
+};
+
+enum { OnDemandPayloadMessageSize = sizeof( OnDemandPayloadMessage ) };
+
+
+struct BroadcastMessage
+{
+ uint16_t broadcastVersion;
+ uint16_t listenPort;
+ uint32_t protocolVersion;
+ int32_t activeTime; // in seconds
+ char programName[WelcomeMessageProgramNameSize];
+};
+
+enum { BroadcastMessageSize = sizeof( BroadcastMessage ) };
+
+#pragma pack()
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyQueue.hpp b/3rdparty/tracy/tracy/common/TracyQueue.hpp
new file mode 100644
index 0000000..4deb191
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyQueue.hpp
@@ -0,0 +1,850 @@
+#ifndef __TRACYQUEUE_HPP__
+#define __TRACYQUEUE_HPP__
+
+#include <stdint.h>
+
+namespace tracy
+{
+
+enum class QueueType : uint8_t
+{
+ ZoneText,
+ ZoneName,
+ Message,
+ MessageColor,
+ MessageCallstack,
+ MessageColorCallstack,
+ MessageAppInfo,
+ ZoneBeginAllocSrcLoc,
+ ZoneBeginAllocSrcLocCallstack,
+ CallstackSerial,
+ Callstack,
+ CallstackAlloc,
+ CallstackSample,
+ CallstackSampleContextSwitch,
+ FrameImage,
+ ZoneBegin,
+ ZoneBeginCallstack,
+ ZoneEnd,
+ LockWait,
+ LockObtain,
+ LockRelease,
+ LockSharedWait,
+ LockSharedObtain,
+ LockSharedRelease,
+ LockName,
+ MemAlloc,
+ MemAllocNamed,
+ MemFree,
+ MemFreeNamed,
+ MemAllocCallstack,
+ MemAllocCallstackNamed,
+ MemFreeCallstack,
+ MemFreeCallstackNamed,
+ GpuZoneBegin,
+ GpuZoneBeginCallstack,
+ GpuZoneBeginAllocSrcLoc,
+ GpuZoneBeginAllocSrcLocCallstack,
+ GpuZoneEnd,
+ GpuZoneBeginSerial,
+ GpuZoneBeginCallstackSerial,
+ GpuZoneBeginAllocSrcLocSerial,
+ GpuZoneBeginAllocSrcLocCallstackSerial,
+ GpuZoneEndSerial,
+ PlotData,
+ ContextSwitch,
+ ThreadWakeup,
+ GpuTime,
+ GpuContextName,
+ CallstackFrameSize,
+ SymbolInformation,
+ CodeInformation,
+ ExternalNameMetadata,
+ SymbolCodeMetadata,
+ FiberEnter,
+ FiberLeave,
+ Terminate,
+ KeepAlive,
+ ThreadContext,
+ GpuCalibration,
+ Crash,
+ CrashReport,
+ ZoneValidation,
+ ZoneColor,
+ ZoneValue,
+ FrameMarkMsg,
+ FrameMarkMsgStart,
+ FrameMarkMsgEnd,
+ SourceLocation,
+ LockAnnounce,
+ LockTerminate,
+ LockMark,
+ MessageLiteral,
+ MessageLiteralColor,
+ MessageLiteralCallstack,
+ MessageLiteralColorCallstack,
+ GpuNewContext,
+ CallstackFrame,
+ SysTimeReport,
+ TidToPid,
+ HwSampleCpuCycle,
+ HwSampleInstructionRetired,
+ HwSampleCacheReference,
+ HwSampleCacheMiss,
+ HwSampleBranchRetired,
+ HwSampleBranchMiss,
+ PlotConfig,
+ ParamSetup,
+ AckServerQueryNoop,
+ AckSourceCodeNotAvailable,
+ AckSymbolCodeNotAvailable,
+ CpuTopology,
+ SingleStringData,
+ SecondStringData,
+ MemNamePayload,
+ StringData,
+ ThreadName,
+ PlotName,
+ SourceLocationPayload,
+ CallstackPayload,
+ CallstackAllocPayload,
+ FrameName,
+ FrameImageData,
+ ExternalName,
+ ExternalThreadName,
+ SymbolCode,
+ SourceCode,
+ FiberName,
+ NUM_TYPES
+};
+
+#pragma pack( 1 )
+
+struct QueueThreadContext
+{
+ uint32_t thread;
+};
+
+struct QueueZoneBeginLean
+{
+ int64_t time;
+};
+
+struct QueueZoneBegin : public QueueZoneBeginLean
+{
+ uint64_t srcloc; // ptr
+};
+
+struct QueueZoneBeginThread : public QueueZoneBegin
+{
+ uint32_t thread;
+};
+
+struct QueueZoneEnd
+{
+ int64_t time;
+};
+
+struct QueueZoneEndThread : public QueueZoneEnd
+{
+ uint32_t thread;
+};
+
+struct QueueZoneValidation
+{
+ uint32_t id;
+};
+
+struct QueueZoneValidationThread : public QueueZoneValidation
+{
+ uint32_t thread;
+};
+
+struct QueueZoneColor
+{
+ uint8_t r;
+ uint8_t g;
+ uint8_t b;
+};
+
+struct QueueZoneColorThread : public QueueZoneColor
+{
+ uint32_t thread;
+};
+
+struct QueueZoneValue
+{
+ uint64_t value;
+};
+
+struct QueueZoneValueThread : public QueueZoneValue
+{
+ uint32_t thread;
+};
+
+struct QueueStringTransfer
+{
+ uint64_t ptr;
+};
+
+struct QueueFrameMark
+{
+ int64_t time;
+ uint64_t name; // ptr
+};
+
+struct QueueFrameImage
+{
+ uint32_t frame;
+ uint16_t w;
+ uint16_t h;
+ uint8_t flip;
+};
+
+struct QueueFrameImageFat : public QueueFrameImage
+{
+ uint64_t image; // ptr
+};
+
+struct QueueSourceLocation
+{
+ uint64_t name;
+ uint64_t function; // ptr
+ uint64_t file; // ptr
+ uint32_t line;
+ uint8_t r;
+ uint8_t g;
+ uint8_t b;
+};
+
+struct QueueZoneTextFat
+{
+ uint64_t text; // ptr
+ uint16_t size;
+};
+
+struct QueueZoneTextFatThread : public QueueZoneTextFat
+{
+ uint32_t thread;
+};
+
+enum class LockType : uint8_t
+{
+ Lockable,
+ SharedLockable
+};
+
+struct QueueLockAnnounce
+{
+ uint32_t id;
+ int64_t time;
+ uint64_t lckloc; // ptr
+ LockType type;
+};
+
+struct QueueFiberEnter
+{
+ int64_t time;
+ uint64_t fiber; // ptr
+ uint32_t thread;
+};
+
+struct QueueFiberLeave
+{
+ int64_t time;
+ uint32_t thread;
+};
+
+struct QueueLockTerminate
+{
+ uint32_t id;
+ int64_t time;
+};
+
+struct QueueLockWait
+{
+ uint32_t thread;
+ uint32_t id;
+ int64_t time;
+};
+
+struct QueueLockObtain
+{
+ uint32_t thread;
+ uint32_t id;
+ int64_t time;
+};
+
+struct QueueLockRelease
+{
+ uint32_t thread;
+ uint32_t id;
+ int64_t time;
+};
+
+struct QueueLockMark
+{
+ uint32_t thread;
+ uint32_t id;
+ uint64_t srcloc; // ptr
+};
+
+struct QueueLockName
+{
+ uint32_t id;
+};
+
+struct QueueLockNameFat : public QueueLockName
+{
+ uint64_t name; // ptr
+ uint16_t size;
+};
+
+enum class PlotDataType : uint8_t
+{
+ Float,
+ Double,
+ Int
+};
+
+struct QueuePlotData
+{
+ uint64_t name; // ptr
+ int64_t time;
+ PlotDataType type;
+ union
+ {
+ double d;
+ float f;
+ int64_t i;
+ } data;
+};
+
+struct QueueMessage
+{
+ int64_t time;
+};
+
+struct QueueMessageColor : public QueueMessage
+{
+ uint8_t r;
+ uint8_t g;
+ uint8_t b;
+};
+
+struct QueueMessageLiteral : public QueueMessage
+{
+ uint64_t text; // ptr
+};
+
+struct QueueMessageLiteralThread : public QueueMessageLiteral
+{
+ uint32_t thread;
+};
+
+struct QueueMessageColorLiteral : public QueueMessageColor
+{
+ uint64_t text; // ptr
+};
+
+struct QueueMessageColorLiteralThread : public QueueMessageColorLiteral
+{
+ uint32_t thread;
+};
+
+struct QueueMessageFat : public QueueMessage
+{
+ uint64_t text; // ptr
+ uint16_t size;
+};
+
+struct QueueMessageFatThread : public QueueMessageFat
+{
+ uint32_t thread;
+};
+
+struct QueueMessageColorFat : public QueueMessageColor
+{
+ uint64_t text; // ptr
+ uint16_t size;
+};
+
+struct QueueMessageColorFatThread : public QueueMessageColorFat
+{
+ uint32_t thread;
+};
+
+// Don't change order, only add new entries at the end, this is also used on trace dumps!
+enum class GpuContextType : uint8_t
+{
+ Invalid,
+ OpenGl,
+ Vulkan,
+ OpenCL,
+ Direct3D12,
+ Direct3D11
+};
+
+enum GpuContextFlags : uint8_t
+{
+ GpuContextCalibration = 1 << 0
+};
+
+struct QueueGpuNewContext
+{
+ int64_t cpuTime;
+ int64_t gpuTime;
+ uint32_t thread;
+ float period;
+ uint8_t context;
+ GpuContextFlags flags;
+ GpuContextType type;
+};
+
+struct QueueGpuZoneBeginLean
+{
+ int64_t cpuTime;
+ uint32_t thread;
+ uint16_t queryId;
+ uint8_t context;
+};
+
+struct QueueGpuZoneBegin : public QueueGpuZoneBeginLean
+{
+ uint64_t srcloc;
+};
+
+struct QueueGpuZoneEnd
+{
+ int64_t cpuTime;
+ uint32_t thread;
+ uint16_t queryId;
+ uint8_t context;
+};
+
+struct QueueGpuTime
+{
+ int64_t gpuTime;
+ uint16_t queryId;
+ uint8_t context;
+};
+
+struct QueueGpuCalibration
+{
+ int64_t gpuTime;
+ int64_t cpuTime;
+ int64_t cpuDelta;
+ uint8_t context;
+};
+
+struct QueueGpuContextName
+{
+ uint8_t context;
+};
+
+struct QueueGpuContextNameFat : public QueueGpuContextName
+{
+ uint64_t ptr;
+ uint16_t size;
+};
+
+struct QueueMemNamePayload
+{
+ uint64_t name;
+};
+
+struct QueueMemAlloc
+{
+ int64_t time;
+ uint32_t thread;
+ uint64_t ptr;
+ char size[6];
+};
+
+struct QueueMemFree
+{
+ int64_t time;
+ uint32_t thread;
+ uint64_t ptr;
+};
+
+struct QueueCallstackFat
+{
+ uint64_t ptr;
+};
+
+struct QueueCallstackFatThread : public QueueCallstackFat
+{
+ uint32_t thread;
+};
+
+struct QueueCallstackAllocFat
+{
+ uint64_t ptr;
+ uint64_t nativePtr;
+};
+
+struct QueueCallstackAllocFatThread : public QueueCallstackAllocFat
+{
+ uint32_t thread;
+};
+
+struct QueueCallstackSample
+{
+ int64_t time;
+ uint32_t thread;
+};
+
+struct QueueCallstackSampleFat : public QueueCallstackSample
+{
+ uint64_t ptr;
+};
+
+struct QueueCallstackFrameSize
+{
+ uint64_t ptr;
+ uint8_t size;
+};
+
+struct QueueCallstackFrameSizeFat : public QueueCallstackFrameSize
+{
+ uint64_t data;
+ uint64_t imageName;
+};
+
+struct QueueCallstackFrame
+{
+ uint32_t line;
+ uint64_t symAddr;
+ uint32_t symLen;
+};
+
+struct QueueSymbolInformation
+{
+ uint32_t line;
+ uint64_t symAddr;
+};
+
+struct QueueSymbolInformationFat : public QueueSymbolInformation
+{
+ uint64_t fileString;
+ uint8_t needFree;
+};
+
+struct QueueCodeInformation
+{
+ uint64_t symAddr;
+ uint32_t line;
+ uint64_t ptrOffset;
+};
+
+struct QueueCodeInformationFat : public QueueCodeInformation
+{
+ uint64_t fileString;
+ uint8_t needFree;
+};
+
+struct QueueCrashReport
+{
+ int64_t time;
+ uint64_t text; // ptr
+};
+
+struct QueueCrashReportThread
+{
+ uint32_t thread;
+};
+
+struct QueueSysTime
+{
+ int64_t time;
+ float sysTime;
+};
+
+struct QueueContextSwitch
+{
+ int64_t time;
+ uint32_t oldThread;
+ uint32_t newThread;
+ uint8_t cpu;
+ uint8_t reason;
+ uint8_t state;
+};
+
+struct QueueThreadWakeup
+{
+ int64_t time;
+ uint32_t thread;
+};
+
+struct QueueTidToPid
+{
+ uint64_t tid;
+ uint64_t pid;
+};
+
+struct QueueHwSample
+{
+ uint64_t ip;
+ int64_t time;
+};
+
+enum class PlotFormatType : uint8_t
+{
+ Number,
+ Memory,
+ Percentage
+};
+
+struct QueuePlotConfig
+{
+ uint64_t name; // ptr
+ uint8_t type;
+};
+
+struct QueueParamSetup
+{
+ uint32_t idx;
+ uint64_t name; // ptr
+ uint8_t isBool;
+ int32_t val;
+};
+
+struct QueueCpuTopology
+{
+ uint32_t package;
+ uint32_t core;
+ uint32_t thread;
+};
+
+struct QueueExternalNameMetadata
+{
+ uint64_t thread;
+ uint64_t name;
+ uint64_t threadName;
+};
+
+struct QueueSymbolCodeMetadata
+{
+ uint64_t symbol;
+ uint64_t ptr;
+ uint32_t size;
+};
+
+struct QueueHeader
+{
+ union
+ {
+ QueueType type;
+ uint8_t idx;
+ };
+};
+
+struct QueueItem
+{
+ QueueHeader hdr;
+ union
+ {
+ QueueThreadContext threadCtx;
+ QueueZoneBegin zoneBegin;
+ QueueZoneBeginLean zoneBeginLean;
+ QueueZoneBeginThread zoneBeginThread;
+ QueueZoneEnd zoneEnd;
+ QueueZoneEndThread zoneEndThread;
+ QueueZoneValidation zoneValidation;
+ QueueZoneValidationThread zoneValidationThread;
+ QueueZoneColor zoneColor;
+ QueueZoneColorThread zoneColorThread;
+ QueueZoneValue zoneValue;
+ QueueZoneValueThread zoneValueThread;
+ QueueStringTransfer stringTransfer;
+ QueueFrameMark frameMark;
+ QueueFrameImage frameImage;
+ QueueFrameImageFat frameImageFat;
+ QueueSourceLocation srcloc;
+ QueueZoneTextFat zoneTextFat;
+ QueueZoneTextFatThread zoneTextFatThread;
+ QueueLockAnnounce lockAnnounce;
+ QueueLockTerminate lockTerminate;
+ QueueLockWait lockWait;
+ QueueLockObtain lockObtain;
+ QueueLockRelease lockRelease;
+ QueueLockMark lockMark;
+ QueueLockName lockName;
+ QueueLockNameFat lockNameFat;
+ QueuePlotData plotData;
+ QueueMessage message;
+ QueueMessageColor messageColor;
+ QueueMessageLiteral messageLiteral;
+ QueueMessageLiteralThread messageLiteralThread;
+ QueueMessageColorLiteral messageColorLiteral;
+ QueueMessageColorLiteralThread messageColorLiteralThread;
+ QueueMessageFat messageFat;
+ QueueMessageFatThread messageFatThread;
+ QueueMessageColorFat messageColorFat;
+ QueueMessageColorFatThread messageColorFatThread;
+ QueueGpuNewContext gpuNewContext;
+ QueueGpuZoneBegin gpuZoneBegin;
+ QueueGpuZoneBeginLean gpuZoneBeginLean;
+ QueueGpuZoneEnd gpuZoneEnd;
+ QueueGpuTime gpuTime;
+ QueueGpuCalibration gpuCalibration;
+ QueueGpuContextName gpuContextName;
+ QueueGpuContextNameFat gpuContextNameFat;
+ QueueMemAlloc memAlloc;
+ QueueMemFree memFree;
+ QueueMemNamePayload memName;
+ QueueCallstackFat callstackFat;
+ QueueCallstackFatThread callstackFatThread;
+ QueueCallstackAllocFat callstackAllocFat;
+ QueueCallstackAllocFatThread callstackAllocFatThread;
+ QueueCallstackSample callstackSample;
+ QueueCallstackSampleFat callstackSampleFat;
+ QueueCallstackFrameSize callstackFrameSize;
+ QueueCallstackFrameSizeFat callstackFrameSizeFat;
+ QueueCallstackFrame callstackFrame;
+ QueueSymbolInformation symbolInformation;
+ QueueSymbolInformationFat symbolInformationFat;
+ QueueCodeInformation codeInformation;
+ QueueCodeInformationFat codeInformationFat;
+ QueueCrashReport crashReport;
+ QueueCrashReportThread crashReportThread;
+ QueueSysTime sysTime;
+ QueueContextSwitch contextSwitch;
+ QueueThreadWakeup threadWakeup;
+ QueueTidToPid tidToPid;
+ QueueHwSample hwSample;
+ QueuePlotConfig plotConfig;
+ QueueParamSetup paramSetup;
+ QueueCpuTopology cpuTopology;
+ QueueExternalNameMetadata externalNameMetadata;
+ QueueSymbolCodeMetadata symbolCodeMetadata;
+ QueueFiberEnter fiberEnter;
+ QueueFiberLeave fiberLeave;
+ };
+};
+#pragma pack()
+
+
+enum { QueueItemSize = sizeof( QueueItem ) };
+
+static constexpr size_t QueueDataSize[] = {
+ sizeof( QueueHeader ), // zone text
+ sizeof( QueueHeader ), // zone name
+ sizeof( QueueHeader ) + sizeof( QueueMessage ),
+ sizeof( QueueHeader ) + sizeof( QueueMessageColor ),
+ sizeof( QueueHeader ) + sizeof( QueueMessage ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueMessageColor ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueMessage ), // app info
+ sizeof( QueueHeader ) + sizeof( QueueZoneBeginLean ), // allocated source location
+ sizeof( QueueHeader ) + sizeof( QueueZoneBeginLean ), // allocated source location, callstack
+ sizeof( QueueHeader ), // callstack memory
+ sizeof( QueueHeader ), // callstack
+ sizeof( QueueHeader ), // callstack alloc
+ sizeof( QueueHeader ) + sizeof( QueueCallstackSample ),
+ sizeof( QueueHeader ) + sizeof( QueueCallstackSample ), // context switch
+ sizeof( QueueHeader ) + sizeof( QueueFrameImage ),
+ sizeof( QueueHeader ) + sizeof( QueueZoneBegin ),
+ sizeof( QueueHeader ) + sizeof( QueueZoneBegin ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueZoneEnd ),
+ sizeof( QueueHeader ) + sizeof( QueueLockWait ),
+ sizeof( QueueHeader ) + sizeof( QueueLockObtain ),
+ sizeof( QueueHeader ) + sizeof( QueueLockRelease ),
+ sizeof( QueueHeader ) + sizeof( QueueLockWait ), // shared
+ sizeof( QueueHeader ) + sizeof( QueueLockObtain ), // shared
+ sizeof( QueueHeader ) + sizeof( QueueLockRelease ), // shared
+ sizeof( QueueHeader ) + sizeof( QueueLockName ),
+ sizeof( QueueHeader ) + sizeof( QueueMemAlloc ),
+ sizeof( QueueHeader ) + sizeof( QueueMemAlloc ), // named
+ sizeof( QueueHeader ) + sizeof( QueueMemFree ),
+ sizeof( QueueHeader ) + sizeof( QueueMemFree ), // named
+ sizeof( QueueHeader ) + sizeof( QueueMemAlloc ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueMemAlloc ), // callstack, named
+ sizeof( QueueHeader ) + sizeof( QueueMemFree ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueMemFree ), // callstack, named
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBegin ),
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBegin ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBeginLean ),// allocated source location
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBeginLean ),// allocated source location, callstack
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneEnd ),
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBegin ), // serial
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBegin ), // serial, callstack
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBeginLean ),// serial, allocated source location
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneBeginLean ),// serial, allocated source location, callstack
+ sizeof( QueueHeader ) + sizeof( QueueGpuZoneEnd ), // serial
+ sizeof( QueueHeader ) + sizeof( QueuePlotData ),
+ sizeof( QueueHeader ) + sizeof( QueueContextSwitch ),
+ sizeof( QueueHeader ) + sizeof( QueueThreadWakeup ),
+ sizeof( QueueHeader ) + sizeof( QueueGpuTime ),
+ sizeof( QueueHeader ) + sizeof( QueueGpuContextName ),
+ sizeof( QueueHeader ) + sizeof( QueueCallstackFrameSize ),
+ sizeof( QueueHeader ) + sizeof( QueueSymbolInformation ),
+ sizeof( QueueHeader ) + sizeof( QueueCodeInformation ),
+ sizeof( QueueHeader ), // ExternalNameMetadata - not for wire transfer
+ sizeof( QueueHeader ), // SymbolCodeMetadata - not for wire transfer
+ sizeof( QueueHeader ) + sizeof( QueueFiberEnter ),
+ sizeof( QueueHeader ) + sizeof( QueueFiberLeave ),
+ // above items must be first
+ sizeof( QueueHeader ), // terminate
+ sizeof( QueueHeader ), // keep alive
+ sizeof( QueueHeader ) + sizeof( QueueThreadContext ),
+ sizeof( QueueHeader ) + sizeof( QueueGpuCalibration ),
+ sizeof( QueueHeader ), // crash
+ sizeof( QueueHeader ) + sizeof( QueueCrashReport ),
+ sizeof( QueueHeader ) + sizeof( QueueZoneValidation ),
+ sizeof( QueueHeader ) + sizeof( QueueZoneColor ),
+ sizeof( QueueHeader ) + sizeof( QueueZoneValue ),
+ sizeof( QueueHeader ) + sizeof( QueueFrameMark ), // continuous frames
+ sizeof( QueueHeader ) + sizeof( QueueFrameMark ), // start
+ sizeof( QueueHeader ) + sizeof( QueueFrameMark ), // end
+ sizeof( QueueHeader ) + sizeof( QueueSourceLocation ),
+ sizeof( QueueHeader ) + sizeof( QueueLockAnnounce ),
+ sizeof( QueueHeader ) + sizeof( QueueLockTerminate ),
+ sizeof( QueueHeader ) + sizeof( QueueLockMark ),
+ sizeof( QueueHeader ) + sizeof( QueueMessageLiteral ),
+ sizeof( QueueHeader ) + sizeof( QueueMessageColorLiteral ),
+ sizeof( QueueHeader ) + sizeof( QueueMessageLiteral ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueMessageColorLiteral ), // callstack
+ sizeof( QueueHeader ) + sizeof( QueueGpuNewContext ),
+ sizeof( QueueHeader ) + sizeof( QueueCallstackFrame ),
+ sizeof( QueueHeader ) + sizeof( QueueSysTime ),
+ sizeof( QueueHeader ) + sizeof( QueueTidToPid ),
+ sizeof( QueueHeader ) + sizeof( QueueHwSample ), // cpu cycle
+ sizeof( QueueHeader ) + sizeof( QueueHwSample ), // instruction retired
+ sizeof( QueueHeader ) + sizeof( QueueHwSample ), // cache reference
+ sizeof( QueueHeader ) + sizeof( QueueHwSample ), // cache miss
+ sizeof( QueueHeader ) + sizeof( QueueHwSample ), // branch retired
+ sizeof( QueueHeader ) + sizeof( QueueHwSample ), // branch miss
+ sizeof( QueueHeader ) + sizeof( QueuePlotConfig ),
+ sizeof( QueueHeader ) + sizeof( QueueParamSetup ),
+ sizeof( QueueHeader ), // server query acknowledgement
+ sizeof( QueueHeader ), // source code not available
+ sizeof( QueueHeader ), // symbol code not available
+ sizeof( QueueHeader ) + sizeof( QueueCpuTopology ),
+ sizeof( QueueHeader ), // single string data
+ sizeof( QueueHeader ), // second string data
+ sizeof( QueueHeader ) + sizeof( QueueMemNamePayload ),
+ // keep all QueueStringTransfer below
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // string data
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // thread name
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // plot name
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // allocated source location payload
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // callstack payload
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // callstack alloc payload
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // frame name
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // frame image data
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // external name
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // external thread name
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // symbol code
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // source code
+ sizeof( QueueHeader ) + sizeof( QueueStringTransfer ), // fiber name
+};
+
+static_assert( QueueItemSize == 32, "Queue item size not 32 bytes" );
+static_assert( sizeof( QueueDataSize ) / sizeof( size_t ) == (uint8_t)QueueType::NUM_TYPES, "QueueDataSize mismatch" );
+static_assert( sizeof( void* ) <= sizeof( uint64_t ), "Pointer size > 8 bytes" );
+static_assert( sizeof( void* ) == sizeof( uintptr_t ), "Pointer size != uintptr_t" );
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracySocket.cpp b/3rdparty/tracy/tracy/common/TracySocket.cpp
new file mode 100644
index 0000000..176bbc7
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracySocket.cpp
@@ -0,0 +1,749 @@
+#include <assert.h>
+#include <inttypes.h>
+#include <new>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "TracyAlloc.hpp"
+#include "TracySocket.hpp"
+#include "TracySystem.hpp"
+
+#ifdef _WIN32
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <winsock2.h>
+# include <ws2tcpip.h>
+# ifdef _MSC_VER
+# pragma warning(disable:4244)
+# pragma warning(disable:4267)
+# endif
+# define poll WSAPoll
+#else
+# include <arpa/inet.h>
+# include <sys/socket.h>
+# include <sys/param.h>
+# include <errno.h>
+# include <fcntl.h>
+# include <netinet/in.h>
+# include <netdb.h>
+# include <unistd.h>
+# include <poll.h>
+#endif
+
+#ifndef MSG_NOSIGNAL
+# define MSG_NOSIGNAL 0
+#endif
+
+namespace tracy
+{
+
+#ifdef _WIN32
+typedef SOCKET socket_t;
+#else
+typedef int socket_t;
+#endif
+
+#ifdef _WIN32
+struct __wsinit
+{
+ __wsinit()
+ {
+ WSADATA wsaData;
+ if( WSAStartup( MAKEWORD( 2, 2 ), &wsaData ) != 0 )
+ {
+ fprintf( stderr, "Cannot init winsock.\n" );
+ exit( 1 );
+ }
+ }
+};
+
+void InitWinSock()
+{
+ static __wsinit init;
+}
+#endif
+
+
+enum { BufSize = 128 * 1024 };
+
+Socket::Socket()
+ : m_buf( (char*)tracy_malloc( BufSize ) )
+ , m_bufPtr( nullptr )
+ , m_sock( -1 )
+ , m_bufLeft( 0 )
+ , m_ptr( nullptr )
+{
+#ifdef _WIN32
+ InitWinSock();
+#endif
+}
+
+Socket::Socket( int sock )
+ : m_buf( (char*)tracy_malloc( BufSize ) )
+ , m_bufPtr( nullptr )
+ , m_sock( sock )
+ , m_bufLeft( 0 )
+ , m_ptr( nullptr )
+{
+}
+
+Socket::~Socket()
+{
+ tracy_free( m_buf );
+ if( m_sock.load( std::memory_order_relaxed ) != -1 )
+ {
+ Close();
+ }
+ if( m_ptr )
+ {
+ freeaddrinfo( m_res );
+#ifdef _WIN32
+ closesocket( m_connSock );
+#else
+ close( m_connSock );
+#endif
+ }
+}
+
+bool Socket::Connect( const char* addr, uint16_t port )
+{
+ assert( !IsValid() );
+
+ if( m_ptr )
+ {
+ const auto c = connect( m_connSock, m_ptr->ai_addr, m_ptr->ai_addrlen );
+ if( c == -1 )
+ {
+#if defined _WIN32
+ const auto err = WSAGetLastError();
+ if( err == WSAEALREADY || err == WSAEINPROGRESS ) return false;
+ if( err != WSAEISCONN )
+ {
+ freeaddrinfo( m_res );
+ closesocket( m_connSock );
+ m_ptr = nullptr;
+ return false;
+ }
+#else
+ const auto err = errno;
+ if( err == EALREADY || err == EINPROGRESS ) return false;
+ if( err != EISCONN )
+ {
+ freeaddrinfo( m_res );
+ close( m_connSock );
+ m_ptr = nullptr;
+ return false;
+ }
+#endif
+ }
+
+#if defined _WIN32
+ u_long nonblocking = 0;
+ ioctlsocket( m_connSock, FIONBIO, &nonblocking );
+#else
+ int flags = fcntl( m_connSock, F_GETFL, 0 );
+ fcntl( m_connSock, F_SETFL, flags & ~O_NONBLOCK );
+#endif
+ m_sock.store( m_connSock, std::memory_order_relaxed );
+ freeaddrinfo( m_res );
+ m_ptr = nullptr;
+ return true;
+ }
+
+ struct addrinfo hints;
+ struct addrinfo *res, *ptr;
+
+ memset( &hints, 0, sizeof( hints ) );
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+
+ char portbuf[32];
+ sprintf( portbuf, "%" PRIu16, port );
+
+ if( getaddrinfo( addr, portbuf, &hints, &res ) != 0 ) return false;
+ int sock = 0;
+ for( ptr = res; ptr; ptr = ptr->ai_next )
+ {
+ if( ( sock = socket( ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol ) ) == -1 ) continue;
+#if defined __APPLE__
+ int val = 1;
+ setsockopt( sock, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof( val ) );
+#endif
+#if defined _WIN32
+ u_long nonblocking = 1;
+ ioctlsocket( sock, FIONBIO, &nonblocking );
+#else
+ int flags = fcntl( sock, F_GETFL, 0 );
+ fcntl( sock, F_SETFL, flags | O_NONBLOCK );
+#endif
+ if( connect( sock, ptr->ai_addr, ptr->ai_addrlen ) == 0 )
+ {
+ break;
+ }
+ else
+ {
+#if defined _WIN32
+ const auto err = WSAGetLastError();
+ if( err != WSAEWOULDBLOCK )
+ {
+ closesocket( sock );
+ continue;
+ }
+#else
+ if( errno != EINPROGRESS )
+ {
+ close( sock );
+ continue;
+ }
+#endif
+ }
+ m_res = res;
+ m_ptr = ptr;
+ m_connSock = sock;
+ return false;
+ }
+ freeaddrinfo( res );
+ if( !ptr ) return false;
+
+#if defined _WIN32
+ u_long nonblocking = 0;
+ ioctlsocket( sock, FIONBIO, &nonblocking );
+#else
+ int flags = fcntl( sock, F_GETFL, 0 );
+ fcntl( sock, F_SETFL, flags & ~O_NONBLOCK );
+#endif
+
+ m_sock.store( sock, std::memory_order_relaxed );
+ return true;
+}
+
+bool Socket::ConnectBlocking( const char* addr, uint16_t port )
+{
+ assert( !IsValid() );
+ assert( !m_ptr );
+
+ struct addrinfo hints;
+ struct addrinfo *res, *ptr;
+
+ memset( &hints, 0, sizeof( hints ) );
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+
+ char portbuf[32];
+ sprintf( portbuf, "%" PRIu16, port );
+
+ if( getaddrinfo( addr, portbuf, &hints, &res ) != 0 ) return false;
+ int sock = 0;
+ for( ptr = res; ptr; ptr = ptr->ai_next )
+ {
+ if( ( sock = socket( ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol ) ) == -1 ) continue;
+#if defined __APPLE__
+ int val = 1;
+ setsockopt( sock, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof( val ) );
+#endif
+ if( connect( sock, ptr->ai_addr, ptr->ai_addrlen ) == -1 )
+ {
+#ifdef _WIN32
+ closesocket( sock );
+#else
+ close( sock );
+#endif
+ continue;
+ }
+ break;
+ }
+ freeaddrinfo( res );
+ if( !ptr ) return false;
+
+ m_sock.store( sock, std::memory_order_relaxed );
+ return true;
+}
+
+void Socket::Close()
+{
+ const auto sock = m_sock.load( std::memory_order_relaxed );
+ assert( sock != -1 );
+#ifdef _WIN32
+ closesocket( sock );
+#else
+ close( sock );
+#endif
+ m_sock.store( -1, std::memory_order_relaxed );
+}
+
+int Socket::Send( const void* _buf, int len )
+{
+ const auto sock = m_sock.load( std::memory_order_relaxed );
+ auto buf = (const char*)_buf;
+ assert( sock != -1 );
+ auto start = buf;
+ while( len > 0 )
+ {
+ auto ret = send( sock, buf, len, MSG_NOSIGNAL );
+ if( ret == -1 ) return -1;
+ len -= ret;
+ buf += ret;
+ }
+ return int( buf - start );
+}
+
+int Socket::GetSendBufSize()
+{
+ const auto sock = m_sock.load( std::memory_order_relaxed );
+ int bufSize;
+#if defined _WIN32
+ int sz = sizeof( bufSize );
+ getsockopt( sock, SOL_SOCKET, SO_SNDBUF, (char*)&bufSize, &sz );
+#else
+ socklen_t sz = sizeof( bufSize );
+ getsockopt( sock, SOL_SOCKET, SO_SNDBUF, &bufSize, &sz );
+#endif
+ return bufSize;
+}
+
+int Socket::RecvBuffered( void* buf, int len, int timeout )
+{
+ if( len <= m_bufLeft )
+ {
+ memcpy( buf, m_bufPtr, len );
+ m_bufPtr += len;
+ m_bufLeft -= len;
+ return len;
+ }
+
+ if( m_bufLeft > 0 )
+ {
+ memcpy( buf, m_bufPtr, m_bufLeft );
+ const auto ret = m_bufLeft;
+ m_bufLeft = 0;
+ return ret;
+ }
+
+ if( len >= BufSize ) return Recv( buf, len, timeout );
+
+ m_bufLeft = Recv( m_buf, BufSize, timeout );
+ if( m_bufLeft <= 0 ) return m_bufLeft;
+
+ const auto sz = len < m_bufLeft ? len : m_bufLeft;
+ memcpy( buf, m_buf, sz );
+ m_bufPtr = m_buf + sz;
+ m_bufLeft -= sz;
+ return sz;
+}
+
+int Socket::Recv( void* _buf, int len, int timeout )
+{
+ const auto sock = m_sock.load( std::memory_order_relaxed );
+ auto buf = (char*)_buf;
+
+ struct pollfd fd;
+ fd.fd = (socket_t)sock;
+ fd.events = POLLIN;
+
+ if( poll( &fd, 1, timeout ) > 0 )
+ {
+ return recv( sock, buf, len, 0 );
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+int Socket::ReadUpTo( void* _buf, int len, int timeout )
+{
+ const auto sock = m_sock.load( std::memory_order_relaxed );
+ auto buf = (char*)_buf;
+
+ int rd = 0;
+ while( len > 0 )
+ {
+ const auto res = recv( sock, buf, len, 0 );
+ if( res == 0 ) break;
+ if( res == -1 ) return -1;
+ len -= res;
+ rd += res;
+ buf += res;
+ }
+ return rd;
+}
+
+bool Socket::Read( void* buf, int len, int timeout )
+{
+ auto cbuf = (char*)buf;
+ while( len > 0 )
+ {
+ if( !ReadImpl( cbuf, len, timeout ) ) return false;
+ }
+ return true;
+}
+
+bool Socket::ReadImpl( char*& buf, int& len, int timeout )
+{
+ const auto sz = RecvBuffered( buf, len, timeout );
+ switch( sz )
+ {
+ case 0:
+ return false;
+ case -1:
+#ifdef _WIN32
+ {
+ auto err = WSAGetLastError();
+ if( err == WSAECONNABORTED || err == WSAECONNRESET ) return false;
+ }
+#endif
+ break;
+ default:
+ len -= sz;
+ buf += sz;
+ break;
+ }
+ return true;
+}
+
+bool Socket::ReadRaw( void* _buf, int len, int timeout )
+{
+ auto buf = (char*)_buf;
+ while( len > 0 )
+ {
+ const auto sz = Recv( buf, len, timeout );
+ if( sz <= 0 ) return false;
+ len -= sz;
+ buf += sz;
+ }
+ return true;
+}
+
+bool Socket::HasData()
+{
+ const auto sock = m_sock.load( std::memory_order_relaxed );
+ if( m_bufLeft > 0 ) return true;
+
+ struct pollfd fd;
+ fd.fd = (socket_t)sock;
+ fd.events = POLLIN;
+
+ return poll( &fd, 1, 0 ) > 0;
+}
+
+bool Socket::IsValid() const
+{
+ return m_sock.load( std::memory_order_relaxed ) >= 0;
+}
+
+
+ListenSocket::ListenSocket()
+ : m_sock( -1 )
+{
+#ifdef _WIN32
+ InitWinSock();
+#endif
+}
+
+ListenSocket::~ListenSocket()
+{
+ if( m_sock != -1 ) Close();
+}
+
+static int addrinfo_and_socket_for_family( uint16_t port, int ai_family, struct addrinfo** res )
+{
+ struct addrinfo hints;
+ memset( &hints, 0, sizeof( hints ) );
+ hints.ai_family = ai_family;
+ hints.ai_socktype = SOCK_STREAM;
+#ifndef TRACY_ONLY_LOCALHOST
+ const char* onlyLocalhost = GetEnvVar( "TRACY_ONLY_LOCALHOST" );
+ if( !onlyLocalhost || onlyLocalhost[0] != '1' )
+ {
+ hints.ai_flags = AI_PASSIVE;
+ }
+#endif
+ char portbuf[32];
+ sprintf( portbuf, "%" PRIu16, port );
+ if( getaddrinfo( nullptr, portbuf, &hints, res ) != 0 ) return -1;
+ int sock = socket( (*res)->ai_family, (*res)->ai_socktype, (*res)->ai_protocol );
+ if (sock == -1) freeaddrinfo( *res );
+ return sock;
+}
+
+bool ListenSocket::Listen( uint16_t port, int backlog )
+{
+ assert( m_sock == -1 );
+
+ struct addrinfo* res = nullptr;
+
+#if !defined TRACY_ONLY_IPV4 && !defined TRACY_ONLY_LOCALHOST
+ const char* onlyIPv4 = GetEnvVar( "TRACY_ONLY_IPV4" );
+ if( !onlyIPv4 || onlyIPv4[0] != '1' )
+ {
+ m_sock = addrinfo_and_socket_for_family( port, AF_INET6, &res );
+ }
+#endif
+ if (m_sock == -1)
+ {
+ // IPV6 protocol may not be available/is disabled. Try to create a socket
+ // with the IPV4 protocol
+ m_sock = addrinfo_and_socket_for_family( port, AF_INET, &res );
+ if( m_sock == -1 ) return false;
+ }
+#if defined _WIN32
+ unsigned long val = 0;
+ setsockopt( m_sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&val, sizeof( val ) );
+#elif defined BSD
+ int val = 0;
+ setsockopt( m_sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&val, sizeof( val ) );
+ val = 1;
+ setsockopt( m_sock, SOL_SOCKET, SO_REUSEADDR, &val, sizeof( val ) );
+#else
+ int val = 1;
+ setsockopt( m_sock, SOL_SOCKET, SO_REUSEADDR, &val, sizeof( val ) );
+#endif
+ if( bind( m_sock, res->ai_addr, res->ai_addrlen ) == -1 ) { freeaddrinfo( res ); Close(); return false; }
+ if( listen( m_sock, backlog ) == -1 ) { freeaddrinfo( res ); Close(); return false; }
+ freeaddrinfo( res );
+ return true;
+}
+
+Socket* ListenSocket::Accept()
+{
+ struct sockaddr_storage remote;
+ socklen_t sz = sizeof( remote );
+
+ struct pollfd fd;
+ fd.fd = (socket_t)m_sock;
+ fd.events = POLLIN;
+
+ if( poll( &fd, 1, 10 ) > 0 )
+ {
+ int sock = accept( m_sock, (sockaddr*)&remote, &sz);
+ if( sock == -1 ) return nullptr;
+
+#if defined __APPLE__
+ int val = 1;
+ setsockopt( sock, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof( val ) );
+#endif
+
+ auto ptr = (Socket*)tracy_malloc( sizeof( Socket ) );
+ new(ptr) Socket( sock );
+ return ptr;
+ }
+ else
+ {
+ return nullptr;
+ }
+}
+
+void ListenSocket::Close()
+{
+ assert( m_sock != -1 );
+#ifdef _WIN32
+ closesocket( m_sock );
+#else
+ close( m_sock );
+#endif
+ m_sock = -1;
+}
+
+UdpBroadcast::UdpBroadcast()
+ : m_sock( -1 )
+{
+#ifdef _WIN32
+ InitWinSock();
+#endif
+}
+
+UdpBroadcast::~UdpBroadcast()
+{
+ if( m_sock != -1 ) Close();
+}
+
+bool UdpBroadcast::Open( const char* addr, uint16_t port )
+{
+ assert( m_sock == -1 );
+
+ struct addrinfo hints;
+ struct addrinfo *res, *ptr;
+
+ memset( &hints, 0, sizeof( hints ) );
+ hints.ai_family = AF_INET;
+ hints.ai_socktype = SOCK_DGRAM;
+
+ char portbuf[32];
+ sprintf( portbuf, "%" PRIu16, port );
+
+ if( getaddrinfo( addr, portbuf, &hints, &res ) != 0 ) return false;
+ int sock = 0;
+ for( ptr = res; ptr; ptr = ptr->ai_next )
+ {
+ if( ( sock = socket( ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol ) ) == -1 ) continue;
+#if defined __APPLE__
+ int val = 1;
+ setsockopt( sock, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof( val ) );
+#endif
+#if defined _WIN32
+ unsigned long broadcast = 1;
+ if( setsockopt( sock, SOL_SOCKET, SO_BROADCAST, (const char*)&broadcast, sizeof( broadcast ) ) == -1 )
+#else
+ int broadcast = 1;
+ if( setsockopt( sock, SOL_SOCKET, SO_BROADCAST, &broadcast, sizeof( broadcast ) ) == -1 )
+#endif
+ {
+#ifdef _WIN32
+ closesocket( sock );
+#else
+ close( sock );
+#endif
+ continue;
+ }
+ break;
+ }
+ freeaddrinfo( res );
+ if( !ptr ) return false;
+
+ m_sock = sock;
+ inet_pton( AF_INET, addr, &m_addr );
+ return true;
+}
+
+void UdpBroadcast::Close()
+{
+ assert( m_sock != -1 );
+#ifdef _WIN32
+ closesocket( m_sock );
+#else
+ close( m_sock );
+#endif
+ m_sock = -1;
+}
+
+int UdpBroadcast::Send( uint16_t port, const void* data, int len )
+{
+ assert( m_sock != -1 );
+ struct sockaddr_in addr;
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons( port );
+ addr.sin_addr.s_addr = m_addr;
+ return sendto( m_sock, (const char*)data, len, MSG_NOSIGNAL, (sockaddr*)&addr, sizeof( addr ) );
+}
+
+IpAddress::IpAddress()
+ : m_number( 0 )
+{
+ *m_text = '\0';
+}
+
+IpAddress::~IpAddress()
+{
+}
+
+void IpAddress::Set( const struct sockaddr& addr )
+{
+#if defined _WIN32 && ( !defined NTDDI_WIN10 || NTDDI_VERSION < NTDDI_WIN10 )
+ struct sockaddr_in tmp;
+ memcpy( &tmp, &addr, sizeof( tmp ) );
+ auto ai = &tmp;
+#else
+ auto ai = (const struct sockaddr_in*)&addr;
+#endif
+ inet_ntop( AF_INET, &ai->sin_addr, m_text, 17 );
+ m_number = ai->sin_addr.s_addr;
+}
+
+UdpListen::UdpListen()
+ : m_sock( -1 )
+{
+#ifdef _WIN32
+ InitWinSock();
+#endif
+}
+
+UdpListen::~UdpListen()
+{
+ if( m_sock != -1 ) Close();
+}
+
+bool UdpListen::Listen( uint16_t port )
+{
+ assert( m_sock == -1 );
+
+ int sock;
+ if( ( sock = socket( AF_INET, SOCK_DGRAM, 0 ) ) == -1 ) return false;
+
+#if defined __APPLE__
+ int val = 1;
+ setsockopt( sock, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof( val ) );
+#endif
+#if defined _WIN32
+ unsigned long reuse = 1;
+ setsockopt( m_sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof( reuse ) );
+#else
+ int reuse = 1;
+ setsockopt( m_sock, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof( reuse ) );
+#endif
+#if defined _WIN32
+ unsigned long broadcast = 1;
+ if( setsockopt( sock, SOL_SOCKET, SO_BROADCAST, (const char*)&broadcast, sizeof( broadcast ) ) == -1 )
+#else
+ int broadcast = 1;
+ if( setsockopt( sock, SOL_SOCKET, SO_BROADCAST, &broadcast, sizeof( broadcast ) ) == -1 )
+#endif
+ {
+#ifdef _WIN32
+ closesocket( sock );
+#else
+ close( sock );
+#endif
+ return false;
+ }
+
+ struct sockaddr_in addr;
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons( port );
+ addr.sin_addr.s_addr = INADDR_ANY;
+
+ if( bind( sock, (sockaddr*)&addr, sizeof( addr ) ) == -1 )
+ {
+#ifdef _WIN32
+ closesocket( sock );
+#else
+ close( sock );
+#endif
+ return false;
+ }
+
+ m_sock = sock;
+ return true;
+}
+
+void UdpListen::Close()
+{
+ assert( m_sock != -1 );
+#ifdef _WIN32
+ closesocket( m_sock );
+#else
+ close( m_sock );
+#endif
+ m_sock = -1;
+}
+
+const char* UdpListen::Read( size_t& len, IpAddress& addr, int timeout )
+{
+ static char buf[2048];
+
+ struct pollfd fd;
+ fd.fd = (socket_t)m_sock;
+ fd.events = POLLIN;
+ if( poll( &fd, 1, timeout ) <= 0 ) return nullptr;
+
+ sockaddr sa;
+ socklen_t salen = sizeof( struct sockaddr );
+ len = (size_t)recvfrom( m_sock, buf, 2048, 0, &sa, &salen );
+ addr.Set( sa );
+
+ return buf;
+}
+
+}
diff --git a/3rdparty/tracy/tracy/common/TracySocket.hpp b/3rdparty/tracy/tracy/common/TracySocket.hpp
new file mode 100644
index 0000000..4de4cca
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracySocket.hpp
@@ -0,0 +1,156 @@
+#ifndef __TRACYSOCKET_HPP__
+#define __TRACYSOCKET_HPP__
+
+#include <atomic>
+#include <stdint.h>
+
+#include "TracyForceInline.hpp"
+
+struct addrinfo;
+struct sockaddr;
+
+namespace tracy
+{
+
+#ifdef _WIN32
+void InitWinSock();
+#endif
+
+class Socket
+{
+public:
+ Socket();
+ Socket( int sock );
+ ~Socket();
+
+ bool Connect( const char* addr, uint16_t port );
+ bool ConnectBlocking( const char* addr, uint16_t port );
+ void Close();
+
+ int Send( const void* buf, int len );
+ int GetSendBufSize();
+
+ int ReadUpTo( void* buf, int len, int timeout );
+ bool Read( void* buf, int len, int timeout );
+
+ template<typename ShouldExit>
+ bool Read( void* buf, int len, int timeout, ShouldExit exitCb )
+ {
+ auto cbuf = (char*)buf;
+ while( len > 0 )
+ {
+ if( exitCb() ) return false;
+ if( !ReadImpl( cbuf, len, timeout ) ) return false;
+ }
+ return true;
+ }
+
+ bool ReadRaw( void* buf, int len, int timeout );
+ bool HasData();
+ bool IsValid() const;
+
+ Socket( const Socket& ) = delete;
+ Socket( Socket&& ) = delete;
+ Socket& operator=( const Socket& ) = delete;
+ Socket& operator=( Socket&& ) = delete;
+
+private:
+ int RecvBuffered( void* buf, int len, int timeout );
+ int Recv( void* buf, int len, int timeout );
+
+ bool ReadImpl( char*& buf, int& len, int timeout );
+
+ char* m_buf;
+ char* m_bufPtr;
+ std::atomic<int> m_sock;
+ int m_bufLeft;
+
+ struct addrinfo *m_res;
+ struct addrinfo *m_ptr;
+ int m_connSock;
+};
+
+class ListenSocket
+{
+public:
+ ListenSocket();
+ ~ListenSocket();
+
+ bool Listen( uint16_t port, int backlog );
+ Socket* Accept();
+ void Close();
+
+ ListenSocket( const ListenSocket& ) = delete;
+ ListenSocket( ListenSocket&& ) = delete;
+ ListenSocket& operator=( const ListenSocket& ) = delete;
+ ListenSocket& operator=( ListenSocket&& ) = delete;
+
+private:
+ int m_sock;
+};
+
+class UdpBroadcast
+{
+public:
+ UdpBroadcast();
+ ~UdpBroadcast();
+
+ bool Open( const char* addr, uint16_t port );
+ void Close();
+
+ int Send( uint16_t port, const void* data, int len );
+
+ UdpBroadcast( const UdpBroadcast& ) = delete;
+ UdpBroadcast( UdpBroadcast&& ) = delete;
+ UdpBroadcast& operator=( const UdpBroadcast& ) = delete;
+ UdpBroadcast& operator=( UdpBroadcast&& ) = delete;
+
+private:
+ int m_sock;
+ uint32_t m_addr;
+};
+
+class IpAddress
+{
+public:
+ IpAddress();
+ ~IpAddress();
+
+ void Set( const struct sockaddr& addr );
+
+ uint32_t GetNumber() const { return m_number; }
+ const char* GetText() const { return m_text; }
+
+ IpAddress( const IpAddress& ) = delete;
+ IpAddress( IpAddress&& ) = delete;
+ IpAddress& operator=( const IpAddress& ) = delete;
+ IpAddress& operator=( IpAddress&& ) = delete;
+
+private:
+ uint32_t m_number;
+ char m_text[17];
+};
+
+class UdpListen
+{
+public:
+ UdpListen();
+ ~UdpListen();
+
+ bool Listen( uint16_t port );
+ void Close();
+
+ const char* Read( size_t& len, IpAddress& addr, int timeout );
+
+ UdpListen( const UdpListen& ) = delete;
+ UdpListen( UdpListen&& ) = delete;
+ UdpListen& operator=( const UdpListen& ) = delete;
+ UdpListen& operator=( UdpListen&& ) = delete;
+
+private:
+ int m_sock;
+};
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyStackFrames.cpp b/3rdparty/tracy/tracy/common/TracyStackFrames.cpp
new file mode 100644
index 0000000..7b0abac
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyStackFrames.cpp
@@ -0,0 +1,122 @@
+#include "TracyStackFrames.hpp"
+
+namespace tracy
+{
+
+const char* s_tracyStackFrames_[] = {
+ "tracy::Callstack",
+ "tracy::Callstack(int)",
+ "tracy::GpuCtxScope::{ctor}",
+ "tracy::Profiler::SendCallstack",
+ "tracy::Profiler::SendCallstack(int)",
+ "tracy::Profiler::SendCallstack(int, unsigned long)",
+ "tracy::Profiler::MemAllocCallstack",
+ "tracy::Profiler::MemAllocCallstack(void const*, unsigned long, int)",
+ "tracy::Profiler::MemFreeCallstack",
+ "tracy::Profiler::MemFreeCallstack(void const*, int)",
+ "tracy::ScopedZone::{ctor}",
+ "tracy::ScopedZone::ScopedZone(tracy::SourceLocationData const*, int, bool)",
+ "tracy::Profiler::Message",
+ nullptr
+};
+
+const char** s_tracyStackFrames = s_tracyStackFrames_;
+
+const StringMatch s_tracySkipSubframes_[] = {
+ { "/include/arm_neon.h", 19 },
+ { "/include/adxintrin.h", 20 },
+ { "/include/ammintrin.h", 20 },
+ { "/include/amxbf16intrin.h", 24 },
+ { "/include/amxint8intrin.h", 24 },
+ { "/include/amxtileintrin.h", 24 },
+ { "/include/avx2intrin.h", 21 },
+ { "/include/avx5124fmapsintrin.h", 29 },
+ { "/include/avx5124vnniwintrin.h", 29 },
+ { "/include/avx512bf16intrin.h", 27 },
+ { "/include/avx512bf16vlintrin.h", 29 },
+ { "/include/avx512bitalgintrin.h", 29 },
+ { "/include/avx512bwintrin.h", 25 },
+ { "/include/avx512cdintrin.h", 25 },
+ { "/include/avx512dqintrin.h", 25 },
+ { "/include/avx512erintrin.h", 25 },
+ { "/include/avx512fintrin.h", 24 },
+ { "/include/avx512ifmaintrin.h", 27 },
+ { "/include/avx512ifmavlintrin.h", 29 },
+ { "/include/avx512pfintrin.h", 25 },
+ { "/include/avx512vbmi2intrin.h", 28 },
+ { "/include/avx512vbmi2vlintrin.h", 30 },
+ { "/include/avx512vbmiintrin.h", 27 },
+ { "/include/avx512vbmivlintrin.h", 29 },
+ { "/include/avx512vlbwintrin.h", 27 },
+ { "/include/avx512vldqintrin.h", 27 },
+ { "/include/avx512vlintrin.h", 25 },
+ { "/include/avx512vnniintrin.h", 27 },
+ { "/include/avx512vnnivlintrin.h", 29 },
+ { "/include/avx512vp2intersectintrin.h", 35 },
+ { "/include/avx512vp2intersectvlintrin.h", 37 },
+ { "/include/avx512vpopcntdqintrin.h", 32 },
+ { "/include/avx512vpopcntdqvlintrin.h", 34 },
+ { "/include/avxintrin.h", 20 },
+ { "/include/avxvnniintrin.h", 24 },
+ { "/include/bmi2intrin.h", 21 },
+ { "/include/bmiintrin.h", 20 },
+ { "/include/bmmintrin.h", 20 },
+ { "/include/cetintrin.h", 20 },
+ { "/include/cldemoteintrin.h", 25 },
+ { "/include/clflushoptintrin.h", 27 },
+ { "/include/clwbintrin.h", 21 },
+ { "/include/clzerointrin.h", 23 },
+ { "/include/emmintrin.h", 20 },
+ { "/include/enqcmdintrin.h", 23 },
+ { "/include/f16cintrin.h", 21 },
+ { "/include/fma4intrin.h", 21 },
+ { "/include/fmaintrin.h", 20 },
+ { "/include/fxsrintrin.h", 21 },
+ { "/include/gfniintrin.h", 21 },
+ { "/include/hresetintrin.h", 23 },
+ { "/include/ia32intrin.h", 21 },
+ { "/include/immintrin.h", 20 },
+ { "/include/keylockerintrin.h", 26 },
+ { "/include/lwpintrin.h", 20 },
+ { "/include/lzcntintrin.h", 22 },
+ { "/include/mmintrin.h", 19 },
+ { "/include/movdirintrin.h", 23 },
+ { "/include/mwaitxintrin.h", 23 },
+ { "/include/nmmintrin.h", 20 },
+ { "/include/pconfigintrin.h", 24 },
+ { "/include/pkuintrin.h", 20 },
+ { "/include/pmmintrin.h", 20 },
+ { "/include/popcntintrin.h", 23 },
+ { "/include/prfchwintrin.h", 23 },
+ { "/include/rdseedintrin.h", 23 },
+ { "/include/rtmintrin.h", 20 },
+ { "/include/serializeintrin.h", 26 },
+ { "/include/sgxintrin.h", 20 },
+ { "/include/shaintrin.h", 20 },
+ { "/include/smmintrin.h", 20 },
+ { "/include/tbmintrin.h", 20 },
+ { "/include/tmmintrin.h", 20 },
+ { "/include/tsxldtrkintrin.h", 25 },
+ { "/include/uintrintrin.h", 22 },
+ { "/include/vaesintrin.h", 21 },
+ { "/include/vpclmulqdqintrin.h", 27 },
+ { "/include/waitpkgintrin.h", 24 },
+ { "/include/wbnoinvdintrin.h", 25 },
+ { "/include/wmmintrin.h", 20 },
+ { "/include/x86gprintrin.h", 23 },
+ { "/include/x86intrin.h", 20 },
+ { "/include/xmmintrin.h", 20 },
+ { "/include/xopintrin.h", 20 },
+ { "/include/xsavecintrin.h", 23 },
+ { "/include/xsaveintrin.h", 22 },
+ { "/include/xsaveoptintrin.h", 25 },
+ { "/include/xsavesintrin.h", 23 },
+ { "/include/xtestintrin.h", 22 },
+ { "/bits/atomic_base.h", 19 },
+ { "/atomic", 7 },
+ {}
+};
+
+const StringMatch* s_tracySkipSubframes = s_tracySkipSubframes_;
+
+}
diff --git a/3rdparty/tracy/tracy/common/TracyStackFrames.hpp b/3rdparty/tracy/tracy/common/TracyStackFrames.hpp
new file mode 100644
index 0000000..9d4262c
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyStackFrames.hpp
@@ -0,0 +1,22 @@
+#ifndef __TRACYSTACKFRAMES_HPP__
+#define __TRACYSTACKFRAMES_HPP__
+
+#include <stddef.h>
+
+namespace tracy
+{
+
+struct StringMatch
+{
+ const char* str;
+ size_t len;
+};
+
+extern const char** s_tracyStackFrames;
+extern const StringMatch* s_tracySkipSubframes;
+
+static constexpr int s_tracySkipSubframesMinLen = 7;
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracySystem.cpp b/3rdparty/tracy/tracy/common/TracySystem.cpp
new file mode 100644
index 0000000..1248fde
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracySystem.cpp
@@ -0,0 +1,304 @@
+#ifdef _MSC_VER
+# pragma warning(disable:4996)
+#endif
+#if defined _WIN32
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <windows.h>
+# include <malloc.h>
+# include "TracyUwp.hpp"
+#else
+# include <pthread.h>
+# include <string.h>
+# include <unistd.h>
+#endif
+
+#ifdef __linux__
+# ifdef __ANDROID__
+# include <sys/types.h>
+# else
+# include <sys/syscall.h>
+# endif
+# include <fcntl.h>
+#elif defined __FreeBSD__
+# include <sys/thr.h>
+#elif defined __NetBSD__ || defined __DragonFly__
+# include <sys/lwp.h>
+#endif
+
+#ifdef __MINGW32__
+# define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "TracySystem.hpp"
+
+#if defined _WIN32
+extern "C" typedef HRESULT (WINAPI *t_SetThreadDescription)( HANDLE, PCWSTR );
+extern "C" typedef HRESULT (WINAPI *t_GetThreadDescription)( HANDLE, PWSTR* );
+#endif
+
+#ifdef TRACY_ENABLE
+# include <atomic>
+# include "TracyAlloc.hpp"
+#endif
+
+namespace tracy
+{
+
+namespace detail
+{
+
+TRACY_API uint32_t GetThreadHandleImpl()
+{
+#if defined _WIN32
+ static_assert( sizeof( decltype( GetCurrentThreadId() ) ) <= sizeof( uint32_t ), "Thread handle too big to fit in protocol" );
+ return uint32_t( GetCurrentThreadId() );
+#elif defined __APPLE__
+ uint64_t id;
+ pthread_threadid_np( pthread_self(), &id );
+ return uint32_t( id );
+#elif defined __ANDROID__
+ return (uint32_t)gettid();
+#elif defined __linux__
+ return (uint32_t)syscall( SYS_gettid );
+#elif defined __FreeBSD__
+ long id;
+ thr_self( &id );
+ return id;
+#elif defined __NetBSD__
+ return _lwp_self();
+#elif defined __DragonFly__
+ return lwp_gettid();
+#elif defined __OpenBSD__
+ return getthrid();
+#else
+ // To add support for a platform, retrieve and return the kernel thread identifier here.
+ //
+ // Note that pthread_t (as for example returned by pthread_self()) is *not* a kernel
+ // thread identifier. It is a pointer to a library-allocated data structure instead.
+ // Such pointers will be reused heavily, making the pthread_t non-unique. Additionally
+ // a 64-bit pointer cannot be reliably truncated to 32 bits.
+ #error "Unsupported platform!"
+#endif
+
+}
+
+}
+
+#ifdef TRACY_ENABLE
+struct ThreadNameData
+{
+ uint32_t id;
+ const char* name;
+ ThreadNameData* next;
+};
+std::atomic<ThreadNameData*>& GetThreadNameData();
+#endif
+
+#ifdef _MSC_VER
+# pragma pack( push, 8 )
+struct THREADNAME_INFO
+{
+ DWORD dwType;
+ LPCSTR szName;
+ DWORD dwThreadID;
+ DWORD dwFlags;
+};
+# pragma pack(pop)
+
+void ThreadNameMsvcMagic( const THREADNAME_INFO& info )
+{
+ __try
+ {
+ RaiseException( 0x406D1388, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info );
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+}
+#endif
+
+TRACY_API void SetThreadName( const char* name )
+{
+#if defined _WIN32
+# ifdef TRACY_UWP
+ static auto _SetThreadDescription = &::SetThreadDescription;
+# else
+ static auto _SetThreadDescription = (t_SetThreadDescription)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "SetThreadDescription" );
+# endif
+ if( _SetThreadDescription )
+ {
+ wchar_t buf[256];
+ mbstowcs( buf, name, 256 );
+ _SetThreadDescription( GetCurrentThread(), buf );
+ }
+ else
+ {
+# if defined _MSC_VER
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = name;
+ info.dwThreadID = GetCurrentThreadId();
+ info.dwFlags = 0;
+ ThreadNameMsvcMagic( info );
+# endif
+ }
+#elif defined _GNU_SOURCE && !defined __EMSCRIPTEN__
+ {
+ const auto sz = strlen( name );
+ if( sz <= 15 )
+ {
+#if defined __APPLE__
+ pthread_setname_np( name );
+#else
+ pthread_setname_np( pthread_self(), name );
+#endif
+ }
+ else
+ {
+ char buf[16];
+ memcpy( buf, name, 15 );
+ buf[15] = '\0';
+#if defined __APPLE__
+ pthread_setname_np( buf );
+#else
+ pthread_setname_np( pthread_self(), buf );
+#endif
+ }
+ }
+#endif
+#ifdef TRACY_ENABLE
+ {
+ const auto sz = strlen( name );
+ char* buf = (char*)tracy_malloc( sz+1 );
+ memcpy( buf, name, sz );
+ buf[sz] = '\0';
+ auto data = (ThreadNameData*)tracy_malloc_fast( sizeof( ThreadNameData ) );
+ data->id = detail::GetThreadHandleImpl();
+ data->name = buf;
+ data->next = GetThreadNameData().load( std::memory_order_relaxed );
+ while( !GetThreadNameData().compare_exchange_weak( data->next, data, std::memory_order_release, std::memory_order_relaxed ) ) {}
+ }
+#endif
+}
+
+TRACY_API const char* GetThreadName( uint32_t id )
+{
+ static char buf[256];
+#ifdef TRACY_ENABLE
+ auto ptr = GetThreadNameData().load( std::memory_order_relaxed );
+ while( ptr )
+ {
+ if( ptr->id == id )
+ {
+ return ptr->name;
+ }
+ ptr = ptr->next;
+ }
+#else
+# if defined _WIN32
+# ifdef TRACY_UWP
+ static auto _GetThreadDescription = &::GetThreadDescription;
+# else
+ static auto _GetThreadDescription = (t_GetThreadDescription)GetProcAddress( GetModuleHandleA( "kernel32.dll" ), "GetThreadDescription" );
+# endif
+ if( _GetThreadDescription )
+ {
+ auto hnd = OpenThread( THREAD_QUERY_LIMITED_INFORMATION, FALSE, (DWORD)id );
+ if( hnd != 0 )
+ {
+ PWSTR tmp;
+ _GetThreadDescription( hnd, &tmp );
+ auto ret = wcstombs( buf, tmp, 256 );
+ CloseHandle( hnd );
+ if( ret != 0 )
+ {
+ return buf;
+ }
+ }
+ }
+# elif defined __linux__
+ int cs, fd;
+ char path[32];
+# ifdef __ANDROID__
+ int tid = gettid();
+# else
+ int tid = (int) syscall( SYS_gettid );
+# endif
+ snprintf( path, sizeof( path ), "/proc/self/task/%d/comm", tid );
+ sprintf( buf, "%" PRIu32, id );
+# ifndef __ANDROID__
+ pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &cs );
+# endif
+ if ( ( fd = open( path, O_RDONLY ) ) > 0) {
+ int len = read( fd, buf, 255 );
+ if( len > 0 )
+ {
+ buf[len] = 0;
+ if( len > 1 && buf[len-1] == '\n' )
+ {
+ buf[len-1] = 0;
+ }
+ }
+ close( fd );
+ }
+# ifndef __ANDROID__
+ pthread_setcancelstate( cs, 0 );
+# endif
+ return buf;
+# endif
+#endif
+ sprintf( buf, "%" PRIu32, id );
+ return buf;
+}
+
+TRACY_API const char* GetEnvVar( const char* name )
+{
+#if defined _WIN32
+ // unfortunately getenv() on Windows is just fundamentally broken. It caches the entire
+ // environment block once on startup, then never refreshes it again. If any environment
+ // strings are added or modified after startup of the CRT, those changes will not be
+ // seen by getenv(). This removes the possibility of an app using this SDK from
+ // programmatically setting any of the behaviour controlling envvars here.
+ //
+ // To work around this, we'll instead go directly to the Win32 environment strings APIs
+ // to get the current value.
+ static char buffer[1024];
+ DWORD const kBufferSize = DWORD(sizeof(buffer) / sizeof(buffer[0]));
+ DWORD count = GetEnvironmentVariableA(name, buffer, kBufferSize);
+
+ if( count == 0 )
+ return nullptr;
+
+ if( count >= kBufferSize )
+ {
+ char* buf = reinterpret_cast<char*>(_alloca(count + 1));
+ count = GetEnvironmentVariableA(name, buf, count + 1);
+ memcpy(buffer, buf, kBufferSize);
+ buffer[kBufferSize - 1] = 0;
+ }
+
+ return buffer;
+#else
+ return getenv(name);
+#endif
+}
+
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+TRACY_API void ___tracy_set_thread_name( const char* name ) { tracy::SetThreadName( name ); }
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracySystem.hpp b/3rdparty/tracy/tracy/common/TracySystem.hpp
new file mode 100644
index 0000000..e0040e9
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracySystem.hpp
@@ -0,0 +1,32 @@
+#ifndef __TRACYSYSTEM_HPP__
+#define __TRACYSYSTEM_HPP__
+
+#include <stdint.h>
+
+#include "TracyApi.h"
+
+namespace tracy
+{
+
+namespace detail
+{
+TRACY_API uint32_t GetThreadHandleImpl();
+}
+
+#ifdef TRACY_ENABLE
+TRACY_API uint32_t GetThreadHandle();
+#else
+static inline uint32_t GetThreadHandle()
+{
+ return detail::GetThreadHandleImpl();
+}
+#endif
+
+TRACY_API void SetThreadName( const char* name );
+TRACY_API const char* GetThreadName( uint32_t id );
+
+TRACY_API const char* GetEnvVar(const char* name);
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyUwp.hpp b/3rdparty/tracy/tracy/common/TracyUwp.hpp
new file mode 100644
index 0000000..7dce96b
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyUwp.hpp
@@ -0,0 +1,11 @@
+#ifndef __TRACYUWP_HPP__
+#define __TRACYUWP_HPP__
+
+#ifdef _WIN32
+# include <winapifamily.h>
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+# define TRACY_UWP
+# endif
+#endif
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/TracyYield.hpp b/3rdparty/tracy/tracy/common/TracyYield.hpp
new file mode 100644
index 0000000..403ca29
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/TracyYield.hpp
@@ -0,0 +1,26 @@
+#ifndef __TRACYYIELD_HPP__
+#define __TRACYYIELD_HPP__
+
+#if defined __SSE2__ || defined _M_AMD64 || _M_IX86_FP == 2
+# include <emmintrin.h>
+#else
+# include <thread>
+#endif
+
+#include "TracyForceInline.hpp"
+
+namespace tracy
+{
+
+static tracy_force_inline void YieldThread()
+{
+#if defined __SSE2__ || defined _M_AMD64 || _M_IX86_FP == 2
+ _mm_pause();
+#else
+ std::this_thread::yield();
+#endif
+}
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/common/src-from-vcxproj.mk b/3rdparty/tracy/tracy/common/src-from-vcxproj.mk
new file mode 100644
index 0000000..3a16b19
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/src-from-vcxproj.mk
@@ -0,0 +1,21 @@
+# Extract the actual list of source files from a sibling Visual Studio project.
+
+# Ensure these are simply-substituted variables, without changing their values.
+SRC := $(SRC)
+SRC2 := $(SRC2)
+SRC3 := $(SRC3)
+SRC4 := $(SRC4)
+
+# Paths here are relative to the directory in which make was invoked, not to
+# this file, so ../win32/$(PROJECT).vcxproj refers to the Visual Studio project
+# of whichever tool is including this makefile fragment.
+
+BASE := $(shell egrep 'ClCompile.*cpp"' ../win32/$(PROJECT).vcxproj | sed -e 's/.*\"\(.*\)\".*/\1/' | sed -e 's@\\@/@g')
+BASE2 := $(shell egrep 'ClCompile.*c"' ../win32/$(PROJECT).vcxproj | sed -e 's/.*\"\(.*\)\".*/\1/' | sed -e 's@\\@/@g')
+BASE4 := $(shell egrep 'None.*S"' ../win32/$(PROJECT).vcxproj | sed -e 's/.*\"\(.*\)\".*/\1/' | sed -e 's@\\@/@g')
+
+# The tool-specific makefile may request that certain files be omitted.
+SRC += $(filter-out $(FILTER),$(BASE))
+SRC2 += $(filter-out $(FILTER),$(BASE2))
+SRC3 += $(filter-out $(FILTER),$(BASE3))
+SRC4 += $(filter-out $(FILTER),$(BASE4))
diff --git a/3rdparty/tracy/tracy/common/tracy_lz4.cpp b/3rdparty/tracy/tracy/common/tracy_lz4.cpp
new file mode 100644
index 0000000..5a31aa7
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/tracy_lz4.cpp
@@ -0,0 +1,2492 @@
+/*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+#ifndef LZ4_HEAPMODE
+# define LZ4_HEAPMODE 0
+#endif
+
+/*
+ * LZ4_ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
+
+
+/*-************************************
+* CPU Feature Detection
+**************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ * It can generate buggy code on targets which assembly generation depends on alignment.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+# if defined(__GNUC__) && \
+ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define LZ4_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
+# define LZ4_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
+# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+
+/*-************************************
+* Dependency
+**************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+# define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
+#include "tracy_lz4.hpp"
+/* see also "memory routines" below */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+# include <intrin.h> /* only present in VS2005+ */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif /* _MSC_VER */
+
+#ifndef LZ4_FORCE_INLINE
+# ifdef _MSC_VER /* Visual Studio */
+# define LZ4_FORCE_INLINE static __forceinline
+# else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define LZ4_FORCE_INLINE static inline
+# endif
+# else
+# define LZ4_FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+# endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+# undef LZ4_FORCE_INLINE
+# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
+#else
+# define LZ4_FORCE_O2
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#ifndef likely
+#define likely(expr) expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
+#define unlikely(expr) expect((expr) != 0, 0)
+#endif
+
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+# define LZ4_ALIGN_TEST 1
+#endif
+
+
+/*-************************************
+* Memory routines
+**************************************/
+#ifdef LZ4_USER_MEMORY_FUNCTIONS
+/* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void LZ4_free(void* p);
+# define ALLOC(s) LZ4_malloc(s)
+# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+# define FREEMEM(p) LZ4_free(p)
+#else
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,s)
+# define FREEMEM(p) free(p)
+#endif
+
+#include <string.h> /* memset, memcpy */
+#define MEM_INIT(p,v,s) memset((p),(v),(s))
+
+
+/*-************************************
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/*-************************************
+* Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+# include <stdio.h>
+ static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
+
+static int LZ4_isAligned(const void* ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment -1)) == 0;
+}
+
+
+/*-************************************
+* Types
+**************************************/
+#include <limits.h>
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef uintptr_t uptrval;
+#else
+# if UINT_MAX != 4294967295UL
+# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+# endif
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+#endif
+
+#if defined(__x86_64__)
+ typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+ typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
+typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+} limitedOutput_directive;
+
+namespace tracy
+{
+
+/*-************************************
+* Reading and writing into memory
+**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#else
+#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+#endif
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
+
+static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
+
+static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
+
+static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+
+#else /* safe and portable access using memcpy() */
+
+static U16 LZ4_read16(const void* memPtr)
+{
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static reg_t LZ4_read_ARCH(const void* memPtr)
+{
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static void LZ4_write16(void* memPtr, U16 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void* memPtr, U32 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read16(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian()) {
+ LZ4_write16(memPtr, value);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+LZ4_FORCE_INLINE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+}
+
+static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
+
+
+#ifndef LZ4_FAST_DEC_LOOP
+# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+# define LZ4_FAST_DEC_LOOP 1
+# elif defined(__aarch64__) && !defined(__clang__)
+ /* On aarch64, we disable this optimization for clang because on certain
+ * mobile chipsets, performance is reduced with clang. For information
+ * refer to https://github.com/lz4/lz4/pull/707 */
+# define LZ4_FAST_DEC_LOOP 1
+# else
+# define LZ4_FAST_DEC_LOOP 0
+# endif
+#endif
+
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ assert(srcPtr + offset == dstPtr);
+ if (offset < 8) {
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ LZ4_memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
+
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_INLINE void
+LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
+
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ BYTE v[8];
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
+ switch(offset) {
+ case 1:
+ MEM_INIT(v, *srcPtr, 8);
+ break;
+ case 2:
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], v, 4);
+ break;
+ case 4:
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+}
+#endif
+
+
+/*-************************************
+* Common functions
+**************************************/
+LZ4_FORCE_INLINE unsigned LZ4_NbCommonBytes (reg_t val)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+ if (sizeof(val) == 8) {
+# if defined(_MSC_VER) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
+# else
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
+# endif
+ } else /* 32 bits */ {
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
+# else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+# endif
+ }
+ } else /* Big Endian CPU */ {
+ if (sizeof(val)==8) {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
+# else
+#if 1
+ /* this method is probably faster,
+ * but adds a 128 bytes lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+ /* this method doesn't consume memory space like the previous one,
+ * but it contains several branches,
+ * that may end up slowing execution */
+ static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
+ unsigned r;
+ if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+#endif
+# endif
+ } else /* 32 bits */ {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz((U32)val) >> 3;
+# else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
+# endif
+ }
+ }
+}
+
+
+#define STEPSIZE sizeof(reg_t)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+{
+ const BYTE* const pStart = pIn;
+
+ if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn+=STEPSIZE; pMatch+=STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ } }
+
+ while (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+
+ if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (unsigned)(pIn - pStart);
+}
+
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+* Local Constants
+**************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
+static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
+
+
+/*-************************************
+* Local Structures and types
+**************************************/
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ * blob being compressed are valid and refer to the preceding
+ * content (of length ctx->dictSize), which is available
+ * contiguously preceding in memory the content currently
+ * being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
+ * else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
+ * content is in a separate context, pointed to by
+ * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
+ * entries in the current context that refer to positions
+ * preceding the beginning of the current compression are
+ * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ * ->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in the ctx
+ * ->dictCtx->hashTable.
+ */
+typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+
+/*-************************************
+* Local Utils
+**************************************/
+int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
+const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
+
+
+/*-************************************
+* Internal Definitions used in Tests
+**************************************/
+
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize);
+
+/*-******************************
+* Compression functions
+********************************/
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
+ if (LZ4_isLittleEndian()) {
+ const U64 prime5bytes = 889523592379ULL;
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+ } else {
+ const U64 prime8bytes = 11400714785074694791ULL;
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+ }
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
+{
+ if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */ assert(0); return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+ void* tableBase, tableType_t const tableType,
+ const BYTE* srcBase)
+{
+ switch (tableType)
+ {
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32* const hashTable = (const U32*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16* const hashTable = (const U16*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+ return hashTable[h];
+ }
+ assert(0); return 0; /* forbidden case */
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+ { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+}
+
+LZ4_FORCE_INLINE const BYTE*
+LZ4_getPosition(const BYTE* p,
+ const void* tableBase, tableType_t tableType,
+ const BYTE* srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
+
+LZ4_FORCE_INLINE void
+LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+ const int inputSize,
+ const tableType_t tableType) {
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if ((tableType_t)cctx->tableType != clearedTable) {
+ assert(inputSize >= 0);
+ if ((tableType_t)cctx->tableType != tableType
+ || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+ || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+ || tableType == byPtr
+ || inputSize >= 4 KB)
+ {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+ MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = (U32)clearedTable;
+ } else {
+ DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
+ * than compressing without a gap. However, compressing with
+ * currentOffset == 0 is faster still, so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time.
+ * Presumed already validated at this stage:
+ * - source != NULL
+ * - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+ LZ4_stream_t_internal* const cctx,
+ const char* const source,
+ char* const dest,
+ const int inputSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int maxOutputSize,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ int result;
+ const BYTE* ip = (const BYTE*) source;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE* base = (const BYTE*) source - startIndex;
+ const BYTE* lowLimit;
+
+ const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+ const BYTE* const dictionary =
+ dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+ const U32 dictSize =
+ dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+ const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
+ const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const olimit = op + maxOutputSize;
+
+ U32 offset = 0;
+ U32 forwardH;
+
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+ assert(ip != NULL);
+ /* If init conditions are not met, we don't have to mark stream
+ * as having dirty context, since no action was taken yet */
+ if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
+ if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
+ }
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = (U32)tableType;
+
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE* match;
+ BYTE* token;
+ const BYTE* filledIp;
+
+ /* Find a match */
+ if (tableType == byPtr) {
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+ } while ( (match+LZ4_DISTANCE_MAX < ip)
+ || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+ } else { /* byU32, byU16 */
+
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const current = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex <= current);
+ assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ matchIndex += dictDelta; /* make dictCtx index comparable with current context */
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+ assert(startIndex - matchIndex >= MINMATCH);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+ DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
+ assert(matchIndex < current);
+ if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+ && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+ continue;
+ } /* too far */
+ assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem) offset = current - matchIndex;
+ break; /* match found */
+ }
+
+ } while(1);
+ }
+
+ /* Catch up */
+ filledIp = ip;
+ while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
+
+ /* Encode Literals */
+ { unsigned const litLength = (unsigned)(ip - anchor);
+ token = op++;
+ if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ int len = (int)(litLength - RUN_MASK);
+ *token = (RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op+litLength);
+ op+=litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
+ }
+
+_next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
+ * - offset : if maybe_ext_memSegment==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+ */
+
+ if ((outputDirective == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
+ /* Encode Offset */
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+ assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+ LZ4_writeLE16(op, (U16)offset); op+=2;
+ } else {
+ DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+ assert(ip-match <= LZ4_DISTANCE_MAX);
+ LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+ }
+
+ /* Encode MatchLength */
+ { unsigned matchCode;
+
+ if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+ && (lowLimit==dictionary) /* match within extDict */ ) {
+ const BYTE* limit = ip + (dictEnd-match);
+ assert(dictEnd > match);
+ if (limit > matchlimit) limit = matchlimit;
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+ ip += (size_t)matchCode + MINMATCH;
+ if (ip==limit) {
+ unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
+ matchCode += more;
+ ip += more;
+ }
+ DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
+ } else {
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+ ip += (size_t)matchCode + MINMATCH;
+ DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
+ }
+
+ if ((outputDirective) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+ if (outputDirective == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ assert(newMatchCode < matchCode);
+ matchCode = newMatchCode;
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE* ptr;
+ DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp; ++ptr) {
+ U32 const h = LZ4_hashPosition(ptr, tableType);
+ LZ4_clearHash(h, cctx->hashTable, tableType);
+ }
+ }
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+ while (matchCode >= 4*255) {
+ op+=4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4*255;
+ }
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip >= mflimitPlusOne) break;
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+
+ /* Test next position */
+ if (tableType == byPtr) {
+
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ if ( (match+LZ4_DISTANCE_MAX >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const current = (U32)(ip-base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+ && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+ && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+ token=op++;
+ *token=0;
+ if (maybe_extMem) offset = current - matchIndex;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+ goto _next_match;
+ }
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRun = (size_t)(iend - anchor);
+ if ( (outputDirective) && /* Check output buffer overflow */
+ (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+ if (outputDirective == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ assert(olimit >= op);
+ lastRun = (size_t)(olimit-op) - 1/*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
+ op += lastRun;
+ }
+
+ if (outputDirective == fillOutput) {
+ *inputConsumed = (int) (((const char*)ip)-source);
+ }
+ result = (int)(((char*)op) - dest);
+ assert(result > 0);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
+ return result;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of src == (NULL, 0)
+ * and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal* const cctx,
+ const char* const src,
+ char* const dst,
+ const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+ srcSize, dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert (inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective,
+ tableType, dictDirective, dictIssue, acceleration);
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
+ assert(ctx != NULL);
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
+{
+ LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+ if (dstCapacity >= LZ4_compressBound(srcSize)) {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+
/*! LZ4_compress_fast() :
 *  One-shot compression with a temporary state.
 *  The LZ4_stream_t state lives on the heap when LZ4_HEAPMODE is set
 *  (avoiding a large stack object), on the stack otherwise.
 *  @return : compressed size written into `dest`,
 *            or 0 on failure (including heap allocation failure). */
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
#if (LZ4_HEAPMODE)
    LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctxPtr == NULL) return 0;
#else
    LZ4_stream_t ctx;
    LZ4_stream_t* const ctxPtr = &ctx;
#endif
    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);

#if (LZ4_HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}
+
+
/*! LZ4_compress_default() :
 *  Convenience entry point : LZ4_compress_fast() at the default
 *  (minimum) acceleration level. */
int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
    int const acceleration = 1;
    return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, acceleration);
}
+
+
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
+static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+ void* const s = LZ4_initStream(state, sizeof (*state));
+ assert(s != NULL); (void)s;
+
+ if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+ } else {
+ tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+ } }
+}
+
+
/*! LZ4_compress_destSize() :
 *  Compresses as much of *srcSizePtr as fits into targetDstSize bytes of dst
 *  (delegated to the fillOutput path of LZ4_compress_destSize_extState()).
 *  On return, *srcSizePtr holds the number of source bytes actually consumed.
 *  The temporary state is heap-allocated when LZ4_HEAPMODE is set.
 *  @return : compressed size, or 0 on failure (e.g. allocation failure). */
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctx == NULL) return 0;
#else
    LZ4_stream_t ctxBody;
    LZ4_stream_t* ctx = &ctxBody;
#endif

    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

#if (LZ4_HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}
+
+
+
+/*-******************************
+* Streaming functions
+********************************/
+
+LZ4_stream_t* LZ4_createStream(void)
+{
+ LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
+ LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+ if (lz4s == NULL) return NULL;
+ LZ4_initStream(lz4s, sizeof(*lz4s));
+ return lz4s;
+}
+
/* Returns the alignment requirement of LZ4_stream_t, measured with the
 * classic struct-padding trick (offset of a LZ4_stream_t placed after one
 * char). When LZ4_ALIGN_TEST is disabled, returns 1 so the alignment check
 * in LZ4_initStream() always passes. */
static size_t LZ4_stream_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_stream_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_stream_t);
#else
    return 1;  /* effectively disabled */
#endif
}
+
+LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
+{
+ DEBUGLOG(5, "LZ4_initStream");
+ if (buffer == NULL) { return NULL; }
+ if (size < sizeof(LZ4_stream_t)) { return NULL; }
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+ return (LZ4_stream_t*)buffer;
+}
+
/* resetStream is now deprecated,
 * prefer initStream() which is more general */
/* Fully wipes the internal state : hash table, offsets, and dictionary
 * pointers. Unlike LZ4_initStream(), performs no NULL/size/alignment checks. */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}
+
/* Fast variant of LZ4_resetStream() : delegates to LZ4_prepareTable() with a
 * zero-size hint and byU32 addressing.
 * NOTE(review): LZ4_prepareTable() is defined elsewhere in this file; it is
 * presumably cheaper than a full MEM_INIT wipe — confirm at its definition. */
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}
+
+int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
+{
+ if (!LZ4_stream) return 0; /* support free on NULL */
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
+ FREEMEM(LZ4_stream);
+ return (0);
+}
+
+
#define HASH_UNIT sizeof(reg_t)
/*! LZ4_loadDict() :
 *  Loads `dictionary` into LZ4_dict so that subsequent
 *  LZ4_compress_fast_continue() calls can reference it.
 *  Only the last 64 KB are retained (the format's maximum match distance).
 *  @return : number of dictionary bytes retained,
 *            or 0 when dictSize < HASH_UNIT (too small to index). */
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    /* It's necessary to reset the context,
     * and not just continue it with prepareTable()
     * to avoid any risk of generating overflowing matchIndex
     * when compressing using this dictionary */
    LZ4_resetStream(LZ4_dict);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k. */
    dict->currentOffset += 64 KB;

    /* Too small to produce even one hash entry; stream stays freshly reset. */
    if (dictSize < (int)HASH_UNIT) {
        return 0;
    }

    /* Keep only the trailing 64 KB window of the dictionary. */
    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    base = dictEnd - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->tableType = (U32)tableType;

    /* Index dictionary positions sparsely : one hash entry every 3 bytes. */
    while (p <= dictEnd-HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, tableType, base);
        p+=3;
    }

    return (int)dict->dictSize;
}
+
/*! LZ4_attach_dictionary() :
 *  Associates a dictionary context (e.g. one filled by LZ4_loadDict())
 *  with a working stream, by reference — no table copy is made here.
 *  Passing NULL, or a dictionary stream with an empty dictionary,
 *  detaches any currently attached dictionary. */
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
    const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
        &(dictionaryStream->internal_donotuse);

    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
             workingStream, dictionaryStream,
             dictCtx != NULL ? dictCtx->dictSize : 0);

    if (dictCtx != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicate a miss. In that case, we need
         * to bump the offset to something non-zero.
         */
        if (workingStream->internal_donotuse.currentOffset == 0) {
            workingStream->internal_donotuse.currentOffset = 64 KB;
        }

        /* Don't actually attach an empty dictionary.
         */
        if (dictCtx->dictSize == 0) {
            dictCtx = NULL;
        }
    }
    workingStream->internal_donotuse.dictCtx = dictCtx;
}
+
+
/* LZ4_renormDictT() :
 * Called before compressing `nextSize` more bytes into the stream.
 * If currentOffset would cross 0x80000000 (risking ptrdiff_t overflow on
 * 32-bit targets), rebase every hash-table entry and reset the stream as if
 * it had just loaded a 64 KB dictionary. */
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
    assert(nextSize >= 0);
    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        DEBUGLOG(4, "LZ4_renormDictT");
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            /* entries older than the new base become unreachable : clear them */
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}
+
+
/*! LZ4_compress_fast_continue() :
 *  Compresses `source` as a block chained to the stream's history
 *  (previously compressed blocks and/or an attached dictionary).
 *  Chooses between three modes below : prefix (source directly follows the
 *  previous data), external dictionary, and external dictionary context.
 *  Updates the stream so the block just compressed becomes the next
 *  dictionary.
 *  @return : compressed size, or 0 if it cannot fit in maxOutputSize. */
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
                                const char* source, char* dest,
                                int inputSize, int maxOutputSize,
                                int acceleration)
{
    const tableType_t tableType = byU32;
    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
    const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);

    LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;

    /* invalidate tiny dictionaries */
    if ( (streamPtr->dictSize-1 < 4-1)   /* intentional underflow : catches dictSize in {0,1,2,3} */
      && (dictEnd != (const BYTE*)source) ) {
        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
        streamPtr->dictSize = 0;
        streamPtr->dictionary = (const BYTE*)source;
        dictEnd = (const BYTE*)source;
    }

    /* Check overlapping input/dictionary space */
    { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            /* shrink the dictionary to the part not overwritten by the input */
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source) {
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
        else
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
    }

    /* external dictionary mode */
    { int result;
        if (streamPtr->dictCtx) {
            /* We depend here on the fact that dictCtx'es (produced by
             * LZ4_loadDict) guarantee that their tables contain no references
             * to offsets between dictCtx->currentOffset - 64 KB and
             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
             * to use noDictIssue even when the dict isn't a full 64 KB.
             */
            if (inputSize > 4 KB) {
                /* For compressing large blobs, it is faster to pay the setup
                 * cost to copy the dictionary's tables into the active context,
                 * so that the compression loop is only looking into one table.
                 */
                LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
            }
        } else {
            if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            }
        }
        /* the block just compressed becomes the dictionary for the next one */
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        return result;
    }
}
+
+
/* Hidden debug function, to force-test external dictionary mode */
/* Like LZ4_compress_fast_continue() but always takes the usingExtDict path,
 * with notLimited output (dest is assumed large enough) and acceleration 1.
 * As with _continue(), the compressed block becomes the next dictionary. */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
    int result;

    LZ4_renormDictT(streamPtr, srcSize);   /* avoid index overflow */

    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
    } else {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
    }

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)srcSize;

    return result;
}
+
+
+/*! LZ4_saveDict() :
+ * If previously compressed data block is not guaranteed to remain available at its memory location,
+ * save it into a safer place (char* safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards,
+ * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
+ * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ */
+int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
+ const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
+
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE*)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+
+
+
+/*-*******************************
+ * Decompression functions
+ ********************************/
+
/* Compile-time directives for LZ4_decompress_generic() :
 * - endCondition_directive : stop on dst capacity, or on validated src size
 * - earlyEnd_directive     : decode the whole block, or allow partial decode */
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;

/* Local MIN; #undef first in case a platform header already defines one. */
#undef MIN
#define MIN(a,b)    ( (a) < (b) ? (a) : (b) )
+
+/* Read the variable-length literal or match length.
+ *
+ * ip - pointer to use as input.
+ * lencheck - end ip. Return an error if ip advances >= lencheck.
+ * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
+ * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
+ * error (output) - error code. Should be set to 0 before call.
+ */
+typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
+LZ4_FORCE_INLINE unsigned
+read_variable_length(const BYTE**ip, const BYTE* lencheck,
+ int loop_check, int initial_check,
+ variable_length_error* error)
+{
+ U32 length = 0;
+ U32 s;
+ if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = initial_error;
+ return length;
+ }
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = loop_error;
+ return length;
+ }
+ } while (s==255);
+
+ return length;
+}
+
+/*! LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
+ * in order to remove useless branches during compilation optimization.
+ * @return : nb of bytes written into dst when endOnInput (the safe variants),
+ * nb of bytes read from src when endOnOutput (the fast variants),
+ * or a negative value if the input is detected malformed.
+ */
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(
+ const char* const src,
+ char* const dst,
+ int srcSize,
+ int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
+
+ endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
+ earlyEnd_directive partialDecoding, /* full, partial */
+ dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
+ const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note : = 0 if noDict */
+ )
+{
+ if (src == NULL) { return -1; }
+
+ { const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
+ BYTE* op = (BYTE*) dst;
+ BYTE* const oend = op + outputSize;
+ BYTE* cpy;
+
+ const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
+
+ const int safeDecode = (endOnInput==endOnInputSize);
+ /* offset validation is only needed in safe mode, and can be skipped once a full 64 KB of history backs the window */
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+
+
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ const BYTE* match;
+ size_t offset;
+ unsigned token;
+ size_t length;
+
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
+
+ /* Special cases */
+ assert(lowPrefix <= op);
+ if ((endOnInput) && (unlikely(outputSize==0))) {
+ /* Empty output buffer */
+ if (partialDecoding) return 0;
+ return ((srcSize==1) && (*ip==0)) ? 0 : -1;
+ }
+ if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
+ if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
+
+ /* Currently the fast loop shows a regression on qualcomm arm chips. */
+#if LZ4_FAST_DEC_LOOP
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(6, "skip fast decode loop");
+ goto safe_decode;
+ }
+
+ /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+ if (endOnInput) { assert(ip < iend); }
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+
+ /* copy literals */
+ cpy = op+length;
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if (endOnInput) { /* LZ4_decompress_safe() */
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+ LZ4_wildCopy32(op, ip, cpy);
+ } else { /* LZ4_decompress_fast() */
+ if (cpy>oend-8) { goto safe_literal_copy; }
+ LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
+ * it doesn't know input length, and only relies on end-of-block properties */
+ }
+ ip += length; op = cpy;
+ } else {
+ cpy = op+length;
+ if (endOnInput) { /* LZ4_decompress_safe() */
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+ /* We don't need to check oend, since we check it once for each loop below */
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+ /* Literals can only be 14, but hope compilers optimize if we copy by a register size */
+ LZ4_memcpy(op, ip, 16);
+ } else { /* LZ4_decompress_fast() */
+ /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
+ * it doesn't know input length, and relies on end-of-block properties */
+ LZ4_memcpy(op, ip, 8);
+ if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
+ }
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+ assert(match <= op);
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+ } else {
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+
+ /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) {
+ if (offset >= 8) {
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op+8, match+8, 8);
+ LZ4_memcpy(op+16, match+16, 2);
+ op += length;
+ continue;
+ } } }
+
+ if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) {
+ DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
+ length = MIN(length, (size_t)(oend-op));
+ } else {
+ goto _output_error; /* end-of-block condition violated */
+ } }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) { *op++ = *copyFrom++; }
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ assert((op <= oend) && (oend-op >= 32));
+ if (unlikely(offset<16)) {
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
+ } else {
+ LZ4_wildCopy32(op, match, cpy);
+ }
+
+ op = cpy; /* wildcopy correction */
+ }
+ safe_decode:
+#endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ( (endOnInput ? length != RUN_MASK : length <= 8)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip); ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ( (length != ML_MASK)
+ && (offset >= 8)
+ && (dict==withPrefix64k || match >= lowPrefix) ) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ }
+
+ /* copy literals */
+ cpy = op+length;
+#if LZ4_FAST_DEC_LOOP
+ safe_literal_copy:
+#endif
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
+ || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
+ {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ */
+ if (partialDecoding) {
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ assert(endOnInput);
+ DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
+ DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
+ DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
+ */
+ if (ip+length > iend) {
+ length = (size_t)(iend-ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op<=oend);
+ length = (size_t)(oend-op);
+ }
+ } else {
+ /* We must be on the last sequence because of the parsing limitations so check
+ * that we exactly regenerate the original size (must be exact when !endOnInput).
+ */
+ if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
+ DEBUGLOG(6, "should have been last run of literals")
+ DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ goto _output_error;
+ }
+ }
+ memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
+ ip += length;
+ op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
+ break;
+ }
+ } else {
+ LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ _copy_match:
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) goto _output_error;
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+#if LZ4_FAST_DEC_LOOP
+ safe_match_copy:
+#endif
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) length = MIN(length, (size_t)(oend-op));
+ else goto _output_error; /* doesn't respect parsing restriction */
+ }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+ assert(match >= lowPrefix);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /* partialDecoding : may end anywhere within the block */
+ assert(op<=oend);
+ if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = MIN(length, (size_t)(oend-op));
+ const BYTE* const matchEnd = match + mlen;
+ BYTE* const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) { break; }
+ continue;
+ }
+
+ /* offset < 8 : source and destination overlap; replicate the first 4 bytes, then widen the stride via the offset-correction tables */
+ if (unlikely(offset<8)) {
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op+4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_memcpy(op, match, 8);
+ match += 8;
+ }
+ op += 8;
+
+ if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, 8);
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ }
+ op = cpy; /* wildcopy correction */
+ }
+
+ /* end of decoding */
+ if (endOnInput) {
+ DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
+ return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
+ } else {
+ return (int) (((const char*)ip)-src); /* Nb of input bytes read */
+ }
+
+ /* Overflow error detected */
+ _output_error:
+ return (int) (-(((const char*)ip)-src))-1;
+ }
+}
+
+
+/*===== Instantiate the API decoding functions. =====*/
+
+/*! LZ4_decompress_safe() :
+ * Decodes one complete LZ4 block of `compressedSize` bytes from `source` into
+ * `dest` (capacity maxDecompressedSize), validating input as it goes.
+ * @return : nb of decompressed bytes, or a negative error code on malformed input. */
+LZ4_FORCE_O2
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest, NULL, 0);
+}
+
+/*! LZ4_decompress_safe_partial() :
+ * Same as LZ4_decompress_safe(), but may stop early : the effective output
+ * capacity is clamped to MIN(targetOutputSize, dstCapacity) and the
+ * `partial_decode` directive lets decoding end mid-block. */
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE*)dst, NULL, 0);
+}
+
+/*! LZ4_decompress_fast() :
+ * Trusts `originalSize` and stops on output size (srcSize is passed as 0;
+ * the decoder does not know the compressed length). lowPrefix is set to
+ * dest - 64 KB so any in-window match offset is accepted unchecked. */
+LZ4_FORCE_O2
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+/*! LZ4_decompress_safe_withPrefix64k() :
+ * Safe decode assuming a full 64 KB of previously-decoded data sits
+ * immediately before `dest` (lowPrefix = dest - 64 KB). */
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one.
+ * Forwards directly to LZ4_decompress_fast(), which already uses withPrefix64k. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+ /* LZ4_decompress_fast doesn't validate match offsets,
+ * and thus serves well with any prefixed dictionary. */
+ return LZ4_decompress_fast(source, dest, originalSize);
+}
+
+/*! LZ4_decompress_safe_withSmallPrefix() :
+ * Safe decode with `prefixSize` (< 64 KB) bytes of history located
+ * immediately before `dest` (lowPrefix = dest - prefixSize, noDict mode). */
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+/*! LZ4_decompress_safe_forceExtDict() :
+ * Safe decode with the dictionary held in a separate buffer
+ * (dictStart/dictSize), i.e. usingExtDict mode with no prefix. */
+LZ4_FORCE_O2
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+/*! LZ4_decompress_fast_extDict() :
+ * Fast (output-bounded, unvalidated-input) decode with an external
+ * dictionary buffer; srcSize is passed as 0 as in LZ4_decompress_fast(). */
+LZ4_FORCE_O2
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+/* Safe decode with a split dictionary : `prefixSize` bytes immediately before
+ * `dest`, plus an external segment at dictStart/dictSize (usingExtDict). */
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+/* Fast-mode counterpart of LZ4_decompress_safe_doubleDict() :
+ * output-bounded decode with both a prefix and an external dictionary segment. */
+LZ4_FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+/*===== streaming decompression functions =====*/
+
+/*! LZ4_createStreamDecode() :
+ * Allocates a zero-initialized streaming decode state.
+ * @return : the new state, presumably NULL if allocation fails (depends on
+ * ALLOC_AND_ZERO) — callers should check. */
+LZ4_streamDecode_t* LZ4_createStreamDecode(void)
+{
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+ LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
+ return lz4s;
+}
+
+/*! LZ4_freeStreamDecode() :
+ * Releases a state created by LZ4_createStreamDecode(). NULL is tolerated.
+ * @return : 0 (always succeeds). */
+int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
+{
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
+ FREEMEM(LZ4_stream);
+ return 0;
+}
+
+/*! LZ4_setStreamDecode() :
+ * Use this function to instruct where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ /* the dictionary is recorded by reference as the current prefix; no copy is made,
+ * so the caller must keep it valid for subsequent *_continue() calls */
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
+
+/*! LZ4_decoderRingBufferSize() :
+ * when setting a ring buffer for streaming decompression (optional scenario),
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * Note : in a ring buffer scenario,
+ * blocks are presumed decompressed next to each other.
+ * When not enough space remains for next block (remainingSize < maxBlockSize),
+ * decoding resumes from beginning of ring buffer.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize)
+{
+ if (maxBlockSize < 0) return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+ if (maxBlockSize < 16) maxBlockSize = 16; /* very small blocks are rounded up to a 16-byte minimum */
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
+
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks must still be available at the memory position where they were decoded.
+ If it's not possible, save the relevant part of decoded data into a safe buffer,
+ and indicate where it stands using LZ4_setStreamDecode()
+*/
+/*! LZ4_decompress_safe_continue() :
+ * Streaming safe decode. Dispatches on the tracked prefix state :
+ * first block, contiguous continuation, or wrap/jump to a new buffer
+ * (previous data then becomes the external dictionary).
+ * @return : forwarded result of the selected decoder (<= 0 on error). */
+LZ4_FORCE_O2
+int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)result;
+ lz4sd->prefixEnd += result;
+ } else {
+ /* The buffer wraps around, or they're switching to another buffer. */
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ }
+
+ return result;
+}
+
+/*! LZ4_decompress_fast_continue() :
+ * Streaming fast decode; same prefix-state dispatch as the safe variant.
+ * Note the bookkeeping advances by `originalSize` (the trusted target size)
+ * rather than by the returned value. */
+LZ4_FORCE_O2
+int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+ assert(originalSize >= 0);
+
+ if (lz4sd->prefixSize == 0) {
+ /* first call : no history yet */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* contiguous continuation of the current segment */
+ if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ /* wrap-around or switch to another buffer : previous data becomes the external dictionary */
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ }
+
+ return result;
+}
+
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+ These decoding functions work the same as "_continue" ones,
+ the dictionary must be explicitly provided within parameters
+*/
+
+/*! LZ4_decompress_safe_usingDict() :
+ * One-shot safe decode with an explicit dictionary. Picks the cheapest
+ * specialization : no dict, dict adjacent to dest (prefix), or external dict. */
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+{
+ if (dictSize==0)
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ /* dictionary sits immediately before the output : treat it as a prefix */
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
+}
+
+/*! LZ4_decompress_fast_usingDict() :
+ * One-shot fast decode with an explicit dictionary; an adjacent dictionary
+ * needs no special handling since fast mode does not validate offsets. */
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
+{
+ if (dictSize==0 || dictStart+dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
+}
+
+
+/*=*************************************************
+* Obsolete Functions
+***************************************************/
+/* obsolete compression functions */
+/* The wrappers below keep the historical signatures and simply forward to the
+ * current-generation API (LZ4_compress_default / LZ4_compress_fast_*), using
+ * acceleration level 1 and LZ4_compressBound() when no capacity was given. */
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+}
+int LZ4_compress(const char* src, char* dest, int srcSize)
+{
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+}
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+}
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
+}
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
+}
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
+}
+
+/*
+These decompression functions are deprecated and should no longer be used.
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
+/* Thin deprecated aliases kept for source compatibility (see note above). */
+int LZ4_uncompress (const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
+{
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
+
+/* Obsolete Streaming functions */
+
+/* size of the (opaque) compression stream state, for callers allocating it themselves */
+int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
+
+/* resets an externally-allocated stream state; inputBuffer is ignored (legacy parameter) */
+int LZ4_resetStreamState(void* state, char* inputBuffer)
+{
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t*)state);
+ return 0;
+}
+
+/* legacy constructor : inputBuffer is ignored, equivalent to LZ4_createStream() */
+void* LZ4_create (char* inputBuffer)
+{
+ (void)inputBuffer;
+ return LZ4_createStream();
+}
+
+/* returns the current dictionary pointer held in the stream state */
+char* LZ4_slideInputBuffer (void* state)
+{
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
+}
+
+#endif /* LZ4_COMMONDEFS_ONLY */
+
+}
diff --git a/3rdparty/tracy/tracy/common/tracy_lz4.hpp b/3rdparty/tracy/tracy/common/tracy_lz4.hpp
new file mode 100644
index 0000000..1ccdcff
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/tracy_lz4.hpp
@@ -0,0 +1,777 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Header File
+ * Copyright (C) 2011-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+
+#ifndef TRACY_LZ4_H_2983827168210
+#define TRACY_LZ4_H_2983827168210
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+#include <stdint.h>
+
+
+/**
+ Introduction
+
+ LZ4 is a lossless compression algorithm, providing compression speed >500 MB/s per core,
+ scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
+ multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
+
+ The LZ4 compression library provides in-memory compression and decompression functions.
+ It gives full buffer control to user.
+ Compression can be done in:
+ - a single step (described as Simple Functions)
+ - a single step, reusing a context (described in Advanced Functions)
+ - unbounded multiple steps (described as Streaming compression)
+
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+ Exact metadata depends on exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes block's compressed size, and maximum bound of decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.
+
+ lz4.h only handles blocks; it cannot generate Frames.
+
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
+*/
+
+/*^***************************************************************
+* Export parameters
+*****************************************************************/
+/*
+* LZ4_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+* LZ4LIB_VISIBILITY :
+* Control library symbols visibility.
+*/
+#ifndef LZ4LIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4LIB_VISIBILITY
+# endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define LZ4LIB_API LZ4LIB_VISIBILITY
+#endif
+
+/*------ Version ------*/
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
+#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
+
+#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
+
+#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
+#define LZ4_QUOTE(str) #str
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
+#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
+
+namespace tracy
+{
+
+LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */
+LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */
+
+
+/*-************************************
+* Tuning parameter
+**************************************/
+/*!
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio.
+ * Reduced memory usage may improve speed, thanks to better cache locality.
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
+#ifndef LZ4_MEMORY_USAGE
+# define LZ4_MEMORY_USAGE 14
+#endif
+
+
+/*-************************************
+* Simple Functions
+**************************************/
+/*! LZ4_compress_default() :
+ * Compresses 'srcSize' bytes from buffer 'src'
+ * into already allocated 'dst' buffer of size 'dstCapacity'.
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'src' into a more limited 'dst' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * In which case, 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails
+ * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
+
+/*! LZ4_decompress_safe() :
+ * compressedSize : is the exact complete size of the compressed block.
+ * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ * it will never write outside 'dst' buffer, nor read outside 'source' buffer,
+ * even if the compressed block is maliciously modified to order the decoder to do these actions.
+ * In such case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
+ */
+LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
+
+
+/*-************************************
+* Advanced Functions
+**************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
+
+/*! LZ4_compressBound() :
+ Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
+ This function is primarily useful for memory allocation purposes (destination buffer size).
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
+ Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
+ inputSize : max supported value is LZ4_MAX_INPUT_SIZE
+ return : maximum output size in a "worst case" scenario
+ or 0, if input size is incorrect (too large or negative)
+*/
+LZ4LIB_API int LZ4_compressBound(int inputSize);
+
+/*! LZ4_compress_fast() :
+ Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
+ The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
+ It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
+ An acceleration value of "1" is the same as regular LZ4_compress_default()
+ Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
+*/
+LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+
+/*! LZ4_compress_fast_extState() :
+ * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
+ * Use LZ4_sizeofState() to know how much memory must be allocated,
+ * and allocate it on 8-bytes boundaries (using `malloc()` typically).
+ * Then, provide this buffer as `void* state` to compression function.
+ */
+LZ4LIB_API int LZ4_sizeofState(void);
+LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+
+/*! LZ4_compress_destSize() :
+ * Reverse the logic : compresses as much data as possible from 'src' buffer
+ * into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ * or fills 'dst' buffer completely with as much data as possible from 'src'.
+ * note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
+ * New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ * or 0 if compression fails.
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ * the produced compressed content could, in specific circumstances,
+ * require to be decompressed into a destination buffer larger
+ * by at least 1 byte than the content to decompress.
+ * If an application uses `LZ4_compress_destSize()`,
+ * it's highly recommended to update liblz4 to v1.9.2 or better.
+ * If this can't be done or ensured,
+ * the receiving decompression function should provide
+ * a dstCapacity which is > decompressedSize, by at least 1 byte.
+ * See https://github.com/lz4/lz4/issues/859 for details
+ */
+LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
+
+
+/*! LZ4_decompress_safe_partial() :
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ * into destination buffer 'dst' of size 'dstCapacity'.
+ * Up to 'targetOutputSize' bytes will be decoded.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
+ * If source stream is detected malformed, function returns a negative result.
+ *
+ * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
+ *
+ * Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
+ * so dstCapacity is kind of redundant.
+ * This is because in older versions of this function,
+ * decoding operation would still write complete sequences.
+ * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
+ * it could write more bytes, though only up to dstCapacity.
+ * Some "margin" used to be required for this operation to work properly.
+ * Thankfully, this is no longer necessary.
+ * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ * then targetOutputSize can be any value,
+ * including larger than the block's decompressed size.
+ * The function will, at most, generate block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than block's compressed size,
+ * then targetOutputSize **MUST** be <= block's decompressed size.
+ * Otherwise, *silent corruption will occur*.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
+
+
+/*-*********************************************
+* Streaming Compression Functions
+***********************************************/
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
+
+LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
+LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
+
+/*! LZ4_resetStream_fast() : v1.9.0+
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ * (e.g., LZ4_compress_fast_continue()).
+ *
+ * An LZ4_stream_t must be initialized once before usage.
+ * This is automatically done when created by LZ4_createStream().
+ * However, should the LZ4_stream_t be simply declared on stack (for example),
+ * it's necessary to initialize it first, using LZ4_initStream().
+ *
+ * After init, start any new stream with LZ4_resetStream_fast().
+ * A same LZ4_stream_t can be re-used multiple times consecutively
+ * and compress multiple streams,
+ * provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ * but is not compatible with memory regions containing garbage data.
+ *
+ * Note: it's only useful to call LZ4_resetStream_fast()
+ * in the context of streaming compression.
+ * The *extState* functions perform their own resets.
+ * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive.
+ */
+LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
+
+/*! LZ4_loadDict() :
+ * Use this function to reference a static dictionary into LZ4_stream_t.
+ * The dictionary must remain available during compression.
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ * The same dictionary will have to be loaded on decompression side for successful decoding.
+ * Dictionary are useful for better compression of small data (KB range).
+ * While LZ4 accepts any input as dictionary,
+ * results are generally better when using Zstandard's Dictionary Builder.
+ * Loading a size of 0 is allowed, and is the same as reset.
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
+ */
+LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
+
+/*! LZ4_compress_fast_continue() :
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
+ * 'dst' buffer must be already allocated.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ * or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
+ * Each block has precise boundaries.
+ * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
+ *
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory !
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ * Make sure that buffers are separated, by at least one byte.
+ * This construction ensures that each block only depends on previous block.
+ *
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
+ */
+LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_saveDict() :
+ * If last 64KB data cannot be guaranteed to remain available at its current memory location,
+ * save it into a safer place (char* safeBuffer).
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
+ */
+LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
+
+
+/*-**********************************************
+* Streaming Decompression Functions
+* Bufferless synchronous API
+************************************************/
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
+
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
+ * creation / destruction of streaming decompression tracking context.
+ * A tracking context can be re-used multiple times.
+ */
+LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
+LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
+
+/*! LZ4_setStreamDecode() :
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
+ * Use this function to start decompression of a new stream of blocks.
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
+ * @return : 1 if OK, 0 if error
+ */
+LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
+
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ * Note : in a ring buffer scenario (optional),
+ * blocks are presumed decompressed next to each other
+ * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize),
+ * at which stage it resumes from beginning of ring buffer.
+ * When setting such a ring buffer for streaming decompression,
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_*_continue() :
+ * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
+ * A block is an unsplittable entity, it must be presented entirely to a decompression function.
+ * Decompression functions only accept one block at a time.
+ * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
+ * If less than 64KB of data has been decoded, all the data must be present.
+ *
+ * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
+ * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized.
+ * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
+ * - Synchronized mode :
+ * Decompression buffer size is _exactly_ the same as compression buffer size,
+ * and follows exactly same update rule (block boundaries at same positions),
+ * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream),
+ * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized,
+ * and encoding ring buffer can have any size, including small ones ( < 64 KB).
+ *
+ * Whenever these conditions are not possible,
+ * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
+ * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block.
+*/
+LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);
+
+
+/*! LZ4_decompress_*_usingDict() :
+ * These decoding functions work the same as
+ * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
+ * They are stand-alone, and don't need an LZ4_streamDecode_t structure.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
+ */
+LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);
+
+}
+
+#endif /* TRACY_LZ4_H_2983827168210 */
+
+
+/*^*************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***************************************/
+
+/*-****************************************************************************
+ * Experimental section
+ *
+ * Symbols declared in this section must be considered unstable. Their
+ * signatures or semantics may change, or they may be removed altogether in the
+ * future. They are therefore only safe to depend on when the caller is
+ * statically linked against the library.
+ *
+ * To protect against unsafe usage, not only are the declarations guarded,
+ * the definitions are hidden by default
+ * when building LZ4 as a shared/dynamic library.
+ *
+ * In order to access these declarations,
+ * define LZ4_STATIC_LINKING_ONLY in your application
+ * before including LZ4's headers.
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef TRACY_LZ4_STATIC_3504398509
+#define TRACY_LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif
+
+namespace tracy
+{
+
+/*! LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step.
+ * It is only safe to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
+ * From a high level, the difference is that
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_attach_dictionary() :
+ * This is an experimental API that allows
+ * efficient use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDict() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the first compression call on the stream.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
+
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly constrained memory environments.
+ * In both cases, it requires input to lay at the end of the buffer,
+ * and decompression to start at beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ * |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ * |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since decompressed size is typically larger,
+ * and margin is short.
+ *
+ * In-place decompression will work inside any buffer
+ * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's not compressed.
+ * This can happen when data is not compressible (already compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with both
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
+ * and data expansion, which can happen when input is not compressible.
+ * As a consequence, buffer size requirements are much higher,
+ * and memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ * Note that it is a compile-time constant, so all compressions will apply this limit.
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
+ * so it's a reasonable trick when inputs are known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
+ * in which case, the return code will be 0 (zero).
+ * The caller must be ready for these cases to happen,
+ * and typically design a backup scheme to send data uncompressed.
+ * The combination of both techniques can significantly reduce
+ * the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * which size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
+ * so it's possible to reduce memory requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
+
+}
+
+#endif /* TRACY_LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+#ifndef TRACY_LZ4_H_98237428734687
+#define TRACY_LZ4_H_98237428734687
+
+namespace tracy
+{
+
+/*-************************************************************
+ * Private Definitions
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
+ * Accessing members will expose user code to API and/or ABI break in future versions of the library.
+ **************************************************************/
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+ typedef int8_t LZ4_i8;
+ typedef uint8_t LZ4_byte;
+ typedef uint16_t LZ4_u16;
+ typedef uint32_t LZ4_u32;
+#else
+ typedef signed char LZ4_i8;
+ typedef unsigned char LZ4_byte;
+ typedef unsigned short LZ4_u16;
+ typedef unsigned int LZ4_u32;
+#endif
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal { /* private compression state: do not access members directly (API/ABI may change) */
+ LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]; /* match-search table, 2^LZ4_HASHLOG entries */
+ LZ4_u32 currentOffset;
+ LZ4_u32 tableType;
+ const LZ4_byte* dictionary; /* dictionary content (see LZ4_loadDict()) */
+ const LZ4_stream_t_internal* dictCtx; /* presumably set by LZ4_attach_dictionary() — confirm in lz4.c */
+ LZ4_u32 dictSize;
+};
+
+typedef struct { /* private decompression tracking state: do not access members directly (API/ABI may change) */
+ const LZ4_byte* externalDict;
+ size_t extDictSize;
+ const LZ4_byte* prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+
+/*! LZ4_stream_t :
+ * Do not use below internal definitions directly !
+ * Declare or allocate an LZ4_stream_t instead.
+ * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
+ * The structure definition can be convenient for static allocation
+ * (on stack, or as part of larger structure).
+ * Init this structure with LZ4_initStream() before first use.
+ * note : only use this definition in association with static linking !
+ * this definition is not API/ABI safe, and may change in future versions.
+ */
+#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
+#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
+union LZ4_stream_u {
+ void* table[LZ4_STREAMSIZE_VOIDP]; /* sizes the union to LZ4_STREAMSIZE bytes with pointer alignment */
+ LZ4_stream_t_internal internal_donotuse; /* actual state: do not access directly */
+}; /* previously typedef'd to LZ4_stream_t */
+
+
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ * but it's not when the structure is simply declared on stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ * In which case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead
+ */
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
+
+
+/*! LZ4_streamDecode_t :
+ * information structure to track an LZ4 stream during decompression.
+ * init this structure using LZ4_setStreamDecode() before first use.
+ * note : only use in association with static linking !
+ * this definition is not API/ABI safe,
+ * and may change in a future version !
+ */
+#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
+union LZ4_streamDecode_u {
+ unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} ; /* previously typedef'd to LZ4_streamDecode_t */
+
+
+
+/*-************************************
+* Obsolete Functions
+**************************************/
+
+/*! Deprecation warnings
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
+ */
+#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
+# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
+#else
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define LZ4_DEPRECATED(message) [[deprecated(message)]]
+# elif defined(_MSC_VER)
+# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated))
+# else
+# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+# define LZ4_DEPRECATED(message) /* disabled */
+# endif
+#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Obsolete compression functions (since v1.7.3) */
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/*! Obsolete decompression functions (since v1.8.0) */
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
+
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, they don't
+ * actually retain any history between compression calls. The compression ratio
+ * achieved will therefore be no better than compressing each chunk
+ * independently.
+ */
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
+
+/*! Obsolete streaming decoding functions (since v1.7.0) */
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
+
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
+ * These functions used to be faster than LZ4_decompress_safe(),
+ * but this is no longer the case. They are now slower.
+ * This is because LZ4_decompress_fast() doesn't know the input size,
+ * and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
+ * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
+ * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
+ *
+ * The last remaining LZ4_decompress_fast() specificity is that
+ * it can decompress a block without knowing its compressed size.
+ * Such functionality can be achieved in a more secure manner
+ * by employing LZ4_decompress_safe_partial().
+ *
+ * Parameters:
+ * originalSize : is the uncompressed size to regenerate.
+ * `dst` must be already allocated, its size must be >= 'originalSize' bytes.
+ * @return : number of bytes read from source buffer (== compressed size).
+ * The function expects to finish at block's end exactly.
+ * If the source stream is detected malformed, the function stops decoding and returns a negative result.
+ * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
+ * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
+ * Also, since match offsets are not validated, match reads from 'src' may underflow too.
+ * These issues never happen if input (compressed) data is correct.
+ * But they may happen if input data is invalid (error or intentional tampering).
+ * As a consequence, use these functions in trusted environments with trusted data **only**.
+ */
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
+LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
+
+/*! LZ4_resetStream() :
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is done with LZ4_initStream(), or LZ4_resetStream().
+ * Consider switching to LZ4_initStream(),
+ * invoking LZ4_resetStream() will trigger deprecation warnings in the future.
+ */
+LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
+
+}
+
+#endif /* LZ4_H_98237428734687 */
diff --git a/3rdparty/tracy/tracy/common/tracy_lz4hc.cpp b/3rdparty/tracy/tracy/common/tracy_lz4hc.cpp
new file mode 100644
index 0000000..9c899b8
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/tracy_lz4hc.cpp
@@ -0,0 +1,1620 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Copyright (C) 2011-2017, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
+
+
+/* *************************************
+* Tuning Parameter
+***************************************/
+
+/*! HEAPMODE :
+ * Select how default compression function will allocate workplace memory,
+ * in stack (0:fastest), or in heap (1:requires malloc()).
+ * Since workplace is rather large, heap mode is recommended.
+ */
+#ifndef LZ4HC_HEAPMODE
+# define LZ4HC_HEAPMODE 1
+#endif
+
+
+/*=== Dependency ===*/
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "tracy_lz4hc.hpp"
+
+
+/*=== Common definitions ===*/
+#if defined(__GNUC__)
+# pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#if defined (__clang__)
+# pragma clang diagnostic ignored "-Wunused-function"
+#endif
+
+#define LZ4_COMMONDEFS_ONLY
+#ifndef LZ4_SRC_INCLUDED
+#include "tracy_lz4.cpp" /* LZ4_count, constants, mem */
+#endif
+
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
+/*=== Constants ===*/
+#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
+#define LZ4_OPT_NUM (1<<12)
+
+
+/*=== Macros ===*/
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
+#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
+#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
+#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
+/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
+#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
+
+namespace tracy
+{
+
+static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+
+
+/**************************************
+* HC Compression
+**************************************/
+static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
+{
+ MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
+ MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+}
+
+static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
+{
+ uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
+ if (startingOffset > 1 GB) {
+ LZ4HC_clearTables(hc4);
+ startingOffset = 0;
+ }
+ startingOffset += 64 KB;
+ hc4->nextToUpdate = (U32) startingOffset;
+ hc4->base = start - startingOffset;
+ hc4->end = start;
+ hc4->dictBase = start - startingOffset;
+ hc4->dictLimit = (U32) startingOffset;
+ hc4->lowLimit = (U32) startingOffset;
+}
+
+
+/* Update chains up to ip (excluded) */
+LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const hashTable = hc4->hashTable;
+ const BYTE* const base = hc4->base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base+idx);
+ size_t delta = idx - hashTable[h];
+ if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
+ DELTANEXTU16(chainTable, idx) = (U16)delta;
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ hc4->nextToUpdate = target;
+}
+
+/** LZ4HC_countBack() :
+ * @return : negative value, nb of common bytes before ip/match */
+LZ4_FORCE_INLINE
+int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
+ const BYTE* const iMin, const BYTE* const mMin)
+{
+ int back = 0;
+ int const min = (int)MAX(iMin - ip, mMin - match);
+ assert(min <= 0);
+ assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
+ assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
+ while ( (back > min)
+ && (ip[back-1] == match[back-1]) )
+ back--;
+ return back;
+}
+
+#if defined(_MSC_VER)
+# define LZ4HC_rotl32(x,r) _rotl(x,r)
+#else
+# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#endif
+
+
+static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
+{
+ size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
+ if (bitsToRotate == 0) return pattern;
+ return LZ4HC_rotl32(pattern, (int)bitsToRotate);
+}
+
+/* LZ4HC_countPattern() :
+ * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
+static unsigned
+LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
+{
+ const BYTE* const iStart = ip;
+ reg_t const pattern = (sizeof(pattern)==8) ?
+ (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
+
+ while (likely(ip < iEnd-(sizeof(pattern)-1))) {
+ reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
+ if (!diff) { ip+=sizeof(pattern); continue; }
+ ip += LZ4_NbCommonBytes(diff);
+ return (unsigned)(ip - iStart);
+ }
+
+ if (LZ4_isLittleEndian()) {
+ reg_t patternByte = pattern;
+ while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
+ ip++; patternByte >>= 8;
+ }
+ } else { /* big endian */
+ U32 bitOffset = (sizeof(pattern)*8) - 8;
+ while (ip < iEnd) {
+ BYTE const byte = (BYTE)(pattern >> bitOffset);
+ if (*ip != byte) break;
+ ip ++; bitOffset -= 8;
+ }
+ }
+
+ return (unsigned)(ip - iStart);
+}
+
+/* LZ4HC_reverseCountPattern() :
+ * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
+ * read using natural platform endianess */
+static unsigned
+LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
+{
+ const BYTE* const iStart = ip;
+
+ while (likely(ip >= iLow+4)) {
+ if (LZ4_read32(ip-4) != pattern) break;
+ ip -= 4;
+ }
+ { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */
+ while (likely(ip>iLow)) {
+ if (ip[-1] != *bytePtr) break;
+ ip--; bytePtr--;
+ } }
+ return (unsigned)(iStart - ip);
+}
+
+/* LZ4HC_protectDictEnd() :
+ * Checks if the match is in the last 3 bytes of the dictionary, so reading the
+ * 4 byte MINMATCH would overflow.
+ * @returns true if the match index is okay.
+ */
+static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
+{
+ return ((U32)((dictLimit - 1) - matchIndex) >= 3);
+}
+
+typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
+typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
+
+LZ4_FORCE_INLINE int
+LZ4HC_InsertAndGetWiderMatch (
+ LZ4HC_CCtx_internal* hc4,
+ const BYTE* const ip,
+ const BYTE* const iLowLimit,
+ const BYTE* const iHighLimit,
+ int longest,
+ const BYTE** matchpos,
+ const BYTE** startpos,
+ const int maxNbAttempts,
+ const int patternAnalysis,
+ const int chainSwap,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const HashTable = hc4->hashTable;
+ const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
+ const BYTE* const base = hc4->base;
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
+ const U32 ipIndex = (U32)(ip - base);
+ const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
+ const BYTE* const dictBase = hc4->dictBase;
+ int const lookBackLength = (int)(ip-iLowLimit);
+ int nbAttempts = maxNbAttempts;
+ U32 matchChainPos = 0;
+ U32 const pattern = LZ4_read32(ip);
+ U32 matchIndex;
+ repeat_state_e repeat = rep_untested;
+ size_t srcPatternLength = 0;
+
+ DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
+ /* First Match */
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+ DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
+ matchIndex, lowestMatchIndex);
+
+ while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
+ int matchLength=0;
+ nbAttempts--;
+ assert(matchIndex < ipIndex);
+ if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
+ /* do nothing */
+ } else if (matchIndex >= dictLimit) { /* within current Prefix */
+ const BYTE* const matchPtr = base + matchIndex;
+ assert(matchPtr >= lowPrefixPtr);
+ assert(matchPtr < ip);
+ assert(longest >= 1);
+ if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
+ if (LZ4_read32(matchPtr) == pattern) {
+ int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
+ matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
+ matchLength -= back;
+ if (matchLength > longest) {
+ longest = matchLength;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
+ } } }
+ } else { /* lowestMatchIndex <= matchIndex < dictLimit */
+ const BYTE* const matchPtr = dictBase + matchIndex;
+ if (LZ4_read32(matchPtr) == pattern) {
+ const BYTE* const dictStart = dictBase + hc4->lowLimit;
+ int back = 0;
+ const BYTE* vLimit = ip + (dictLimit - matchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
+ matchLength += LZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
+ matchLength -= back;
+ if (matchLength > longest) {
+ longest = matchLength;
+ *matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip + back;
+ } } }
+
+ if (chainSwap && matchLength==longest) { /* better match => select a better chain */
+ assert(lookBackLength==0); /* search forward only */
+ if (matchIndex + (U32)longest <= ipIndex) {
+ int const kTrigger = 4;
+ U32 distanceToNextMatch = 1;
+ int const end = longest - MINMATCH + 1;
+ int step = 1;
+ int accel = 1 << kTrigger;
+ int pos;
+ for (pos = 0; pos < end; pos += step) {
+ U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
+ step = (accel++ >> kTrigger);
+ if (candidateDist > distanceToNextMatch) {
+ distanceToNextMatch = candidateDist;
+ matchChainPos = (U32)pos;
+ accel = 1 << kTrigger;
+ }
+ }
+ if (distanceToNextMatch > 1) {
+ if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
+ matchIndex -= distanceToNextMatch;
+ continue;
+ } } }
+
+ { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
+ if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
+ U32 const matchCandidateIdx = matchIndex-1;
+ /* may be a repeated pattern */
+ if (repeat == rep_untested) {
+ if ( ((pattern & 0xFFFF) == (pattern >> 16))
+ & ((pattern & 0xFF) == (pattern >> 24)) ) {
+ repeat = rep_confirmed;
+ srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
+ } else {
+ repeat = rep_not;
+ } }
+ if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
+ && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
+ const int extDict = matchCandidateIdx < dictLimit;
+ const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
+ if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
+ const BYTE* const dictStart = dictBase + hc4->lowLimit;
+ const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
+ size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
+ if (extDict && matchPtr + forwardPatternLength == iLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
+ forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
+ }
+ { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
+ size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
+ size_t currentSegmentLength;
+ if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
+ backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
+ }
+ /* Limit backLength not go further than lowestMatchIndex */
+ backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
+ assert(matchCandidateIdx - backLength >= lowestMatchIndex);
+ currentSegmentLength = backLength + forwardPatternLength;
+ /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
+ if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
+ && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
+ U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
+ if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
+ matchIndex = newMatchIndex;
+ else {
+ /* Can only happen if started in the prefix */
+ assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
+ matchIndex = dictLimit;
+ }
+ } else {
+ U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
+ if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
+ assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
+ matchIndex = dictLimit;
+ } else {
+ matchIndex = newMatchIndex;
+ if (lookBackLength==0) { /* no back possible */
+ size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
+ if ((size_t)longest < maxML) {
+ assert(base + matchIndex != ip);
+ if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
+ assert(maxML < 2 GB);
+ longest = (int)maxML;
+ *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip;
+ }
+ { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
+ if (distToNextPattern > matchIndex) break; /* avoid overflow */
+ matchIndex -= distToNextPattern;
+ } } } } }
+ continue;
+ } }
+ } } /* PA optimization */
+
+ /* follow current chain */
+ matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);
+
+ } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
+
+ if ( dict == usingDictCtxHc
+ && nbAttempts > 0
+ && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
+ size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
+ U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+ assert(dictEndOffset <= 1 GB);
+ matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
+ while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
+ const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;
+
+ if (LZ4_read32(matchPtr) == pattern) {
+ int mlt;
+ int back = 0;
+ const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
+ mlt -= back;
+ if (mlt > longest) {
+ longest = mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
+ } }
+
+ { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
+ dictMatchIndex -= nextOffset;
+ matchIndex -= nextOffset;
+ } } }
+
+ return longest;
+}
+
+LZ4_FORCE_INLINE
+int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE** matchpos,
+ const int maxNbAttempts,
+ const int patternAnalysis,
+ const dictCtx_directive dict)
+{
+ const BYTE* uselessPtr = ip;
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
+}
+
+/* LZ4HC_encodeSequence() :
+ * @return : 0 if ok,
+ * 1 if buffer issue detected */
+LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
+ int matchLength,
+ const BYTE* const match,
+ limitedOutput_directive limit,
+ BYTE* oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
+ size_t length;
+ BYTE* const token = op++;
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
+ static const BYTE* start = NULL;
+ static U32 totalCost = 0;
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+ U32 const ll = (U32)(ip - anchor);
+ U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
+ U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
+ U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+ if (start==NULL) start = anchor; /* only works for single segment */
+ /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
+ pos,
+ (U32)(ip - anchor), matchLength, (U32)(ip-match),
+ cost, totalCost);
+ totalCost += cost;
+#endif
+
+ /* Encode Literal length */
+ length = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)length, (int)(oend - op));
+ return 1;
+ }
+ if (length >= RUN_MASK) {
+ size_t len = length - RUN_MASK;
+ *token = (RUN_MASK << ML_BITS);
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
+ } else {
+ *token = (BYTE)(length << ML_BITS);
+ }
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + length);
+ op += length;
+
+ /* Encode Offset */
+ assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
+
+ /* Encode MatchLength */
+ assert(matchLength >= MINMATCH);
+ length = (size_t)matchLength - MINMATCH;
+ if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
+ if (length >= ML_MASK) {
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length -= 255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ } else {
+ *token += (BYTE)(length);
+ }
+
+ /* Prepare next loop */
+ ip += matchLength;
+ anchor = ip;
+
+ return 0;
+}
+#undef ip
+#undef op
+#undef anchor
+
+LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const source,
+ char* const dest,
+ int* srcSizePtr,
+ int const maxOutputSize,
+ int maxNbAttempts,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ const int inputSize = *srcSizePtr;
+ const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = (iend - LASTLITERALS);
+
+ BYTE* optr = (BYTE*) dest;
+ BYTE* op = (BYTE*) dest;
+ BYTE* oend = op + maxOutputSize;
+
+ int ml0, ml, ml2, ml3;
+ const BYTE* start0;
+ const BYTE* ref0;
+ const BYTE* ref = NULL;
+ const BYTE* start2 = NULL;
+ const BYTE* ref2 = NULL;
+ const BYTE* start3 = NULL;
+ const BYTE* ref3 = NULL;
+
+ /* init */
+ *srcSizePtr = 0;
+ if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
+ if (ml<MINMATCH) { ip++; continue; }
+
+ /* saved, in case we would skip too much */
+ start0 = ip; ref0 = ref; ml0 = ml;
+
+_Search2:
+ if (ip+ml <= mflimit) {
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else {
+ ml2 = ml;
+ }
+
+ if (ml2 == ml) { /* No better match => encode ML1 */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ continue;
+ }
+
+ if (start0 < ip) { /* first match was skipped at least once */
+ if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
+ ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
+ } }
+
+ /* Here, start0==ip */
+ if ((start2 - ip) < 3) { /* First Match too small : removed */
+ ml = ml2;
+ ip = start2;
+ ref =ref2;
+ goto _Search2;
+ }
+
+_Search3:
+ /* At this stage, we have :
+ * ml2 > ml1, and
+ * ip1+3 <= ip2 (usually < ip1+ml1) */
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ int new_ml = ml;
+ if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
+ if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = new_ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ }
+ /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
+
+ if (start2 + ml2 <= mflimit) {
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else {
+ ml3 = ml2;
+ }
+
+ if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
+ /* ip & ref are known; Now for ml */
+ if (start2 < ip+ml) ml = (int)(start2 - ip);
+ /* Now, encode 2 sequences */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ ip = start2;
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ ml = ml2;
+ ref = ref2;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
+ if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
+ if (start2 < ip+ml) {
+ int correction = (int)(ip+ml - start2);
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ if (ml2 < MINMATCH) {
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ }
+ }
+
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ ip = start3;
+ ref = ref3;
+ ml = ml3;
+
+ start0 = start2;
+ ref0 = ref2;
+ ml0 = ml2;
+ goto _Search2;
+ }
+
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ goto _Search3;
+ }
+
+ /*
+ * OK, now we have 3 ascending matches;
+ * let's write the first one ML1.
+ * ip & ref are known; Now decide ml.
+ */
+ if (start2 < ip+ml) {
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
+ if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ } else {
+ ml = (int)(start2 - ip);
+ }
+ }
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+
+ /* ML2 becomes ML1 */
+ ip = start2; ref = ref2; ml = ml2;
+
+ /* ML3 becomes ML2 */
+ start2 = start3; ref2 = ref3; ml2 = ml3;
+
+ /* let's find a new ML3 */
+ goto _Search3;
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) return 0;
+ /* adapt lastRunSize to fill 'dest' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip) - source);
+ return (int) (((char*)op)-dest);
+
+_dest_overflow:
+ if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ml and ref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing");
+ op = optr; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ml >= 0);
+ if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+ } }
+ goto _last_literals;
+ }
+ /* compression failed */
+ return 0;
+}
+
+
+static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
+ const char* const source, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ int const nbSearches, size_t sufficient_len,
+ const limitedOutput_directive limit, int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed);
+
+
+LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
+ typedef struct {
+ lz4hc_strat_e strat;
+ int nbSearches;
+ U32 targetLength;
+ } cParams_t;
+ static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
+ { lz4hc, 2, 16 }, /* 0, unused */
+ { lz4hc, 2, 16 }, /* 1, unused */
+ { lz4hc, 2, 16 }, /* 2, unused */
+ { lz4hc, 4, 16 }, /* 3 */
+ { lz4hc, 8, 16 }, /* 4 */
+ { lz4hc, 16, 16 }, /* 5 */
+ { lz4hc, 32, 16 }, /* 6 */
+ { lz4hc, 64, 16 }, /* 7 */
+ { lz4hc, 128, 16 }, /* 8 */
+ { lz4hc, 256, 16 }, /* 9 */
+ { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
+ { lz4opt, 512,128 }, /*11 */
+ { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
+ };
+
+ DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ ctx, src, *srcSizePtr, limit);
+
+ if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
+
+ ctx->end += *srcSizePtr;
+ if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */
+ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
+ { cParams_t const cParam = clTable[cLevel];
+ HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
+ int result;
+
+ if (cParam.strat == lz4hc) {
+ result = LZ4HC_compress_hashChain(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, limit, dict);
+ } else {
+ assert(cParam.strat == lz4opt);
+ result = LZ4HC_compress_optimal(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, cParam.targetLength, limit,
+ cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
+ dict, favor);
+ }
+ if (result <= 0) ctx->dirty = 1;
+ return result;
+ }
+}
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
+
+static int
+LZ4HC_compress_generic_noDictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ assert(ctx->dictCtx == NULL);
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
+}
+
+static int
+LZ4HC_compress_generic_dictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
+ assert(ctx->dictCtx != NULL);
+ if (position >= 64 KB) {
+ ctx->dictCtx = NULL;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else if (position == 0 && *srcSizePtr > 4 KB) {
+ memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4HC_setExternalDict(ctx, (const BYTE *)src);
+ ctx->compressionLevel = (short)cLevel;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
+ }
+}
+
+static int
+LZ4HC_compress_generic (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ if (ctx->dictCtx == NULL) {
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ }
+}
+
+
+int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
+
+static size_t LZ4_streamHC_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+/* state is presumed correctly initialized,
+ * in which case its size and alignment have already been validate */
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
+ LZ4HC_init_internal (ctx, (const BYTE*)src);
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
+}
+
+int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx==NULL) return 0; /* init failure */
+ return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
+}
+
+int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
+#else
+ LZ4_streamHC_t state;
+ LZ4_streamHC_t* const statePtr = &state;
+#endif
+ int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ FREEMEM(statePtr);
+#endif
+ return cSize;
+}
+
+/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
+int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
+{
+ LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx==NULL) return 0; /* init failure */
+ LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
+ LZ4_setCompressionLevel(ctx, cLevel);
+ return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
+}
+
+
+
+/**************************************
+* Streaming Functions
+**************************************/
+/* allocation */
+LZ4_streamHC_t* LZ4_createStreamHC(void)
+{
+ LZ4_streamHC_t* const state =
+ (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+ if (state == NULL) return NULL;
+ LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+ return state;
+}
+
+int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
+{
+ DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
+ if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
+ FREEMEM(LZ4_streamHCPtr);
+ return 0;
+}
+
+
+LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
+{
+ LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
+ /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
+ LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
+ DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+ /* check conditions */
+ if (buffer == NULL) return NULL;
+ if (size < sizeof(LZ4_streamHC_t)) return NULL;
+ if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
+ /* init */
+ { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
+ MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
+ return LZ4_streamHCPtr;
+}
+
+/* just a stub */
+void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (LZ4_streamHCPtr->internal_donotuse.dirty) {
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ } else {
+ /* preserve end - base : can trigger clearTable's threshold */
+ LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
+ }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
+ if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
+}
+
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
+{
+ LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
+}
+
+/* LZ4_loadDictHC() :
+ * LZ4_streamHCPtr is presumed properly initialized */
+int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* dictionary, int dictSize)
+{
+ LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
+ assert(LZ4_streamHCPtr != NULL);
+ if (dictSize > 64 KB) {
+ dictionary += (size_t)dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ /* need a full initialization, there are bad side-effects when using resetFast() */
+ { int const cLevel = ctxPtr->compressionLevel;
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
+ }
+ LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
+ ctxPtr->end = (const BYTE*)dictionary + dictSize;
+ if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
+ return dictSize;
+}
+
+void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
+ working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
+}
+
+/* compression */
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
+{
+ DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
+ if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
+ LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
+
+ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
+
+ /* cannot reference an extDict and a dictCtx at the same time */
+ ctxPtr->dictCtx = NULL;
+}
+
+static int
+LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ LZ4_streamHCPtr, src, *srcSizePtr, limit);
+ assert(ctxPtr != NULL);
+ /* auto-init if forgotten */
+ if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE*)src != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
+
+ /* Check overlapping input/dictionary space */
+ { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
+ const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+ if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
+ if (sourceEnd > dictEnd) sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
+ } }
+
+ return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
+}
+
+int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
+{
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
+}
+
+
+
+/* LZ4_saveDictHC :
+ * save history content
+ * into a user-provided buffer
+ * which is then used to continue compression
+ */
+int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
+{
+ LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
+ DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+ assert(prefixSize >= 0);
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ if (dictSize < 4) dictSize = 0;
+ if (dictSize > prefixSize) dictSize = prefixSize;
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+ streamPtr->end = (const BYTE*)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->dictLimit = endIndex - (U32)dictSize;
+ streamPtr->lowLimit = endIndex - (U32)dictSize;
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
+
+
+/***************************************************
+* Deprecated Functions
+***************************************************/
+
+/* These functions currently generate deprecation warnings */
+
+/* Wrappers for deprecated compression functions */
+int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
+int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
+
+
+/* Deprecated streaming functions */
+int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
+
+/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
+ * @return : 0 on success, !=0 if error */
+int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
+ if (hc4 == NULL) return 1; /* init failed */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return 0;
+}
+
+void* LZ4_createHC (const char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
+ if (hc4 == NULL) return NULL; /* not enough memory */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return hc4;
+}
+
+int LZ4_freeHC (void* LZ4HC_Data)
+{
+ if (!LZ4HC_Data) return 0; /* support free on NULL */
+ FREEMEM(LZ4HC_Data);
+ return 0;
+}
+
+int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
+}
+
+int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
+}
+
+char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
+{
+ LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
+ const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
+ LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
+ /* avoid const char * -> char * conversion warning :( */
+ return (char *)(uptrval)bufferStart;
+}
+
+
+/* ================================================
+ * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
+ * ===============================================*/
+typedef struct {
+ int price;
+ int off;
+ int mlen;
+ int litlen;
+} LZ4HC_optimal_t;
+
+/* price in bytes */
+LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
+{
+ int price = litlen;
+ assert(litlen >= 0);
+ if (litlen >= (int)RUN_MASK)
+ price += 1 + ((litlen-(int)RUN_MASK) / 255);
+ return price;
+}
+
+
+/* requires mlen >= MINMATCH */
+LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
+{
+ int price = 1 + 2 ; /* token + 16-bit offset */
+ assert(litlen >= 0);
+ assert(mlen >= MINMATCH);
+
+ price += LZ4HC_literalsPrice(litlen);
+
+ if (mlen >= (int)(ML_MASK+MINMATCH))
+ price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
+
+ return price;
+}
+
+
+typedef struct {
+ int off;
+ int len;
+} LZ4HC_match_t;
+
+LZ4_FORCE_INLINE LZ4HC_match_t
+LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
+ const BYTE* ip, const BYTE* const iHighLimit,
+ int minLen, int nbSearches,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ LZ4HC_match_t match = { 0 , 0 };
+ const BYTE* matchPtr = NULL;
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
+ if (matchLength <= minLen) return match;
+ if (favorDecSpeed) {
+ if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */
+ }
+ match.len = matchLength;
+ match.off = (int)(ip-matchPtr);
+ return match;
+}
+
+
+static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
+ const char* const source,
+ char* dst,
+ int* srcSizePtr,
+ int dstCapacity,
+ int const nbSearches,
+ size_t sufficient_len,
+ const limitedOutput_directive limit,
+ int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ int retval = 0;
+#define TRAILING_LITERALS 3
+#ifdef LZ4HC_HEAPMODE
+ LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
+ LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
+#endif
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + *srcSizePtr;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+ BYTE* op = (BYTE*) dst;
+ BYTE* opSaved = (BYTE*) dst;
+ BYTE* oend = op + dstCapacity;
+ int ovml = MINMATCH; /* overflow - last sequence */
+ const BYTE* ovref = NULL;
+
+ /* init */
+#ifdef LZ4HC_HEAPMODE
+ if (opt == NULL) goto _return_label;
+#endif
+ DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
+ *srcSizePtr = 0;
+ if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ int const llen = (int)(ip - anchor);
+ int best_mlen, best_off;
+ int cur, last_match_pos = 0;
+
+ LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+ if (firstMatch.len==0) { ip++; continue; }
+
+ if ((size_t)firstMatch.len > sufficient_len) {
+ /* good enough solution : immediate encoding */
+ int const firstML = firstMatch.len;
+ const BYTE* const matchPos = ip - firstMatch.off;
+ opSaved = op;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = firstML;
+ ovref = matchPos;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ /* set prices for first positions (literals) */
+ { int rPos;
+ for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
+ int const cost = LZ4HC_literalsPrice(llen + rPos);
+ opt[rPos].mlen = 1;
+ opt[rPos].off = 0;
+ opt[rPos].litlen = llen + rPos;
+ opt[rPos].price = cost;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ rPos, cost, opt[rPos].litlen);
+ } }
+ /* set prices using initial match */
+ { int mlen = MINMATCH;
+ int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
+ int const offset = firstMatch.off;
+ assert(matchML < LZ4_OPT_NUM);
+ for ( ; mlen <= matchML ; mlen++) {
+ int const cost = LZ4HC_sequencePrice(llen, mlen);
+ opt[mlen].mlen = mlen;
+ opt[mlen].off = offset;
+ opt[mlen].litlen = llen;
+ opt[mlen].price = cost;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
+ mlen, cost, mlen);
+ } }
+ last_match_pos = firstMatch.len;
+ { int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+ opt[last_match_pos+addLit].mlen = 1; /* literal */
+ opt[last_match_pos+addLit].off = 0;
+ opt[last_match_pos+addLit].litlen = addLit;
+ opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+ } }
+
+ /* check further positions */
+ for (cur = 1; cur < last_match_pos; cur++) {
+ const BYTE* const curPtr = ip + cur;
+ LZ4HC_match_t newMatch;
+
+ if (curPtr > mflimit) break;
+ DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
+ cur, opt[cur].price, opt[cur+1].price, cur+1);
+ if (fullUpdate) {
+ /* not useful to search here if next position has same (or lower) cost */
+ if ( (opt[cur+1].price <= opt[cur].price)
+ /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
+ && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
+ continue;
+ } else {
+ /* not useful to search here if next position has same (or lower) cost */
+ if (opt[cur+1].price <= opt[cur].price) continue;
+ }
+
+ DEBUGLOG(7, "search at rPos:%u", cur);
+ if (fullUpdate)
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+ else
+ /* only test matches of minimum length; slightly faster, but misses a few bytes */
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
+ if (!newMatch.len) continue;
+
+ if ( ((size_t)newMatch.len > sufficient_len)
+ || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
+ /* immediate encoding */
+ best_mlen = newMatch.len;
+ best_off = newMatch.off;
+ last_match_pos = cur + 1;
+ goto encode;
+ }
+
+ /* before match : set price with literals at beginning */
+ { int const baseLitlen = opt[cur].litlen;
+ int litlen;
+ for (litlen = 1; litlen < MINMATCH; litlen++) {
+ int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
+ int const pos = cur + litlen;
+ if (price < opt[pos].price) {
+ opt[pos].mlen = 1; /* literal */
+ opt[pos].off = 0;
+ opt[pos].litlen = baseLitlen+litlen;
+ opt[pos].price = price;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
+ pos, price, opt[pos].litlen);
+ } } }
+
+ /* set prices using match at position = cur */
+ { int const matchML = newMatch.len;
+ int ml = MINMATCH;
+
+ assert(cur + newMatch.len < LZ4_OPT_NUM);
+ for ( ; ml <= matchML ; ml++) {
+ int const pos = cur + ml;
+ int const offset = newMatch.off;
+ int price;
+ int ll;
+ DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
+ pos, last_match_pos);
+ if (opt[cur].mlen == 1) {
+ ll = opt[cur].litlen;
+ price = ((cur > ll) ? opt[cur - ll].price : 0)
+ + LZ4HC_sequencePrice(ll, ml);
+ } else {
+ ll = 0;
+ price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
+ }
+
+ assert((U32)favorDecSpeed <= 1);
+ if (pos > last_match_pos+TRAILING_LITERALS
+ || price <= opt[pos].price - (int)favorDecSpeed) {
+ DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
+ pos, price, ml);
+ assert(pos < LZ4_OPT_NUM);
+ if ( (ml == matchML) /* last pos of last match */
+ && (last_match_pos < pos) )
+ last_match_pos = pos;
+ opt[pos].mlen = ml;
+ opt[pos].off = offset;
+ opt[pos].litlen = ll;
+ opt[pos].price = price;
+ } } }
+ /* complete following positions with literals */
+ { int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+ opt[last_match_pos+addLit].mlen = 1; /* literal */
+ opt[last_match_pos+addLit].off = 0;
+ opt[last_match_pos+addLit].litlen = addLit;
+ opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+ } }
+ } /* for (cur = 1; cur <= last_match_pos; cur++) */
+
+ assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
+ best_mlen = opt[last_match_pos].mlen;
+ best_off = opt[last_match_pos].off;
+ cur = last_match_pos - best_mlen;
+
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+ assert(cur < LZ4_OPT_NUM);
+ assert(last_match_pos >= 1); /* == 1 when only one candidate */
+ DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
+ { int candidate_pos = cur;
+ int selected_matchLength = best_mlen;
+ int selected_offset = best_off;
+ while (1) { /* from end to beginning */
+ int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */
+ int const next_offset = opt[candidate_pos].off;
+ DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
+ opt[candidate_pos].mlen = selected_matchLength;
+ opt[candidate_pos].off = selected_offset;
+ selected_matchLength = next_matchLength;
+ selected_offset = next_offset;
+ if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
+ assert(next_matchLength > 0); /* can be 1, means literal */
+ candidate_pos -= next_matchLength;
+ } }
+
+ /* encode all recorded sequences in order */
+ { int rPos = 0; /* relative position (to ip) */
+ while (rPos < last_match_pos) {
+ int const ml = opt[rPos].mlen;
+ int const offset = opt[rPos].off;
+ if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */
+ rPos += ml;
+ assert(ml >= MINMATCH);
+ assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
+ opSaved = op;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = ml;
+ ovref = ip - offset;
+ goto _dest_overflow;
+ } } }
+ } /* while (ip <= mflimit) */
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) { /* Check output limit */
+ retval = 0;
+ goto _return_label;
+ }
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip) - source);
+ retval = (int) ((char*)op-dst);
+ goto _return_label;
+
+_dest_overflow:
+if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+ op = opSaved; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+ if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+ DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+ DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+ DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+ } }
+ goto _last_literals;
+}
+_return_label:
+#ifdef LZ4HC_HEAPMODE
+ FREEMEM(opt);
+#endif
+ return retval;
+}
+
+}
diff --git a/3rdparty/tracy/tracy/common/tracy_lz4hc.hpp b/3rdparty/tracy/tracy/common/tracy_lz4hc.hpp
new file mode 100644
index 0000000..18bf30d
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/tracy_lz4hc.hpp
@@ -0,0 +1,405 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Header File
+ Copyright (C) 2011-2017, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#ifndef TRACY_LZ4_HC_H_19834876238432
+#define TRACY_LZ4_HC_H_19834876238432
+
+/* --- Dependency --- */
+/* note : lz4hc requires lz4.h/lz4.c for compilation */
+#include "tracy_lz4.hpp" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
+
+
+/* --- Useful constants --- */
+#define LZ4HC_CLEVEL_MIN 3
+#define LZ4HC_CLEVEL_DEFAULT 9
+#define LZ4HC_CLEVEL_OPT_MIN 10
+#define LZ4HC_CLEVEL_MAX 12
+
+namespace tracy
+{
+
+/*-************************************
+ * Block Compression
+ **************************************/
+/*! LZ4_compress_HC() :
+ * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
+ * `dst` must be already allocated.
+ * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
+ * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
+ * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
+ * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
+ * @return : the number of bytes written into 'dst'
+ * or 0 if compression fails.
+ */
+LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
+
+
+/* Note :
+ * Decompression functions are provided within "lz4.h" (BSD license)
+ */
+
+
+/*! LZ4_compress_HC_extStateHC() :
+ * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
+ * `state` size is provided by LZ4_sizeofStateHC().
+ * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
+ */
+LZ4LIB_API int LZ4_sizeofStateHC(void);
+LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
+
+
+/*! LZ4_compress_HC_destSize() : v1.9.0+
+ * Will compress as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how many bytes were read from `src`
+ */
+LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize,
+ int compressionLevel);
+
+
+/*-************************************
+ * Streaming Compression
+ * Bufferless synchronous API
+ **************************************/
+ typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
+
+/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
+ * These functions create and release memory for LZ4 HC streaming state.
+ * Newly created states are automatically initialized.
+ * A same state can be used multiple times consecutively,
+ * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
+LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
+
+/*
+ These functions compress data in successive blocks of any size,
+ using previous blocks as dictionary, to improve compression ratio.
+ One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
+ There is an exception for ring buffers, which can be smaller than 64 KB.
+ Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
+
+ Before starting compression, state must be allocated and properly initialized.
+ LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
+
+ Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
+ or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
+ LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
+ which is automatically the case when state is created using LZ4_createStreamHC().
+
+ After reset, a first "fictional block" can be designated as initial dictionary,
+ using LZ4_loadDictHC() (Optional).
+
+ Invoke LZ4_compress_HC_continue() to compress each successive block.
+ The number of blocks is unlimited.
+ Previous input blocks, including initial dictionary when present,
+ must remain accessible and unmodified during compression.
+
+ It's allowed to update compression level anytime between blocks,
+ using LZ4_setCompressionLevel() (experimental).
+
+ 'dst' buffer should be sized to handle worst case scenarios
+ (see LZ4_compressBound(), it ensures compression success).
+ In case of failure, the API does not guarantee recovery,
+ so the state _must_ be reset.
+ To ensure compression success
+ whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
+ consider using LZ4_compress_HC_continue_destSize().
+
+ Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
+ it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
+ Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB)
+
+ After completing a streaming compression,
+ it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
+ just by resetting it, using LZ4_resetStreamHC_fast().
+*/
+
+LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */
+LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
+
+LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
+ const char* src, char* dst,
+ int srcSize, int maxDstSize);
+
+/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
+ * Similar to LZ4_compress_HC_continue(),
+ * but will read as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided into 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how many bytes were read from `src`.
+ * Note that this function may not consume the entire input.
+ */
+LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize);
+
+LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
+
+
+
+/*^**********************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***********************************************/
+
+/*-******************************************************************
+ * PRIVATE DEFINITIONS :
+ * Do not use these definitions directly.
+ * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+ * Declare an `LZ4_streamHC_t` directly, rather than any type below.
+ * Even then, only do so in the context of static linking, as definitions may change between versions.
+ ********************************************************************/
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+
+#define LZ4HC_HASH_LOG 15
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
+{
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte* end; /* next block here to continue on current prefix */
+ const LZ4_byte* base; /* All index relative to this position */
+ const LZ4_byte* dictBase; /* alternate base for extDict */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more dict */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
+ const LZ4HC_CCtx_internal* dictCtx;
+};
+
+
+/* Do not use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+ */
+#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
+#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
+union LZ4_streamHC_u {
+ void* table[LZ4_STREAMHCSIZE_VOIDP];
+ LZ4HC_CCtx_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_streamHC_t */
+
+/* LZ4_streamHC_t :
+ * This structure allows static allocation of LZ4 HC streaming state.
+ * This can be used to allocate statically, on stack, or as part of a larger structure.
+ *
+ * Such state **must** be initialized using LZ4_initStreamHC() before first use.
+ *
+ * Note that invoking LZ4_initStreamHC() is not required when
+ * the state was created using LZ4_createStreamHC() (which is recommended).
+ * Using the normal builder, a newly created state is automatically initialized.
+ *
+ * Static allocation shall only be used in combination with static linking.
+ */
+
+/* LZ4_initStreamHC() : v1.9.0+
+ * Required before first use of a statically allocated LZ4_streamHC_t.
+ * Before v1.9.0 : use LZ4_resetStreamHC() instead
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size);
+
+
+/*-************************************
+* Deprecated Functions
+**************************************/
+/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
+
+/* deprecated compression functions */
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/* Obsolete streaming functions; degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, use of
+ * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
+ * than preserve a window-sized chunk of history.
+ */
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
+LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
+LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
+LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);
+
+
+/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
+ * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
+ * which is now the recommended function to start a new stream of blocks,
+ * but cannot be used to initialize a memory segment containing arbitrary garbage data.
+ *
+ * It is recommended to switch to LZ4_initStreamHC().
+ * LZ4_resetStreamHC() will generate deprecation warnings in a future version.
+ */
+LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
+
+}
+
+#endif /* TRACY_LZ4_HC_H_19834876238432 */
+
+
+/*-**************************************************
+ * !!!!! STATIC LINKING ONLY !!!!!
+ * Following definitions are considered experimental.
+ * They should not be linked from DLL,
+ * as there is no guarantee of API stability yet.
+ * Prototypes will be promoted to "stable" status
+ * after successful usage in real-life scenarios.
+ ***************************************************/
+#ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */
+#ifndef TRACY_LZ4_HC_SLO_098092834
+#define TRACY_LZ4_HC_SLO_098092834
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
+#include "tracy_lz4.hpp"
+
+namespace tracy
+{
+
+/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
+ * It's possible to change compression level
+ * between successive invocations of LZ4_compress_HC_continue*()
+ * for dynamic adaptation.
+ */
+LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
+ * Opt. Parser will favor decompression speed over compression ratio.
+ * Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
+ */
+LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
+
+/*! LZ4_resetStreamHC_fast() : v1.9.0+
+ * When an LZ4_streamHC_t is known to be in a internally coherent state,
+ * it can often be prepared for a new compression with almost no work, only
+ * sometimes falling back to the full, expensive reset that is always required
+ * when the stream is in an indeterminate state (i.e., the reset performed by
+ * LZ4_resetStreamHC()).
+ *
+ * LZ4_streamHCs are guaranteed to be in a valid state when:
+ * - returned from LZ4_createStreamHC()
+ * - reset by LZ4_resetStreamHC()
+ * - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ * - the stream was in a valid state and was then used in any compression call
+ * that returned success
+ * - the stream was in an indeterminate state and was used in a compression
+ * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ * returned success
+ *
+ * Note:
+ * A stream that was last used in a compression call that returned an error
+ * may be passed to this function. However, it will be fully reset, which will
+ * clear any existing history and settings from the context.
+ */
+LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ * A variant of LZ4_compress_HC_extStateHC().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ * "correctly initialized"). From a high level, the difference is that this
+ * function initializes the provided state with a call to
+ * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ * call to LZ4_resetStreamHC().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
+ void* state,
+ const char* src, char* dst,
+ int srcSize, int dstCapacity,
+ int compressionLevel);
+
+/*! LZ4_attach_HC_dictionary() :
+ * This is an experimental API that allows for the efficient use of a
+ * static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionary stream pointer may be NULL, in which
+ * case any existing dictionary stream is unset.
+ *
+ * A dictionary should only be attached to a stream without any history (i.e.,
+ * a stream that has just been reset).
+ *
+ * The dictionary will remain attached to the working stream only for the
+ * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ * dictionary context association from the working stream. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the lifetime of the stream session.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
+ LZ4_streamHC_t *working_stream,
+ const LZ4_streamHC_t *dictionary_stream);
+
+}
+
+#endif /* TRACY_LZ4_HC_SLO_098092834 */
+#endif /* LZ4_HC_STATIC_LINKING_ONLY */
diff --git a/3rdparty/tracy/tracy/common/unix-release.mk b/3rdparty/tracy/tracy/common/unix-release.mk
new file mode 100644
index 0000000..07ac7d6
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/unix-release.mk
@@ -0,0 +1,13 @@
+ARCH := $(shell uname -m)
+
+ifeq (0,$(shell $(CC) --version | grep clang && echo 1 || echo 0))
+CFLAGS += -s
+else
+LDFLAGS := -s
+endif
+
+ifneq (,$(filter $(ARCH),aarch64 arm64))
+CFLAGS += -mcpu=native
+else
+CFLAGS += -march=native
+endif
diff --git a/3rdparty/tracy/tracy/common/unix.mk b/3rdparty/tracy/tracy/common/unix.mk
new file mode 100644
index 0000000..0105093
--- /dev/null
+++ b/3rdparty/tracy/tracy/common/unix.mk
@@ -0,0 +1,82 @@
+# Common code needed by most Tracy Unix Makefiles.
+
+# Ensure these are simply-substituted variables, without changing their values.
+LIBS := $(LIBS)
+
+# Tracy does not use TBB directly, but the implementation of parallel algorithms
+# in some versions of libstdc++ depends on TBB. When it does, you must
+# explicitly link against -ltbb.
+#
+# Some distributions have pkg-config files for TBB, others don't.
+ifeq (0,$(shell pkg-config --libs tbb >/dev/null 2>&1; echo $$?))
+ LIBS += $(shell pkg-config --libs tbb)
+else ifeq (0,$(shell ld -ltbb -o /dev/null 2>/dev/null; echo $$?))
+ LIBS += -ltbb
+endif
+
+OBJDIRBASE := obj/$(BUILD)
+OBJDIR := $(OBJDIRBASE)/o/o/o
+
+OBJ := $(addprefix $(OBJDIR)/,$(SRC:%.cpp=%.o))
+OBJ2 := $(addprefix $(OBJDIR)/,$(SRC2:%.c=%.o))
+OBJ3 := $(addprefix $(OBJDIR)/,$(SRC3:%.m=%.o))
+OBJ4 := $(addprefix $(OBJDIR)/,$(SRC4:%.S=%.o))
+
+all: $(IMAGE)
+
+$(OBJDIR)/%.o: %.cpp
+ $(CXX) -c $(INCLUDES) $(CXXFLAGS) $(DEFINES) $< -o $@
+
+$(OBJDIR)/%.d : %.cpp
+ @echo Resolving dependencies of $<
+ @mkdir -p $(@D)
+ @$(CXX) -MM $(INCLUDES) $(CXXFLAGS) $(DEFINES) $< > $@.$$$$; \
+ sed 's,.*\.o[ :]*,$(OBJDIR)/$(<:.cpp=.o) $@ : ,g' < $@.$$$$ > $@; \
+ rm -f $@.$$$$
+
+$(OBJDIR)/%.o: %.c
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DEFINES) $< -o $@
+
+$(OBJDIR)/%.d : %.c
+ @echo Resolving dependencies of $<
+ @mkdir -p $(@D)
+ @$(CC) -MM $(INCLUDES) $(CFLAGS) $(DEFINES) $< > $@.$$$$; \
+ sed 's,.*\.o[ :]*,$(OBJDIR)/$(<:.c=.o) $@ : ,g' < $@.$$$$ > $@; \
+ rm -f $@.$$$$
+
+$(OBJDIR)/%.o: %.m
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DEFINES) $< -o $@
+
+$(OBJDIR)/%.d : %.m
+ @echo Resolving dependencies of $<
+ @mkdir -p $(@D)
+ @$(CC) -MM $(INCLUDES) $(CFLAGS) $(DEFINES) $< > $@.$$$$; \
+ sed 's,.*\.o[ :]*,$(OBJDIR)/$(<:.m=.o) $@ : ,g' < $@.$$$$ > $@; \
+ rm -f $@.$$$$
+
+$(OBJDIR)/%.o: %.S
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DEFINES) $< -o $@
+
+$(OBJDIR)/%.d : %.S
+	@echo Resolving dependencies of $<
+	@mkdir -p $(@D)
+	@$(CC) -MM $(INCLUDES) $(CFLAGS) $(DEFINES) $< > $@.$$$$; \
+	sed 's,.*\.o[ :]*,$(OBJDIR)/$(<:.S=.o) $@ : ,g' < $@.$$$$ > $@; \
+	rm -f $@.$$$$
+
+ifeq (yes,$(SHARED_LIBRARY))
+$(IMAGE): $(OBJ) $(OBJ2) $(OBJ4)
+ $(CXX) $(CXXFLAGS) $(LDFLAGS) $(DEFINES) $(OBJ) $(OBJ2) $(OBJ4) $(LIBS) -shared -o $@
+else
+$(IMAGE): $(OBJ) $(OBJ2) $(OBJ3) $(OBJ4)
+ $(CXX) $(CXXFLAGS) $(LDFLAGS) $(DEFINES) $(OBJ) $(OBJ2) $(OBJ3) $(OBJ4) $(LIBS) -o $@
+endif
+
+ifneq "$(MAKECMDGOALS)" "clean"
+-include $(addprefix $(OBJDIR)/,$(SRC:.cpp=.d)) $(addprefix $(OBJDIR)/,$(SRC2:.c=.d)) $(addprefix $(OBJDIR)/,$(SRC3:.m=.d)) $(addprefix $(OBJDIR)/,$(SRC4:.S=.d))
+endif
+
+clean:
+ rm -rf $(OBJDIRBASE) $(IMAGE)*
+
+.PHONY: clean all
diff --git a/3rdparty/tracy/tracy/libbacktrace/LICENSE b/3rdparty/tracy/tracy/libbacktrace/LICENSE
new file mode 100644
index 0000000..097d277
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/LICENSE
@@ -0,0 +1,29 @@
+# Copyright (C) 2012-2016 Free Software Foundation, Inc.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+# (1) Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+
+# (2) Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+
+# (3) The name of the author may not be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
diff --git a/3rdparty/tracy/tracy/libbacktrace/alloc.cpp b/3rdparty/tracy/tracy/libbacktrace/alloc.cpp
new file mode 100644
index 0000000..a365a48
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/alloc.cpp
@@ -0,0 +1,174 @@
+/* alloc.c -- Memory allocation without mmap.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+#include "../common/TracyAlloc.hpp"
+
+namespace tracy
+{
+
+/* Allocation routines to use on systems that do not support anonymous
+ mmap. This implementation just uses malloc, which means that the
+ backtrace functions may not be safely invoked from a signal
+ handler. */
+
+/* Allocate memory like malloc. If ERROR_CALLBACK is NULL, don't
+ report an error. */
+
+void *
+backtrace_alloc (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ size_t size, backtrace_error_callback error_callback,
+ void *data)
+{
+ void *ret;
+
+ ret = tracy_malloc (size);
+ if (ret == NULL)
+ {
+ if (error_callback)
+ error_callback (data, "malloc", errno);
+ }
+ return ret;
+}
+
+/* Free memory. */
+
+void
+backtrace_free (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ void *p, size_t size ATTRIBUTE_UNUSED,
+ backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED)
+{
+ tracy_free (p);
+}
+
+/* Grow VEC by SIZE bytes. */
+
+void *
+backtrace_vector_grow (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ size_t size, backtrace_error_callback error_callback,
+ void *data, struct backtrace_vector *vec)
+{
+ void *ret;
+
+ if (size > vec->alc)
+ {
+ size_t alc;
+ void *base;
+
+ if (vec->size == 0)
+ alc = 32 * size;
+ else if (vec->size >= 4096)
+ alc = vec->size + 4096;
+ else
+ alc = 2 * vec->size;
+
+ if (alc < vec->size + size)
+ alc = vec->size + size;
+
+ base = tracy_realloc (vec->base, alc);
+ if (base == NULL)
+ {
+ error_callback (data, "realloc", errno);
+ return NULL;
+ }
+
+ vec->base = base;
+ vec->alc = alc - vec->size;
+ }
+
+ ret = (char *) vec->base + vec->size;
+ vec->size += size;
+ vec->alc -= size;
+ return ret;
+}
+
+/* Finish the current allocation on VEC. */
+
+void *
+backtrace_vector_finish (struct backtrace_state *state,
+ struct backtrace_vector *vec,
+ backtrace_error_callback error_callback,
+ void *data)
+{
+ void *ret;
+
+ /* With this allocator we call realloc in backtrace_vector_grow,
+ which means we can't easily reuse the memory here. So just
+ release it. */
+ if (!backtrace_vector_release (state, vec, error_callback, data))
+ return NULL;
+ ret = vec->base;
+ vec->base = NULL;
+ vec->size = 0;
+ vec->alc = 0;
+ return ret;
+}
+
+/* Release any extra space allocated for VEC. */
+
+int
+backtrace_vector_release (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ struct backtrace_vector *vec,
+ backtrace_error_callback error_callback,
+ void *data)
+{
+ vec->alc = 0;
+
+ if (vec->size == 0)
+ {
+ /* As of C17, realloc with size 0 is marked as an obsolescent feature, use
+ free instead. */
+ tracy_free (vec->base);
+ vec->base = NULL;
+ return 1;
+ }
+
+ vec->base = tracy_realloc (vec->base, vec->size);
+ if (vec->base == NULL)
+ {
+ error_callback (data, "realloc", errno);
+ return 0;
+ }
+
+ return 1;
+}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/backtrace.hpp b/3rdparty/tracy/tracy/libbacktrace/backtrace.hpp
new file mode 100644
index 0000000..e4be297
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/backtrace.hpp
@@ -0,0 +1,186 @@
+/* backtrace.h -- Public header file for stack backtrace library.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#ifndef BACKTRACE_H
+#define BACKTRACE_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+namespace tracy
+{
+
+/* The backtrace state. This struct is intentionally not defined in
+ the public interface. */
+
+struct backtrace_state;
+
+/* The type of the error callback argument to backtrace functions.
+ This function, if not NULL, will be called for certain error cases.
+ The DATA argument is passed to the function that calls this one.
+ The MSG argument is an error message. The ERRNUM argument, if
+ greater than 0, holds an errno value. The MSG buffer may become
+ invalid after this function returns.
+
+ As a special case, the ERRNUM argument will be passed as -1 if no
+ debug info can be found for the executable, or if the debug info
+ exists but has an unsupported version, but the function requires
+ debug info (e.g., backtrace_full, backtrace_pcinfo). The MSG in
+ this case will be something along the lines of "no debug info".
+ Similarly, ERRNUM will be passed as -1 if there is no symbol table,
+ but the function requires a symbol table (e.g., backtrace_syminfo).
+ This may be used as a signal that some other approach should be
+ tried. */
+
+typedef void (*backtrace_error_callback) (void *data, const char *msg,
+ int errnum);
+
+/* Create state information for the backtrace routines. This must be
+ called before any of the other routines, and its return value must
+ be passed to all of the other routines. FILENAME is the path name
+ of the executable file; if it is NULL the library will try
+ system-specific path names. If not NULL, FILENAME must point to a
+ permanent buffer. If THREADED is non-zero the state may be
+ accessed by multiple threads simultaneously, and the library will
+ use appropriate atomic operations. If THREADED is zero the state
+ may only be accessed by one thread at a time. This returns a state
+ pointer on success, NULL on error. If an error occurs, this will
+ call the ERROR_CALLBACK routine.
+
+ Calling this function allocates resources that cannot be freed.
+ There is no backtrace_free_state function. The state is used to
+ cache information that is expensive to recompute. Programs are
+ expected to call this function at most once and to save the return
+ value for all later calls to backtrace functions. */
+
+extern struct backtrace_state *backtrace_create_state (
+ const char *filename, int threaded,
+ backtrace_error_callback error_callback, void *data);
+
+/* The type of the callback argument to the backtrace_full function.
+ DATA is the argument passed to backtrace_full. PC is the program
+ counter. FILENAME is the name of the file containing PC, or NULL
+ if not available. LINENO is the line number in FILENAME containing
+ PC, or 0 if not available. FUNCTION is the name of the function
+   containing PC, or NULL if not available.  This should return 0 to
+   continue tracing.  The FILENAME and FUNCTION buffers may become
+ invalid after this function returns. */
+
+typedef int (*backtrace_full_callback) (void *data, uintptr_t pc, uintptr_t lowaddr,
+ const char *filename, int lineno,
+ const char *function);
+
+/* Get a full stack backtrace. SKIP is the number of frames to skip;
+ passing 0 will start the trace with the function calling
+ backtrace_full. DATA is passed to the callback routine. If any
+ call to CALLBACK returns a non-zero value, the stack backtrace
+ stops, and backtrace returns that value; this may be used to limit
+ the number of stack frames desired. If all calls to CALLBACK
+ return 0, backtrace returns 0. The backtrace_full function will
+ make at least one call to either CALLBACK or ERROR_CALLBACK. This
+ function requires debug info for the executable. */
+
+extern int backtrace_full (struct backtrace_state *state, int skip,
+ backtrace_full_callback callback,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* The type of the callback argument to the backtrace_simple function.
+ DATA is the argument passed to simple_backtrace. PC is the program
+ counter. This should return 0 to continue tracing. */
+
+typedef int (*backtrace_simple_callback) (void *data, uintptr_t pc);
+
+/* Get a simple backtrace. SKIP is the number of frames to skip, as
+ in backtrace. DATA is passed to the callback routine. If any call
+ to CALLBACK returns a non-zero value, the stack backtrace stops,
+ and backtrace_simple returns that value. Otherwise
+ backtrace_simple returns 0. The backtrace_simple function will
+ make at least one call to either CALLBACK or ERROR_CALLBACK. This
+ function does not require any debug info for the executable. */
+
+extern int backtrace_simple (struct backtrace_state *state, int skip,
+ backtrace_simple_callback callback,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* Print the current backtrace in a user readable format to a FILE.
+ SKIP is the number of frames to skip, as in backtrace_full. Any
+ error messages are printed to stderr. This function requires debug
+ info for the executable. */
+
+extern void backtrace_print (struct backtrace_state *state, int skip, FILE *);
+
+/* Given PC, a program counter in the current program, call the
+ callback function with filename, line number, and function name
+ information. This will normally call the callback function exactly
+ once. However, if the PC happens to describe an inlined call, and
+ the debugging information contains the necessary information, then
+ this may call the callback function multiple times. This will make
+ at least one call to either CALLBACK or ERROR_CALLBACK. This
+ returns the first non-zero value returned by CALLBACK, or 0. */
+
+extern int backtrace_pcinfo (struct backtrace_state *state, uintptr_t pc,
+ backtrace_full_callback callback,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* The type of the callback argument to backtrace_syminfo. DATA and
+ PC are the arguments passed to backtrace_syminfo. SYMNAME is the
+ name of the symbol for the corresponding code. SYMVAL is the
+ value and SYMSIZE is the size of the symbol. SYMNAME will be NULL
+ if no error occurred but the symbol could not be found. */
+
+typedef void (*backtrace_syminfo_callback) (void *data, uintptr_t pc,
+ const char *symname,
+ uintptr_t symval,
+ uintptr_t symsize);
+
+/* Given ADDR, an address or program counter in the current program,
+ call the callback information with the symbol name and value
+ describing the function or variable in which ADDR may be found.
+ This will call either CALLBACK or ERROR_CALLBACK exactly once.
+ This returns 1 on success, 0 on failure. This function requires
+ the symbol table but does not require the debug info. Note that if
+ the symbol table is present but ADDR could not be found in the
+ table, CALLBACK will be called with a NULL SYMNAME argument.
+ Returns 1 on success, 0 on error. */
+
+extern int backtrace_syminfo (struct backtrace_state *state, uintptr_t addr,
+ backtrace_syminfo_callback callback,
+ backtrace_error_callback error_callback,
+ void *data);
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/libbacktrace/config.h b/3rdparty/tracy/tracy/libbacktrace/config.h
new file mode 100644
index 0000000..aa3259d
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/config.h
@@ -0,0 +1,22 @@
+#include <limits.h>
+#if __WORDSIZE == 64
+# define BACKTRACE_ELF_SIZE 64
+#else
+# define BACKTRACE_ELF_SIZE 32
+#endif
+
+#define HAVE_DLFCN_H 1
+#define HAVE_FCNTL 1
+#define HAVE_INTTYPES_H 1
+#define HAVE_LSTAT 1
+#define HAVE_READLINK 1
+#define HAVE_DL_ITERATE_PHDR 1
+#define HAVE_ATOMIC_FUNCTIONS 1
+#define HAVE_DECL_STRNLEN 1
+
+#ifdef __APPLE__
+# define HAVE_MACH_O_DYLD_H 1
+#elif defined BSD
+# define HAVE_KERN_PROC 1
+# define HAVE_KERN_PROC_ARGS 1
+#endif
diff --git a/3rdparty/tracy/tracy/libbacktrace/dwarf.cpp b/3rdparty/tracy/tracy/libbacktrace/dwarf.cpp
new file mode 100644
index 0000000..2715988
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/dwarf.cpp
@@ -0,0 +1,4407 @@
+/* dwarf.c -- Get file/line information from DWARF for backtraces.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "filenames.hpp"
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+namespace tracy
+{
+
+/* DWARF constants. */
+
+enum dwarf_tag {
+ DW_TAG_entry_point = 0x3,
+ DW_TAG_compile_unit = 0x11,
+ DW_TAG_inlined_subroutine = 0x1d,
+ DW_TAG_subprogram = 0x2e,
+ DW_TAG_skeleton_unit = 0x4a,
+};
+
+enum dwarf_form {
+ DW_FORM_addr = 0x01,
+ DW_FORM_block2 = 0x03,
+ DW_FORM_block4 = 0x04,
+ DW_FORM_data2 = 0x05,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_data8 = 0x07,
+ DW_FORM_string = 0x08,
+ DW_FORM_block = 0x09,
+ DW_FORM_block1 = 0x0a,
+ DW_FORM_data1 = 0x0b,
+ DW_FORM_flag = 0x0c,
+ DW_FORM_sdata = 0x0d,
+ DW_FORM_strp = 0x0e,
+ DW_FORM_udata = 0x0f,
+ DW_FORM_ref_addr = 0x10,
+ DW_FORM_ref1 = 0x11,
+ DW_FORM_ref2 = 0x12,
+ DW_FORM_ref4 = 0x13,
+ DW_FORM_ref8 = 0x14,
+ DW_FORM_ref_udata = 0x15,
+ DW_FORM_indirect = 0x16,
+ DW_FORM_sec_offset = 0x17,
+ DW_FORM_exprloc = 0x18,
+ DW_FORM_flag_present = 0x19,
+ DW_FORM_ref_sig8 = 0x20,
+ DW_FORM_strx = 0x1a,
+ DW_FORM_addrx = 0x1b,
+ DW_FORM_ref_sup4 = 0x1c,
+ DW_FORM_strp_sup = 0x1d,
+ DW_FORM_data16 = 0x1e,
+ DW_FORM_line_strp = 0x1f,
+ DW_FORM_implicit_const = 0x21,
+ DW_FORM_loclistx = 0x22,
+ DW_FORM_rnglistx = 0x23,
+ DW_FORM_ref_sup8 = 0x24,
+ DW_FORM_strx1 = 0x25,
+ DW_FORM_strx2 = 0x26,
+ DW_FORM_strx3 = 0x27,
+ DW_FORM_strx4 = 0x28,
+ DW_FORM_addrx1 = 0x29,
+ DW_FORM_addrx2 = 0x2a,
+ DW_FORM_addrx3 = 0x2b,
+ DW_FORM_addrx4 = 0x2c,
+ DW_FORM_GNU_addr_index = 0x1f01,
+ DW_FORM_GNU_str_index = 0x1f02,
+ DW_FORM_GNU_ref_alt = 0x1f20,
+ DW_FORM_GNU_strp_alt = 0x1f21
+};
+
+enum dwarf_attribute {
+ DW_AT_sibling = 0x01,
+ DW_AT_location = 0x02,
+ DW_AT_name = 0x03,
+ DW_AT_ordering = 0x09,
+ DW_AT_subscr_data = 0x0a,
+ DW_AT_byte_size = 0x0b,
+ DW_AT_bit_offset = 0x0c,
+ DW_AT_bit_size = 0x0d,
+ DW_AT_element_list = 0x0f,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12,
+ DW_AT_language = 0x13,
+ DW_AT_member = 0x14,
+ DW_AT_discr = 0x15,
+ DW_AT_discr_value = 0x16,
+ DW_AT_visibility = 0x17,
+ DW_AT_import = 0x18,
+ DW_AT_string_length = 0x19,
+ DW_AT_common_reference = 0x1a,
+ DW_AT_comp_dir = 0x1b,
+ DW_AT_const_value = 0x1c,
+ DW_AT_containing_type = 0x1d,
+ DW_AT_default_value = 0x1e,
+ DW_AT_inline = 0x20,
+ DW_AT_is_optional = 0x21,
+ DW_AT_lower_bound = 0x22,
+ DW_AT_producer = 0x25,
+ DW_AT_prototyped = 0x27,
+ DW_AT_return_addr = 0x2a,
+ DW_AT_start_scope = 0x2c,
+ DW_AT_bit_stride = 0x2e,
+ DW_AT_upper_bound = 0x2f,
+ DW_AT_abstract_origin = 0x31,
+ DW_AT_accessibility = 0x32,
+ DW_AT_address_class = 0x33,
+ DW_AT_artificial = 0x34,
+ DW_AT_base_types = 0x35,
+ DW_AT_calling_convention = 0x36,
+ DW_AT_count = 0x37,
+ DW_AT_data_member_location = 0x38,
+ DW_AT_decl_column = 0x39,
+ DW_AT_decl_file = 0x3a,
+ DW_AT_decl_line = 0x3b,
+ DW_AT_declaration = 0x3c,
+ DW_AT_discr_list = 0x3d,
+ DW_AT_encoding = 0x3e,
+ DW_AT_external = 0x3f,
+ DW_AT_frame_base = 0x40,
+ DW_AT_friend = 0x41,
+ DW_AT_identifier_case = 0x42,
+ DW_AT_macro_info = 0x43,
+ DW_AT_namelist_items = 0x44,
+ DW_AT_priority = 0x45,
+ DW_AT_segment = 0x46,
+ DW_AT_specification = 0x47,
+ DW_AT_static_link = 0x48,
+ DW_AT_type = 0x49,
+ DW_AT_use_location = 0x4a,
+ DW_AT_variable_parameter = 0x4b,
+ DW_AT_virtuality = 0x4c,
+ DW_AT_vtable_elem_location = 0x4d,
+ DW_AT_allocated = 0x4e,
+ DW_AT_associated = 0x4f,
+ DW_AT_data_location = 0x50,
+ DW_AT_byte_stride = 0x51,
+ DW_AT_entry_pc = 0x52,
+ DW_AT_use_UTF8 = 0x53,
+ DW_AT_extension = 0x54,
+ DW_AT_ranges = 0x55,
+ DW_AT_trampoline = 0x56,
+ DW_AT_call_column = 0x57,
+ DW_AT_call_file = 0x58,
+ DW_AT_call_line = 0x59,
+ DW_AT_description = 0x5a,
+ DW_AT_binary_scale = 0x5b,
+ DW_AT_decimal_scale = 0x5c,
+ DW_AT_small = 0x5d,
+ DW_AT_decimal_sign = 0x5e,
+ DW_AT_digit_count = 0x5f,
+ DW_AT_picture_string = 0x60,
+ DW_AT_mutable = 0x61,
+ DW_AT_threads_scaled = 0x62,
+ DW_AT_explicit = 0x63,
+ DW_AT_object_pointer = 0x64,
+ DW_AT_endianity = 0x65,
+ DW_AT_elemental = 0x66,
+ DW_AT_pure = 0x67,
+ DW_AT_recursive = 0x68,
+ DW_AT_signature = 0x69,
+ DW_AT_main_subprogram = 0x6a,
+ DW_AT_data_bit_offset = 0x6b,
+ DW_AT_const_expr = 0x6c,
+ DW_AT_enum_class = 0x6d,
+ DW_AT_linkage_name = 0x6e,
+ DW_AT_string_length_bit_size = 0x6f,
+ DW_AT_string_length_byte_size = 0x70,
+ DW_AT_rank = 0x71,
+ DW_AT_str_offsets_base = 0x72,
+ DW_AT_addr_base = 0x73,
+ DW_AT_rnglists_base = 0x74,
+ DW_AT_dwo_name = 0x76,
+ DW_AT_reference = 0x77,
+ DW_AT_rvalue_reference = 0x78,
+ DW_AT_macros = 0x79,
+ DW_AT_call_all_calls = 0x7a,
+ DW_AT_call_all_source_calls = 0x7b,
+ DW_AT_call_all_tail_calls = 0x7c,
+ DW_AT_call_return_pc = 0x7d,
+ DW_AT_call_value = 0x7e,
+ DW_AT_call_origin = 0x7f,
+ DW_AT_call_parameter = 0x80,
+ DW_AT_call_pc = 0x81,
+ DW_AT_call_tail_call = 0x82,
+ DW_AT_call_target = 0x83,
+ DW_AT_call_target_clobbered = 0x84,
+ DW_AT_call_data_location = 0x85,
+ DW_AT_call_data_value = 0x86,
+ DW_AT_noreturn = 0x87,
+ DW_AT_alignment = 0x88,
+ DW_AT_export_symbols = 0x89,
+ DW_AT_deleted = 0x8a,
+ DW_AT_defaulted = 0x8b,
+ DW_AT_loclists_base = 0x8c,
+ DW_AT_lo_user = 0x2000,
+ DW_AT_hi_user = 0x3fff,
+ DW_AT_MIPS_fde = 0x2001,
+ DW_AT_MIPS_loop_begin = 0x2002,
+ DW_AT_MIPS_tail_loop_begin = 0x2003,
+ DW_AT_MIPS_epilog_begin = 0x2004,
+ DW_AT_MIPS_loop_unroll_factor = 0x2005,
+ DW_AT_MIPS_software_pipeline_depth = 0x2006,
+ DW_AT_MIPS_linkage_name = 0x2007,
+ DW_AT_MIPS_stride = 0x2008,
+ DW_AT_MIPS_abstract_name = 0x2009,
+ DW_AT_MIPS_clone_origin = 0x200a,
+ DW_AT_MIPS_has_inlines = 0x200b,
+ DW_AT_HP_block_index = 0x2000,
+ DW_AT_HP_unmodifiable = 0x2001,
+ DW_AT_HP_prologue = 0x2005,
+ DW_AT_HP_epilogue = 0x2008,
+ DW_AT_HP_actuals_stmt_list = 0x2010,
+ DW_AT_HP_proc_per_section = 0x2011,
+ DW_AT_HP_raw_data_ptr = 0x2012,
+ DW_AT_HP_pass_by_reference = 0x2013,
+ DW_AT_HP_opt_level = 0x2014,
+ DW_AT_HP_prof_version_id = 0x2015,
+ DW_AT_HP_opt_flags = 0x2016,
+ DW_AT_HP_cold_region_low_pc = 0x2017,
+ DW_AT_HP_cold_region_high_pc = 0x2018,
+ DW_AT_HP_all_variables_modifiable = 0x2019,
+ DW_AT_HP_linkage_name = 0x201a,
+ DW_AT_HP_prof_flags = 0x201b,
+ DW_AT_HP_unit_name = 0x201f,
+ DW_AT_HP_unit_size = 0x2020,
+ DW_AT_HP_widened_byte_size = 0x2021,
+ DW_AT_HP_definition_points = 0x2022,
+ DW_AT_HP_default_location = 0x2023,
+ DW_AT_HP_is_result_param = 0x2029,
+ DW_AT_sf_names = 0x2101,
+ DW_AT_src_info = 0x2102,
+ DW_AT_mac_info = 0x2103,
+ DW_AT_src_coords = 0x2104,
+ DW_AT_body_begin = 0x2105,
+ DW_AT_body_end = 0x2106,
+ DW_AT_GNU_vector = 0x2107,
+ DW_AT_GNU_guarded_by = 0x2108,
+ DW_AT_GNU_pt_guarded_by = 0x2109,
+ DW_AT_GNU_guarded = 0x210a,
+ DW_AT_GNU_pt_guarded = 0x210b,
+ DW_AT_GNU_locks_excluded = 0x210c,
+ DW_AT_GNU_exclusive_locks_required = 0x210d,
+ DW_AT_GNU_shared_locks_required = 0x210e,
+ DW_AT_GNU_odr_signature = 0x210f,
+ DW_AT_GNU_template_name = 0x2110,
+ DW_AT_GNU_call_site_value = 0x2111,
+ DW_AT_GNU_call_site_data_value = 0x2112,
+ DW_AT_GNU_call_site_target = 0x2113,
+ DW_AT_GNU_call_site_target_clobbered = 0x2114,
+ DW_AT_GNU_tail_call = 0x2115,
+ DW_AT_GNU_all_tail_call_sites = 0x2116,
+ DW_AT_GNU_all_call_sites = 0x2117,
+ DW_AT_GNU_all_source_call_sites = 0x2118,
+ DW_AT_GNU_macros = 0x2119,
+ DW_AT_GNU_deleted = 0x211a,
+ DW_AT_GNU_dwo_name = 0x2130,
+ DW_AT_GNU_dwo_id = 0x2131,
+ DW_AT_GNU_ranges_base = 0x2132,
+ DW_AT_GNU_addr_base = 0x2133,
+ DW_AT_GNU_pubnames = 0x2134,
+ DW_AT_GNU_pubtypes = 0x2135,
+ DW_AT_GNU_discriminator = 0x2136,
+ DW_AT_GNU_locviews = 0x2137,
+ DW_AT_GNU_entry_view = 0x2138,
+ DW_AT_VMS_rtnbeg_pd_address = 0x2201,
+ DW_AT_use_GNAT_descriptive_type = 0x2301,
+ DW_AT_GNAT_descriptive_type = 0x2302,
+ DW_AT_GNU_numerator = 0x2303,
+ DW_AT_GNU_denominator = 0x2304,
+ DW_AT_GNU_bias = 0x2305,
+ DW_AT_upc_threads_scaled = 0x3210,
+ DW_AT_PGI_lbase = 0x3a00,
+ DW_AT_PGI_soffset = 0x3a01,
+ DW_AT_PGI_lstride = 0x3a02,
+ DW_AT_APPLE_optimized = 0x3fe1,
+ DW_AT_APPLE_flags = 0x3fe2,
+ DW_AT_APPLE_isa = 0x3fe3,
+ DW_AT_APPLE_block = 0x3fe4,
+ DW_AT_APPLE_major_runtime_vers = 0x3fe5,
+ DW_AT_APPLE_runtime_class = 0x3fe6,
+ DW_AT_APPLE_omit_frame_ptr = 0x3fe7,
+ DW_AT_APPLE_property_name = 0x3fe8,
+ DW_AT_APPLE_property_getter = 0x3fe9,
+ DW_AT_APPLE_property_setter = 0x3fea,
+ DW_AT_APPLE_property_attribute = 0x3feb,
+ DW_AT_APPLE_objc_complete_type = 0x3fec,
+ DW_AT_APPLE_property = 0x3fed
+};
+
+enum dwarf_line_number_op {
+ DW_LNS_extended_op = 0x0,
+ DW_LNS_copy = 0x1,
+ DW_LNS_advance_pc = 0x2,
+ DW_LNS_advance_line = 0x3,
+ DW_LNS_set_file = 0x4,
+ DW_LNS_set_column = 0x5,
+ DW_LNS_negate_stmt = 0x6,
+ DW_LNS_set_basic_block = 0x7,
+ DW_LNS_const_add_pc = 0x8,
+ DW_LNS_fixed_advance_pc = 0x9,
+ DW_LNS_set_prologue_end = 0xa,
+ DW_LNS_set_epilogue_begin = 0xb,
+ DW_LNS_set_isa = 0xc,
+};
+
+enum dwarf_extended_line_number_op {
+ DW_LNE_end_sequence = 0x1,
+ DW_LNE_set_address = 0x2,
+ DW_LNE_define_file = 0x3,
+ DW_LNE_set_discriminator = 0x4,
+};
+
+enum dwarf_line_number_content_type {
+ DW_LNCT_path = 0x1,
+ DW_LNCT_directory_index = 0x2,
+ DW_LNCT_timestamp = 0x3,
+ DW_LNCT_size = 0x4,
+ DW_LNCT_MD5 = 0x5,
+ DW_LNCT_lo_user = 0x2000,
+ DW_LNCT_hi_user = 0x3fff
+};
+
+enum dwarf_range_list_entry {
+ DW_RLE_end_of_list = 0x00,
+ DW_RLE_base_addressx = 0x01,
+ DW_RLE_startx_endx = 0x02,
+ DW_RLE_startx_length = 0x03,
+ DW_RLE_offset_pair = 0x04,
+ DW_RLE_base_address = 0x05,
+ DW_RLE_start_end = 0x06,
+ DW_RLE_start_length = 0x07
+};
+
+enum dwarf_unit_type {
+ DW_UT_compile = 0x01,
+ DW_UT_type = 0x02,
+ DW_UT_partial = 0x03,
+ DW_UT_skeleton = 0x04,
+ DW_UT_split_compile = 0x05,
+ DW_UT_split_type = 0x06,
+ DW_UT_lo_user = 0x80,
+ DW_UT_hi_user = 0xff
+};
+
+#if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN
+
+/* If strnlen is not declared, provide our own version. */
+
+static size_t
+xstrnlen (const char *s, size_t maxlen)
+{
+ size_t i;
+
+ for (i = 0; i < maxlen; ++i)
+ if (s[i] == '\0')
+ break;
+ return i;
+}
+
+#define strnlen xstrnlen
+
+#endif
+
+/* A buffer to read DWARF info. */
+
+struct dwarf_buf
+{
+ /* Buffer name for error messages. */
+ const char *name;
+ /* Start of the buffer. */
+ const unsigned char *start;
+ /* Next byte to read. */
+ const unsigned char *buf;
+ /* The number of bytes remaining. */
+ size_t left;
+ /* Whether the data is big-endian. */
+ int is_bigendian;
+ /* Error callback routine. */
+ backtrace_error_callback error_callback;
+ /* Data for error_callback. */
+ void *data;
+ /* Non-zero if we've reported an underflow error. */
+ int reported_underflow;
+};
+
+/* A single attribute in a DWARF abbreviation. */
+
+struct attr
+{
+ /* The attribute name. */
+ enum dwarf_attribute name;
+ /* The attribute form. */
+ enum dwarf_form form;
+ /* The attribute value, for DW_FORM_implicit_const. */
+ int64_t val;
+};
+
+/* A single DWARF abbreviation. */
+
+struct abbrev
+{
+ /* The abbrev code--the number used to refer to the abbrev. */
+ uint64_t code;
+ /* The entry tag. */
+ enum dwarf_tag tag;
+ /* Non-zero if this abbrev has child entries. */
+ int has_children;
+ /* The number of attributes. */
+ size_t num_attrs;
+ /* The attributes. */
+ struct attr *attrs;
+};
+
+/* The DWARF abbreviations for a compilation unit. This structure
+ only exists while reading the compilation unit. Most DWARF readers
+   seem to use a hash table to map abbrev ID's to abbrev entries.
+ However, we primarily care about GCC, and GCC simply issues ID's in
+ numerical order starting at 1. So we simply keep a sorted vector,
+ and try to just look up the code. */
+
+struct abbrevs
+{
+ /* The number of abbrevs in the vector. */
+ size_t num_abbrevs;
+ /* The abbrevs, sorted by the code field. */
+ struct abbrev *abbrevs;
+};
+
+/* The different kinds of attribute values. */
+
+enum attr_val_encoding
+{
+ /* No attribute value. */
+ ATTR_VAL_NONE,
+ /* An address. */
+ ATTR_VAL_ADDRESS,
+ /* An index into the .debug_addr section, whose value is relative to
+ * the DW_AT_addr_base attribute of the compilation unit. */
+ ATTR_VAL_ADDRESS_INDEX,
+  /* An unsigned integer. */
+  ATTR_VAL_UINT,
+  /* A signed integer. */
+  ATTR_VAL_SINT,
+ /* A string. */
+ ATTR_VAL_STRING,
+ /* An index into the .debug_str_offsets section. */
+ ATTR_VAL_STRING_INDEX,
+ /* An offset to other data in the containing unit. */
+ ATTR_VAL_REF_UNIT,
+ /* An offset to other data within the .debug_info section. */
+ ATTR_VAL_REF_INFO,
+ /* An offset to other data within the alt .debug_info section. */
+ ATTR_VAL_REF_ALT_INFO,
+ /* An offset to data in some other section. */
+ ATTR_VAL_REF_SECTION,
+ /* A type signature. */
+ ATTR_VAL_REF_TYPE,
+ /* An index into the .debug_rnglists section. */
+ ATTR_VAL_RNGLISTS_INDEX,
+ /* A block of data (not represented). */
+ ATTR_VAL_BLOCK,
+ /* An expression (not represented). */
+ ATTR_VAL_EXPR,
+};
+
+/* An attribute value. */
+
+struct attr_val
+{
+ /* How the value is stored in the field u. */
+ enum attr_val_encoding encoding;
+ union
+ {
+ /* ATTR_VAL_ADDRESS*, ATTR_VAL_UINT, ATTR_VAL_REF*. */
+ uint64_t uint;
+ /* ATTR_VAL_SINT. */
+ int64_t sint;
+ /* ATTR_VAL_STRING. */
+ const char *string;
+ /* ATTR_VAL_BLOCK not stored. */
+ } u;
+};
+
+/* The line number program header. */
+
+struct line_header
+{
+ /* The version of the line number information. */
+ int version;
+ /* Address size. */
+ int addrsize;
+ /* The minimum instruction length. */
+ unsigned int min_insn_len;
+ /* The maximum number of ops per instruction. */
+ unsigned int max_ops_per_insn;
+ /* The line base for special opcodes. */
+ int line_base;
+ /* The line range for special opcodes. */
+ unsigned int line_range;
+ /* The opcode base--the first special opcode. */
+ unsigned int opcode_base;
+ /* Opcode lengths, indexed by opcode - 1. */
+ const unsigned char *opcode_lengths;
+ /* The number of directory entries. */
+ size_t dirs_count;
+ /* The directory entries. */
+ const char **dirs;
+ /* The number of filenames. */
+ size_t filenames_count;
+ /* The filenames. */
+ const char **filenames;
+};
+
+/* A format description from a line header. */
+
+struct line_header_format
+{
+ int lnct; /* LNCT code. */
+ enum dwarf_form form; /* Form of entry data. */
+};
+
+/* Map a single PC value to a file/line. We will keep a vector of
+ these sorted by PC value. Each file/line will be correct from the
+ PC up to the PC of the next entry if there is one. We allocate one
+ extra entry at the end so that we can use bsearch. */
+
+struct line
+{
+ /* PC. */
+ uintptr_t pc;
+ /* File name. Many entries in the array are expected to point to
+ the same file name. */
+ const char *filename;
+ /* Line number. */
+ int lineno;
+ /* Index of the object in the original array read from the DWARF
+ section, before it has been sorted. The index makes it possible
+ to use Quicksort and maintain stability. */
+ int idx;
+};
+
+/* A growable vector of line number information. This is used while
+ reading the line numbers. */
+
+struct line_vector
+{
+ /* Memory. This is an array of struct line. */
+ struct backtrace_vector vec;
+ /* Number of valid mappings. */
+ size_t count;
+};
+
+/* A function described in the debug info. */
+
+struct function
+{
+ /* The name of the function. */
+ const char *name;
+ /* If this is an inlined function, the filename of the call
+ site. */
+ const char *caller_filename;
+ /* If this is an inlined function, the line number of the call
+ site. */
+ int caller_lineno;
+ /* Map PC ranges to inlined functions. */
+ struct function_addrs *function_addrs;
+ size_t function_addrs_count;
+};
+
+/* An address range for a function. This maps a PC value to a
+ specific function. */
+
+struct function_addrs
+{
+ /* Range is LOW <= PC < HIGH. */
+ uint64_t low;
+ uint64_t high;
+ /* Function for this address range. */
+ struct function *function;
+};
+
+/* A growable vector of function address ranges. */
+
+struct function_vector
+{
+ /* Memory. This is an array of struct function_addrs. */
+ struct backtrace_vector vec;
+ /* Number of address ranges present. */
+ size_t count;
+};
+
+/* A DWARF compilation unit. This only holds the information we need
+ to map a PC to a file and line. */
+
+struct unit
+{
+ /* The first entry for this compilation unit. */
+ const unsigned char *unit_data;
+ /* The length of the data for this compilation unit. */
+ size_t unit_data_len;
+ /* The offset of UNIT_DATA from the start of the information for
+ this compilation unit. */
+ size_t unit_data_offset;
+ /* Offset of the start of the compilation unit from the start of the
+ .debug_info section. */
+ size_t low_offset;
+ /* Offset of the end of the compilation unit from the start of the
+ .debug_info section. */
+ size_t high_offset;
+ /* DWARF version. */
+ int version;
+ /* Whether unit is DWARF64. */
+ int is_dwarf64;
+ /* Address size. */
+ int addrsize;
+ /* Offset into line number information. */
+ off_t lineoff;
+ /* Offset of compilation unit in .debug_str_offsets. */
+ uint64_t str_offsets_base;
+ /* Offset of compilation unit in .debug_addr. */
+ uint64_t addr_base;
+ /* Offset of compilation unit in .debug_rnglists. */
+ uint64_t rnglists_base;
+ /* Primary source file. */
+ const char *filename;
+ /* Compilation command working directory. */
+ const char *comp_dir;
+ /* Absolute file name, only set if needed. */
+ const char *abs_filename;
+ /* The abbreviations for this unit. */
+ struct abbrevs abbrevs;
+
+ /* The fields above this point are read in during initialization and
+ may be accessed freely. The fields below this point are read in
+ as needed, and therefore require care, as different threads may
+ try to initialize them simultaneously. */
+
+ /* PC to line number mapping. This is NULL if the values have not
+ been read. This is (struct line *) -1 if there was an error
+ reading the values. */
+ struct line *lines;
+ /* Number of entries in lines. */
+ size_t lines_count;
+ /* PC ranges to function. */
+ struct function_addrs *function_addrs;
+ size_t function_addrs_count;
+};
+
+/* An address range for a compilation unit. This maps a PC value to a
+ specific compilation unit. Note that we invert the representation
+ in DWARF: instead of listing the units and attaching a list of
+ ranges, we list the ranges and have each one point to the unit.
+ This lets us do a binary search to find the unit. */
+
+struct unit_addrs
+{
+ /* Range is LOW <= PC < HIGH. */
+ uint64_t low;
+ uint64_t high;
+ /* Compilation unit for this address range. */
+ struct unit *u;
+};
+
+/* A growable vector of compilation unit address ranges. */
+
+struct unit_addrs_vector
+{
+ /* Memory. This is an array of struct unit_addrs. */
+ struct backtrace_vector vec;
+ /* Number of address ranges present. */
+ size_t count;
+};
+
+/* A growable vector of compilation unit pointer. */
+
+struct unit_vector
+{
+ struct backtrace_vector vec;
+ size_t count;
+};
+
+/* The information we need to map a PC to a file and line. */
+
+struct dwarf_data
+{
+ /* The data for the next file we know about. */
+ struct dwarf_data *next;
+ /* The data for .gnu_debugaltlink. */
+ struct dwarf_data *altlink;
+ /* The base address for this file. */
+ uintptr_t base_address;
+ /* A sorted list of address ranges. */
+ struct unit_addrs *addrs;
+ /* Number of address ranges in list. */
+ size_t addrs_count;
+ /* A sorted list of units. */
+ struct unit **units;
+ /* Number of units in the list. */
+ size_t units_count;
+ /* The unparsed DWARF debug data. */
+ struct dwarf_sections dwarf_sections;
+ /* Whether the data is big-endian or not. */
+ int is_bigendian;
+ /* A vector used for function addresses. We keep this here so that
+ we can grow the vector as we read more functions. */
+ struct function_vector fvec;
+};
+
+/* Report an error for a DWARF buffer. */
+
+static void
+dwarf_buf_error (struct dwarf_buf *buf, const char *msg, int errnum)
+{
+ char b[200];
+
+ snprintf (b, sizeof b, "%s in %s at %d",
+ msg, buf->name, (int) (buf->buf - buf->start));
+ buf->error_callback (buf->data, b, errnum);
+}
+
+/* Require at least COUNT bytes in BUF. Return 1 if all is well, 0 on
+ error. */
+
+static int
+require (struct dwarf_buf *buf, size_t count)
+{
+ if (buf->left >= count)
+ return 1;
+
+ if (!buf->reported_underflow)
+ {
+ dwarf_buf_error (buf, "DWARF underflow", 0);
+ buf->reported_underflow = 1;
+ }
+
+ return 0;
+}
+
+/* Advance COUNT bytes in BUF. Return 1 if all is well, 0 on
+ error. */
+
+static int
+advance (struct dwarf_buf *buf, size_t count)
+{
+ if (!require (buf, count))
+ return 0;
+ buf->buf += count;
+ buf->left -= count;
+ return 1;
+}
+
+/* Read one zero-terminated string from BUF and advance past the string. */
+
+static const char *
+read_string (struct dwarf_buf *buf)
+{
+ const char *p = (const char *)buf->buf;
+ size_t len = strnlen (p, buf->left);
+
+ /* - If len == left, we ran out of buffer before finding the zero terminator.
+ Generate an error by advancing len + 1.
+ - If len < left, advance by len + 1 to skip past the zero terminator. */
+ size_t count = len + 1;
+
+ if (!advance (buf, count))
+ return NULL;
+
+ return p;
+}
+
+/* Read one byte from BUF and advance 1 byte. */
+
+static unsigned char
+read_byte (struct dwarf_buf *buf)
+{
+ const unsigned char *p = buf->buf;
+
+ if (!advance (buf, 1))
+ return 0;
+ return p[0];
+}
+
+/* Read a signed char from BUF and advance 1 byte. */
+
+static signed char
+read_sbyte (struct dwarf_buf *buf)
+{
+ const unsigned char *p = buf->buf;
+
+ if (!advance (buf, 1))
+ return 0;
+ return (*p ^ 0x80) - 0x80;
+}
+
+/* Read a uint16 from BUF and advance 2 bytes. */
+
+static uint16_t
+read_uint16 (struct dwarf_buf *buf)
+{
+ const unsigned char *p = buf->buf;
+
+ if (!advance (buf, 2))
+ return 0;
+ if (buf->is_bigendian)
+ return ((uint16_t) p[0] << 8) | (uint16_t) p[1];
+ else
+ return ((uint16_t) p[1] << 8) | (uint16_t) p[0];
+}
+
+/* Read a 24 bit value from BUF and advance 3 bytes. */
+
+static uint32_t
+read_uint24 (struct dwarf_buf *buf)
+{
+ const unsigned char *p = buf->buf;
+
+ if (!advance (buf, 3))
+ return 0;
+ if (buf->is_bigendian)
+ return (((uint32_t) p[0] << 16) | ((uint32_t) p[1] << 8)
+ | (uint32_t) p[2]);
+ else
+ return (((uint32_t) p[2] << 16) | ((uint32_t) p[1] << 8)
+ | (uint32_t) p[0]);
+}
+
+/* Read a uint32 from BUF and advance 4 bytes. */
+
+static uint32_t
+read_uint32 (struct dwarf_buf *buf)
+{
+ const unsigned char *p = buf->buf;
+
+ if (!advance (buf, 4))
+ return 0;
+ if (buf->is_bigendian)
+ return (((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
+ | ((uint32_t) p[2] << 8) | (uint32_t) p[3]);
+ else
+ return (((uint32_t) p[3] << 24) | ((uint32_t) p[2] << 16)
+ | ((uint32_t) p[1] << 8) | (uint32_t) p[0]);
+}
+
+/* Read a uint64 from BUF and advance 8 bytes. */
+
+static uint64_t
+read_uint64 (struct dwarf_buf *buf)
+{
+ const unsigned char *p = buf->buf;
+
+ if (!advance (buf, 8))
+ return 0;
+ if (buf->is_bigendian)
+ return (((uint64_t) p[0] << 56) | ((uint64_t) p[1] << 48)
+ | ((uint64_t) p[2] << 40) | ((uint64_t) p[3] << 32)
+ | ((uint64_t) p[4] << 24) | ((uint64_t) p[5] << 16)
+ | ((uint64_t) p[6] << 8) | (uint64_t) p[7]);
+ else
+ return (((uint64_t) p[7] << 56) | ((uint64_t) p[6] << 48)
+ | ((uint64_t) p[5] << 40) | ((uint64_t) p[4] << 32)
+ | ((uint64_t) p[3] << 24) | ((uint64_t) p[2] << 16)
+ | ((uint64_t) p[1] << 8) | (uint64_t) p[0]);
+}
+
/* Read an offset from BUF and advance the appropriate number of
   bytes: 8 for 64-bit DWARF, 4 for 32-bit DWARF.  */

static uint64_t
read_offset (struct dwarf_buf *buf, int is_dwarf64)
{
  return is_dwarf64 ? read_uint64 (buf) : read_uint32 (buf);
}
+
/* Read an address of ADDRSIZE bytes from BUF and advance past it.
   Reports an error and returns 0 for an unsupported size.  */

static uint64_t
read_address (struct dwarf_buf *buf, int addrsize)
{
  if (addrsize == 1)
    return read_byte (buf);
  if (addrsize == 2)
    return read_uint16 (buf);
  if (addrsize == 4)
    return read_uint32 (buf);
  if (addrsize == 8)
    return read_uint64 (buf);
  dwarf_buf_error (buf, "unrecognized address size", 0);
  return 0;
}
+
/* Return whether ADDRESS is the highest possible address
   (all bits set) for the given address size.  An unsupported
   ADDRSIZE yields 0.  */

static int
is_highest_address (uint64_t address, int addrsize)
{
  uint64_t max;

  switch (addrsize)
    {
    case 1:
      max = 0xff;
      break;
    case 2:
      max = 0xffff;
      break;
    case 4:
      max = 0xffffffffUL;
      break;
    case 8:
      max = (uint64_t) -1;
      break;
    default:
      return 0;
    }
  return address == max;
}
+
+/* Read an unsigned LEB128 number. */
+
+static uint64_t
+read_uleb128 (struct dwarf_buf *buf)
+{
+ uint64_t ret;
+ unsigned int shift;
+ int overflow;
+ unsigned char b;
+
+ ret = 0;
+ shift = 0;
+ overflow = 0;
+ do
+ {
+ const unsigned char *p;
+
+ p = buf->buf;
+ if (!advance (buf, 1))
+ return 0;
+ b = *p;
+ if (shift < 64)
+ ret |= ((uint64_t) (b & 0x7f)) << shift;
+ else if (!overflow)
+ {
+ dwarf_buf_error (buf, "LEB128 overflows uint64_t", 0);
+ overflow = 1;
+ }
+ shift += 7;
+ }
+ while ((b & 0x80) != 0);
+
+ return ret;
+}
+
/* Read a signed LEB128 number.  Returns 0 on underflow.  */

static int64_t
read_sleb128 (struct dwarf_buf *buf)
{
  uint64_t val;
  unsigned int shift;
  int overflow;
  unsigned char b;

  val = 0;
  shift = 0;
  overflow = 0;
  do
    {
      const unsigned char *p;

      p = buf->buf;
      if (!advance (buf, 1))
	return 0;
      b = *p;
      /* Each byte contributes its low seven bits; bits beyond 64 are
	 diagnosed once and then discarded.  */
      if (shift < 64)
	val |= ((uint64_t) (b & 0x7f)) << shift;
      else if (!overflow)
	{
	  dwarf_buf_error (buf, "signed LEB128 overflows uint64_t", 0);
	  overflow = 1;
	}
      shift += 7;
    }
  while ((b & 0x80) != 0);

  /* If bit 0x40 of the final byte is set the value is negative:
     sign-extend by filling the remaining high bits with ones.  */
  if ((b & 0x40) != 0 && shift < 64)
    val |= ((uint64_t) -1) << shift;

  return (int64_t) val;
}
+
/* Return the number of bytes occupied by the LEB128 number starting
   at P.  The caller must guarantee the encoding is terminated within
   valid memory.  */

static size_t
leb128_len (const unsigned char *p)
{
  size_t n;

  /* Every byte with the continuation bit (0x80) set is followed by
     another byte; the terminating byte has it clear.  */
  for (n = 1; (*p & 0x80) != 0; ++p)
    ++n;
  return n;
}
+
/* Read an initial_length field from BUF and advance the appropriate
   number of bytes.  Sets *IS_DWARF64 to whether the 0xffffffff
   64-bit-DWARF escape value was seen.  */

static uint64_t
read_initial_length (struct dwarf_buf *buf, int *is_dwarf64)
{
  uint64_t len = read_uint32 (buf);

  *is_dwarf64 = (len == 0xffffffff);
  if (*is_dwarf64)
    len = read_uint64 (buf);

  return len;
}
+
+/* Free an abbreviations structure. */
+
+static void
+free_abbrevs (struct backtrace_state *state, struct abbrevs *abbrevs,
+ backtrace_error_callback error_callback, void *data)
+{
+ size_t i;
+
+ for (i = 0; i < abbrevs->num_abbrevs; ++i)
+ backtrace_free (state, abbrevs->abbrevs[i].attrs,
+ abbrevs->abbrevs[i].num_attrs * sizeof (struct attr),
+ error_callback, data);
+ backtrace_free (state, abbrevs->abbrevs,
+ abbrevs->num_abbrevs * sizeof (struct abbrev),
+ error_callback, data);
+ abbrevs->num_abbrevs = 0;
+ abbrevs->abbrevs = NULL;
+}
+
/* Read an attribute value.  Returns 1 on success, 0 on failure.  If
   the value can be represented as a uint64_t, sets *VAL and sets
   *IS_VALID to 1.  We don't try to store the value of other attribute
   forms, because we don't care about them.  */

static int
read_attribute (enum dwarf_form form, uint64_t implicit_val,
		struct dwarf_buf *buf, int is_dwarf64, int version,
		int addrsize, const struct dwarf_sections *dwarf_sections,
		struct dwarf_data *altlink, struct attr_val *val)
{
  /* Avoid warnings about val.u.FIELD may be used uninitialized if
     this function is inlined.  The warnings aren't valid but can
     occur because the different fields are set and used
     conditionally.  */
  memset (val, 0, sizeof *val);

  switch (form)
    {
    case DW_FORM_addr:
      val->encoding = ATTR_VAL_ADDRESS;
      val->u.uint = read_address (buf, addrsize);
      return 1;
    /* Block forms: we record only the encoding and skip the data.  */
    case DW_FORM_block2:
      val->encoding = ATTR_VAL_BLOCK;
      return advance (buf, read_uint16 (buf));
    case DW_FORM_block4:
      val->encoding = ATTR_VAL_BLOCK;
      return advance (buf, read_uint32 (buf));
    case DW_FORM_data2:
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = read_uint16 (buf);
      return 1;
    case DW_FORM_data4:
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = read_uint32 (buf);
      return 1;
    case DW_FORM_data8:
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = read_uint64 (buf);
      return 1;
    case DW_FORM_data16:
      val->encoding = ATTR_VAL_BLOCK;
      return advance (buf, 16);
    case DW_FORM_string:
      val->encoding = ATTR_VAL_STRING;
      val->u.string = read_string (buf);
      return val->u.string == NULL ? 0 : 1;
    case DW_FORM_block:
      val->encoding = ATTR_VAL_BLOCK;
      return advance (buf, read_uleb128 (buf));
    case DW_FORM_block1:
      val->encoding = ATTR_VAL_BLOCK;
      return advance (buf, read_byte (buf));
    case DW_FORM_data1:
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = read_byte (buf);
      return 1;
    case DW_FORM_flag:
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = read_byte (buf);
      return 1;
    case DW_FORM_sdata:
      val->encoding = ATTR_VAL_SINT;
      val->u.sint = read_sleb128 (buf);
      return 1;
    /* String forms referencing .debug_str / .debug_line_str: the
       offset is bounds-checked against the section size.  */
    case DW_FORM_strp:
      {
	uint64_t offset;

	offset = read_offset (buf, is_dwarf64);
	if (offset >= dwarf_sections->size[DEBUG_STR])
	  {
	    dwarf_buf_error (buf, "DW_FORM_strp out of range", 0);
	    return 0;
	  }
	val->encoding = ATTR_VAL_STRING;
	val->u.string =
	  (const char *) dwarf_sections->data[DEBUG_STR] + offset;
	return 1;
      }
    case DW_FORM_line_strp:
      {
	uint64_t offset;

	offset = read_offset (buf, is_dwarf64);
	if (offset >= dwarf_sections->size[DEBUG_LINE_STR])
	  {
	    dwarf_buf_error (buf, "DW_FORM_line_strp out of range", 0);
	    return 0;
	  }
	val->encoding = ATTR_VAL_STRING;
	val->u.string =
	  (const char *) dwarf_sections->data[DEBUG_LINE_STR] + offset;
	return 1;
      }
    case DW_FORM_udata:
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = read_uleb128 (buf);
      return 1;
    case DW_FORM_ref_addr:
      val->encoding = ATTR_VAL_REF_INFO;
      /* In DWARF 2 this form had the size of an address; from DWARF 3
	 on it has the size of an offset.  */
      if (version == 2)
	val->u.uint = read_address (buf, addrsize);
      else
	val->u.uint = read_offset (buf, is_dwarf64);
      return 1;
    case DW_FORM_ref1:
      val->encoding = ATTR_VAL_REF_UNIT;
      val->u.uint = read_byte (buf);
      return 1;
    case DW_FORM_ref2:
      val->encoding = ATTR_VAL_REF_UNIT;
      val->u.uint = read_uint16 (buf);
      return 1;
    case DW_FORM_ref4:
      val->encoding = ATTR_VAL_REF_UNIT;
      val->u.uint = read_uint32 (buf);
      return 1;
    case DW_FORM_ref8:
      val->encoding = ATTR_VAL_REF_UNIT;
      val->u.uint = read_uint64 (buf);
      return 1;
    case DW_FORM_ref_udata:
      val->encoding = ATTR_VAL_REF_UNIT;
      val->u.uint = read_uleb128 (buf);
      return 1;
    case DW_FORM_indirect:
      {
	uint64_t form;

	/* The real form follows as a ULEB128; recurse to read it.
	   DW_FORM_implicit_const is rejected because its value lives
	   in the abbrev table, which we no longer have here.  */
	form = read_uleb128 (buf);
	if (form == DW_FORM_implicit_const)
	  {
	    dwarf_buf_error (buf,
			     "DW_FORM_indirect to DW_FORM_implicit_const",
			     0);
	    return 0;
	  }
	return read_attribute ((enum dwarf_form) form, 0, buf, is_dwarf64,
			       version, addrsize, dwarf_sections, altlink,
			       val);
      }
    case DW_FORM_sec_offset:
      val->encoding = ATTR_VAL_REF_SECTION;
      val->u.uint = read_offset (buf, is_dwarf64);
      return 1;
    case DW_FORM_exprloc:
      val->encoding = ATTR_VAL_EXPR;
      return advance (buf, read_uleb128 (buf));
    case DW_FORM_flag_present:
      /* No data in the DIE; presence of the attribute means true.  */
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = 1;
      return 1;
    case DW_FORM_ref_sig8:
      val->encoding = ATTR_VAL_REF_TYPE;
      val->u.uint = read_uint64 (buf);
      return 1;
    /* DWARF 5 indexed strings: store the index; it is resolved
       against .debug_str_offsets later (see resolve_string).  */
    case DW_FORM_strx: case DW_FORM_strx1: case DW_FORM_strx2:
    case DW_FORM_strx3: case DW_FORM_strx4:
      {
	uint64_t offset;

	switch (form)
	  {
	  case DW_FORM_strx:
	    offset = read_uleb128 (buf);
	    break;
	  case DW_FORM_strx1:
	    offset = read_byte (buf);
	    break;
	  case DW_FORM_strx2:
	    offset = read_uint16 (buf);
	    break;
	  case DW_FORM_strx3:
	    offset = read_uint24 (buf);
	    break;
	  case DW_FORM_strx4:
	    offset = read_uint32 (buf);
	    break;
	  default:
	    /* This case can't happen.  */
	    return 0;
	  }
	val->encoding = ATTR_VAL_STRING_INDEX;
	val->u.uint = offset;
	return 1;
      }
    /* DWARF 5 indexed addresses: store the index; it is resolved
       against .debug_addr later (see resolve_addr_index).  */
    case DW_FORM_addrx: case DW_FORM_addrx1: case DW_FORM_addrx2:
    case DW_FORM_addrx3: case DW_FORM_addrx4:
      {
	uint64_t offset;

	switch (form)
	  {
	  case DW_FORM_addrx:
	    offset = read_uleb128 (buf);
	    break;
	  case DW_FORM_addrx1:
	    offset = read_byte (buf);
	    break;
	  case DW_FORM_addrx2:
	    offset = read_uint16 (buf);
	    break;
	  case DW_FORM_addrx3:
	    offset = read_uint24 (buf);
	    break;
	  case DW_FORM_addrx4:
	    offset = read_uint32 (buf);
	    break;
	  default:
	    /* This case can't happen.  */
	    return 0;
	  }
	val->encoding = ATTR_VAL_ADDRESS_INDEX;
	val->u.uint = offset;
	return 1;
      }
    case DW_FORM_ref_sup4:
      val->encoding = ATTR_VAL_REF_SECTION;
      val->u.uint = read_uint32 (buf);
      return 1;
    case DW_FORM_ref_sup8:
      val->encoding = ATTR_VAL_REF_SECTION;
      val->u.uint = read_uint64 (buf);
      return 1;
    case DW_FORM_implicit_const:
      /* No data in the DIE; the value comes from the abbrev table.  */
      val->encoding = ATTR_VAL_UINT;
      val->u.uint = implicit_val;
      return 1;
    case DW_FORM_loclistx:
      /* We don't distinguish this from DW_FORM_sec_offset.  It
       * shouldn't matter since we don't care about loclists.  */
      val->encoding = ATTR_VAL_REF_SECTION;
      val->u.uint = read_uleb128 (buf);
      return 1;
    case DW_FORM_rnglistx:
      val->encoding = ATTR_VAL_RNGLISTS_INDEX;
      val->u.uint = read_uleb128 (buf);
      return 1;
    case DW_FORM_GNU_addr_index:
      val->encoding = ATTR_VAL_REF_SECTION;
      val->u.uint = read_uleb128 (buf);
      return 1;
    case DW_FORM_GNU_str_index:
      val->encoding = ATTR_VAL_REF_SECTION;
      val->u.uint = read_uleb128 (buf);
      return 1;
    /* References into the .gnu_debugaltlink file: without an ALTLINK
       the value cannot be resolved, so it is dropped (not an error).  */
    case DW_FORM_GNU_ref_alt:
      val->u.uint = read_offset (buf, is_dwarf64);
      if (altlink == NULL)
	{
	  val->encoding = ATTR_VAL_NONE;
	  return 1;
	}
      val->encoding = ATTR_VAL_REF_ALT_INFO;
      return 1;
    case DW_FORM_strp_sup: case DW_FORM_GNU_strp_alt:
      {
	uint64_t offset;

	offset = read_offset (buf, is_dwarf64);
	if (altlink == NULL)
	  {
	    val->encoding = ATTR_VAL_NONE;
	    return 1;
	  }
	if (offset >= altlink->dwarf_sections.size[DEBUG_STR])
	  {
	    dwarf_buf_error (buf, "DW_FORM_strp_sup out of range", 0);
	    return 0;
	  }
	val->encoding = ATTR_VAL_STRING;
	val->u.string =
	  (const char *) altlink->dwarf_sections.data[DEBUG_STR] + offset;
	return 1;
      }
    default:
      dwarf_buf_error (buf, "unrecognized DWARF form", -1);
      return 0;
    }
}
+
/* If we can determine the value of a string attribute, set *STRING to
   point to the string.  Return 1 on success, 0 on error.  If we don't
   know the value, we consider that a success, and we don't change
   *STRING.  An error is only reported for some sort of out of range
   offset.  */

static int
resolve_string (const struct dwarf_sections *dwarf_sections, int is_dwarf64,
		int is_bigendian, uint64_t str_offsets_base,
		const struct attr_val *val,
		backtrace_error_callback error_callback, void *data,
		const char **string)
{
  switch (val->encoding)
    {
    case ATTR_VAL_STRING:
      /* Already a direct pointer into a string section.  */
      *string = val->u.string;
      return 1;

    case ATTR_VAL_STRING_INDEX:
      {
	uint64_t offset;
	struct dwarf_buf offset_buf;

	/* The index selects an entry in .debug_str_offsets, relative
	   to STR_OFFSETS_BASE; each entry is an offset-sized value
	   (8 bytes for 64-bit DWARF, 4 otherwise).  */
	offset = val->u.uint * (is_dwarf64 ? 8 : 4) + str_offsets_base;
	if (offset + (is_dwarf64 ? 8 : 4)
	    > dwarf_sections->size[DEBUG_STR_OFFSETS])
	  {
	    error_callback (data, "DW_FORM_strx value out of range", 0);
	    return 0;
	  }

	/* Build a temporary buffer positioned at that entry to read
	   the actual .debug_str offset.  */
	offset_buf.name = ".debug_str_offsets";
	offset_buf.start = dwarf_sections->data[DEBUG_STR_OFFSETS];
	offset_buf.buf = dwarf_sections->data[DEBUG_STR_OFFSETS] + offset;
	offset_buf.left = dwarf_sections->size[DEBUG_STR_OFFSETS] - offset;
	offset_buf.is_bigendian = is_bigendian;
	offset_buf.error_callback = error_callback;
	offset_buf.data = data;
	offset_buf.reported_underflow = 0;

	offset = read_offset (&offset_buf, is_dwarf64);
	if (offset >= dwarf_sections->size[DEBUG_STR])
	  {
	    dwarf_buf_error (&offset_buf,
			     "DW_FORM_strx offset out of range",
			     0);
	    return 0;
	  }
	*string = (const char *) dwarf_sections->data[DEBUG_STR] + offset;
	return 1;
      }

    default:
      /* Not a string encoding we know how to resolve; leave *STRING
	 unchanged and report success.  */
      return 1;
    }
}
+
+/* Set *ADDRESS to the real address for a ATTR_VAL_ADDRESS_INDEX.
+ Return 1 on success, 0 on error. */
+
+static int
+resolve_addr_index (const struct dwarf_sections *dwarf_sections,
+ uint64_t addr_base, int addrsize, int is_bigendian,
+ uint64_t addr_index,
+ backtrace_error_callback error_callback, void *data,
+ uint64_t *address)
+{
+ uint64_t offset;
+ struct dwarf_buf addr_buf;
+
+ offset = addr_index * addrsize + addr_base;
+ if (offset + addrsize > dwarf_sections->size[DEBUG_ADDR])
+ {
+ error_callback (data, "DW_FORM_addrx value out of range", 0);
+ return 0;
+ }
+
+ addr_buf.name = ".debug_addr";
+ addr_buf.start = dwarf_sections->data[DEBUG_ADDR];
+ addr_buf.buf = dwarf_sections->data[DEBUG_ADDR] + offset;
+ addr_buf.left = dwarf_sections->size[DEBUG_ADDR] - offset;
+ addr_buf.is_bigendian = is_bigendian;
+ addr_buf.error_callback = error_callback;
+ addr_buf.data = data;
+ addr_buf.reported_underflow = 0;
+
+ *address = read_address (&addr_buf, addrsize);
+ return 1;
+}
+
+/* Compare a unit offset against a unit for bsearch. */
+
+static int
+units_search (const void *vkey, const void *ventry)
+{
+ const size_t *key = (const size_t *) vkey;
+ const struct unit *entry = *((const struct unit *const *) ventry);
+ size_t offset;
+
+ offset = *key;
+ if (offset < entry->low_offset)
+ return -1;
+ else if (offset >= entry->high_offset)
+ return 1;
+ else
+ return 0;
+}
+
+/* Find a unit in PU containing OFFSET. */
+
+static struct unit *
+find_unit (struct unit **pu, size_t units_count, size_t offset)
+{
+ struct unit **u;
+ u = (struct unit**)bsearch (&offset, pu, units_count, sizeof (struct unit *), units_search);
+ return u == NULL ? NULL : *u;
+}
+
+/* Compare function_addrs for qsort. When ranges are nested, make the
+ smallest one sort last. */
+
+static int
+function_addrs_compare (const void *v1, const void *v2)
+{
+ const struct function_addrs *a1 = (const struct function_addrs *) v1;
+ const struct function_addrs *a2 = (const struct function_addrs *) v2;
+
+ if (a1->low < a2->low)
+ return -1;
+ if (a1->low > a2->low)
+ return 1;
+ if (a1->high < a2->high)
+ return 1;
+ if (a1->high > a2->high)
+ return -1;
+ return strcmp (a1->function->name, a2->function->name);
+}
+
+/* Compare a PC against a function_addrs for bsearch. We always
+ allocate an entra entry at the end of the vector, so that this
+ routine can safely look at the next entry. Note that if there are
+ multiple ranges containing PC, which one will be returned is
+ unpredictable. We compensate for that in dwarf_fileline. */
+
+static int
+function_addrs_search (const void *vkey, const void *ventry)
+{
+ const uintptr_t *key = (const uintptr_t *) vkey;
+ const struct function_addrs *entry = (const struct function_addrs *) ventry;
+ uintptr_t pc;
+
+ pc = *key;
+ if (pc < entry->low)
+ return -1;
+ else if (pc > (entry + 1)->low)
+ return 1;
+ else
+ return 0;
+}
+
/* Add a new compilation unit address range to a vector.  This is
   called via add_ranges.  Returns 1 on success, 0 on failure.  */

static int
add_unit_addr (struct backtrace_state *state, void *rdata,
	       uint64_t lowpc, uint64_t highpc,
	       backtrace_error_callback error_callback, void *data,
	       void *pvec)
{
  struct unit *u = (struct unit *) rdata;
  struct unit_addrs_vector *vec = (struct unit_addrs_vector *) pvec;
  struct unit_addrs *p;

  /* Try to merge with the last entry: extend it when the new range
     for the same unit starts where the previous one ends (or one past
     it, to bridge a one-byte gap).  */
  if (vec->count > 0)
    {
      p = (struct unit_addrs *) vec->vec.base + (vec->count - 1);
      if ((lowpc == p->high || lowpc == p->high + 1)
	  && u == p->u)
	{
	  if (highpc > p->high)
	    p->high = highpc;
	  return 1;
	}
    }

  /* No merge possible; append a new entry.  */
  p = ((struct unit_addrs *)
       backtrace_vector_grow (state, sizeof (struct unit_addrs),
			      error_callback, data, &vec->vec));
  if (p == NULL)
    return 0;

  p->low = lowpc;
  p->high = highpc;
  p->u = u;

  ++vec->count;

  return 1;
}
+
+/* Compare unit_addrs for qsort. When ranges are nested, make the
+ smallest one sort last. */
+
+static int
+unit_addrs_compare (const void *v1, const void *v2)
+{
+ const struct unit_addrs *a1 = (const struct unit_addrs *) v1;
+ const struct unit_addrs *a2 = (const struct unit_addrs *) v2;
+
+ if (a1->low < a2->low)
+ return -1;
+ if (a1->low > a2->low)
+ return 1;
+ if (a1->high < a2->high)
+ return 1;
+ if (a1->high > a2->high)
+ return -1;
+ if (a1->u->lineoff < a2->u->lineoff)
+ return -1;
+ if (a1->u->lineoff > a2->u->lineoff)
+ return 1;
+ return 0;
+}
+
+/* Compare a PC against a unit_addrs for bsearch. We always allocate
+ an entry entry at the end of the vector, so that this routine can
+ safely look at the next entry. Note that if there are multiple
+ ranges containing PC, which one will be returned is unpredictable.
+ We compensate for that in dwarf_fileline. */
+
+static int
+unit_addrs_search (const void *vkey, const void *ventry)
+{
+ const uintptr_t *key = (const uintptr_t *) vkey;
+ const struct unit_addrs *entry = (const struct unit_addrs *) ventry;
+ uintptr_t pc;
+
+ pc = *key;
+ if (pc < entry->low)
+ return -1;
+ else if (pc > (entry + 1)->low)
+ return 1;
+ else
+ return 0;
+}
+
+/* Sort the line vector by PC. We want a stable sort here to maintain
+ the order of lines for the same PC values. Since the sequence is
+ being sorted in place, their addresses cannot be relied on to
+ maintain stability. That is the purpose of the index member. */
+
+static int
+line_compare (const void *v1, const void *v2)
+{
+ const struct line *ln1 = (const struct line *) v1;
+ const struct line *ln2 = (const struct line *) v2;
+
+ if (ln1->pc < ln2->pc)
+ return -1;
+ else if (ln1->pc > ln2->pc)
+ return 1;
+ else if (ln1->idx < ln2->idx)
+ return -1;
+ else if (ln1->idx > ln2->idx)
+ return 1;
+ else
+ return 0;
+}
+
+/* Find a PC in a line vector. We always allocate an extra entry at
+ the end of the lines vector, so that this routine can safely look
+ at the next entry. Note that when there are multiple mappings for
+ the same PC value, this will return the last one. */
+
+static int
+line_search (const void *vkey, const void *ventry)
+{
+ const uintptr_t *key = (const uintptr_t *) vkey;
+ const struct line *entry = (const struct line *) ventry;
+ uintptr_t pc;
+
+ pc = *key;
+ if (pc < entry->pc)
+ return -1;
+ else if (pc >= (entry + 1)->pc)
+ return 1;
+ else
+ return 0;
+}
+
+/* Sort the abbrevs by the abbrev code. This function is passed to
+ both qsort and bsearch. */
+
+static int
+abbrev_compare (const void *v1, const void *v2)
+{
+ const struct abbrev *a1 = (const struct abbrev *) v1;
+ const struct abbrev *a2 = (const struct abbrev *) v2;
+
+ if (a1->code < a2->code)
+ return -1;
+ else if (a1->code > a2->code)
+ return 1;
+ else
+ {
+ /* This really shouldn't happen. It means there are two
+ different abbrevs with the same code, and that means we don't
+ know which one lookup_abbrev should return. */
+ return 0;
+ }
+}
+
/* Read the abbreviation table for a compilation unit.  Returns 1 on
   success, 0 on failure.  Works in two passes: first count the
   abbrevs so the array can be allocated in one shot, then re-read the
   same bytes to fill it in.  */

static int
read_abbrevs (struct backtrace_state *state, uint64_t abbrev_offset,
	      const unsigned char *dwarf_abbrev, size_t dwarf_abbrev_size,
	      int is_bigendian, backtrace_error_callback error_callback,
	      void *data, struct abbrevs *abbrevs)
{
  struct dwarf_buf abbrev_buf;
  struct dwarf_buf count_buf;
  size_t num_abbrevs;

  abbrevs->num_abbrevs = 0;
  abbrevs->abbrevs = NULL;

  if (abbrev_offset >= dwarf_abbrev_size)
    {
      error_callback (data, "abbrev offset out of range", 0);
      return 0;
    }

  abbrev_buf.name = ".debug_abbrev";
  abbrev_buf.start = dwarf_abbrev;
  abbrev_buf.buf = dwarf_abbrev + abbrev_offset;
  abbrev_buf.left = dwarf_abbrev_size - abbrev_offset;
  abbrev_buf.is_bigendian = is_bigendian;
  abbrev_buf.error_callback = error_callback;
  abbrev_buf.data = data;
  abbrev_buf.reported_underflow = 0;

  /* Count the number of abbrevs in this list.  */

  count_buf = abbrev_buf;
  num_abbrevs = 0;
  while (read_uleb128 (&count_buf) != 0)
    {
      if (count_buf.reported_underflow)
	return 0;
      ++num_abbrevs;
      // Skip tag.
      read_uleb128 (&count_buf);
      // Skip has_children.
      read_byte (&count_buf);
      // Skip attributes.
      while (read_uleb128 (&count_buf) != 0)
	{
	  uint64_t form;

	  form = read_uleb128 (&count_buf);
	  // DW_FORM_implicit_const stores its value inline in the
	  // abbrev table as an SLEB128; skip it too.
	  if ((enum dwarf_form) form == DW_FORM_implicit_const)
	    read_sleb128 (&count_buf);
	}
      // Skip form of last attribute.
      read_uleb128 (&count_buf);
    }

  if (count_buf.reported_underflow)
    return 0;

  if (num_abbrevs == 0)
    return 1;

  abbrevs->abbrevs = ((struct abbrev *)
		      backtrace_alloc (state,
				       num_abbrevs * sizeof (struct abbrev),
				       error_callback, data));
  if (abbrevs->abbrevs == NULL)
    return 0;
  abbrevs->num_abbrevs = num_abbrevs;
  memset (abbrevs->abbrevs, 0, num_abbrevs * sizeof (struct abbrev));

  /* Second pass: re-read the same data, this time recording each
     abbrev and its attribute array.  */
  num_abbrevs = 0;
  while (1)
    {
      uint64_t code;
      struct abbrev a;
      size_t num_attrs;
      struct attr *attrs;

      if (abbrev_buf.reported_underflow)
	goto fail;

      code = read_uleb128 (&abbrev_buf);
      if (code == 0)
	break;

      a.code = code;
      a.tag = (enum dwarf_tag) read_uleb128 (&abbrev_buf);
      a.has_children = read_byte (&abbrev_buf);

      /* Count this abbrev's attributes before allocating for them.  */
      count_buf = abbrev_buf;
      num_attrs = 0;
      while (read_uleb128 (&count_buf) != 0)
	{
	  uint64_t form;

	  ++num_attrs;
	  form = read_uleb128 (&count_buf);
	  if ((enum dwarf_form) form == DW_FORM_implicit_const)
	    read_sleb128 (&count_buf);
	}

      if (num_attrs == 0)
	{
	  /* Still consume the terminating name/form pair.  */
	  attrs = NULL;
	  read_uleb128 (&abbrev_buf);
	  read_uleb128 (&abbrev_buf);
	}
      else
	{
	  attrs = ((struct attr *)
		   backtrace_alloc (state, num_attrs * sizeof *attrs,
				    error_callback, data));
	  if (attrs == NULL)
	    goto fail;
	  num_attrs = 0;
	  while (1)
	    {
	      uint64_t name;
	      uint64_t form;

	      name = read_uleb128 (&abbrev_buf);
	      form = read_uleb128 (&abbrev_buf);
	      if (name == 0)
		break;
	      attrs[num_attrs].name = (enum dwarf_attribute) name;
	      attrs[num_attrs].form = (enum dwarf_form) form;
	      if ((enum dwarf_form) form == DW_FORM_implicit_const)
		attrs[num_attrs].val = read_sleb128 (&abbrev_buf);
	      else
		attrs[num_attrs].val = 0;
	      ++num_attrs;
	    }
	}

      a.num_attrs = num_attrs;
      a.attrs = attrs;

      abbrevs->abbrevs[num_abbrevs] = a;
      ++num_abbrevs;
    }

  /* Sort by code so lookup_abbrev can bsearch.  */
  backtrace_qsort (abbrevs->abbrevs, abbrevs->num_abbrevs,
		   sizeof (struct abbrev), abbrev_compare);

  return 1;

 fail:
  free_abbrevs (state, abbrevs, error_callback, data);
  return 0;
}
+
+/* Return the abbrev information for an abbrev code. */
+
+static const struct abbrev *
+lookup_abbrev (struct abbrevs *abbrevs, uint64_t code,
+ backtrace_error_callback error_callback, void *data)
+{
+ struct abbrev key;
+ void *p;
+
+ /* With GCC, where abbrevs are simply numbered in order, we should
+ be able to just look up the entry. */
+ if (code - 1 < abbrevs->num_abbrevs
+ && abbrevs->abbrevs[code - 1].code == code)
+ return &abbrevs->abbrevs[code - 1];
+
+ /* Otherwise we have to search. */
+ memset (&key, 0, sizeof key);
+ key.code = code;
+ p = bsearch (&key, abbrevs->abbrevs, abbrevs->num_abbrevs,
+ sizeof (struct abbrev), abbrev_compare);
+ if (p == NULL)
+ {
+ error_callback (data, "invalid abbreviation code", 0);
+ return NULL;
+ }
+ return (const struct abbrev *) p;
+}
+
/* This struct is used to gather address range information while
   reading attributes.  We use this while building a mapping from
   address ranges to compilation units and then again while mapping
   from address ranges to function entries.  Normally either
   lowpc/highpc is set or ranges is set; the *_is_* flags record how
   each value must be interpreted before use.  */

struct pcrange {
  uint64_t lowpc;		/* The low PC value.  */
  int have_lowpc;		/* Whether a low PC value was found.  */
  int lowpc_is_addr_index;	/* Whether lowpc is in .debug_addr.  */
  uint64_t highpc;		/* The high PC value.  */
  int have_highpc;		/* Whether a high PC value was found.  */
  int highpc_is_relative;	/* Whether highpc is relative to lowpc.  */
  int highpc_is_addr_index;	/* Whether highpc is in .debug_addr.  */
  uint64_t ranges;		/* Offset in ranges section.  */
  int have_ranges;		/* Whether ranges is valid.  */
  int ranges_is_index;		/* Whether ranges is DW_FORM_rnglistx.  */
};
+
+/* Update PCRANGE from an attribute value. */
+
+static void
+update_pcrange (const struct attr* attr, const struct attr_val* val,
+ struct pcrange *pcrange)
+{
+ switch (attr->name)
+ {
+ case DW_AT_low_pc:
+ if (val->encoding == ATTR_VAL_ADDRESS)
+ {
+ pcrange->lowpc = val->u.uint;
+ pcrange->have_lowpc = 1;
+ }
+ else if (val->encoding == ATTR_VAL_ADDRESS_INDEX)
+ {
+ pcrange->lowpc = val->u.uint;
+ pcrange->have_lowpc = 1;
+ pcrange->lowpc_is_addr_index = 1;
+ }
+ break;
+
+ case DW_AT_high_pc:
+ if (val->encoding == ATTR_VAL_ADDRESS)
+ {
+ pcrange->highpc = val->u.uint;
+ pcrange->have_highpc = 1;
+ }
+ else if (val->encoding == ATTR_VAL_UINT)
+ {
+ pcrange->highpc = val->u.uint;
+ pcrange->have_highpc = 1;
+ pcrange->highpc_is_relative = 1;
+ }
+ else if (val->encoding == ATTR_VAL_ADDRESS_INDEX)
+ {
+ pcrange->highpc = val->u.uint;
+ pcrange->have_highpc = 1;
+ pcrange->highpc_is_addr_index = 1;
+ }
+ break;
+
+ case DW_AT_ranges:
+ if (val->encoding == ATTR_VAL_UINT
+ || val->encoding == ATTR_VAL_REF_SECTION)
+ {
+ pcrange->ranges = val->u.uint;
+ pcrange->have_ranges = 1;
+ }
+ else if (val->encoding == ATTR_VAL_RNGLISTS_INDEX)
+ {
+ pcrange->ranges = val->u.uint;
+ pcrange->have_ranges = 1;
+ pcrange->ranges_is_index = 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
/* Call ADD_RANGE for a low/high PC pair.  Returns 1 on success, 0 on
   error.  */

static int
add_low_high_range (struct backtrace_state *state,
		    const struct dwarf_sections *dwarf_sections,
		    uintptr_t base_address, int is_bigendian,
		    struct unit *u, const struct pcrange *pcrange,
		    int (*add_range) (struct backtrace_state *state,
				      void *rdata, uint64_t lowpc,
				      uint64_t highpc,
				      backtrace_error_callback error_callback,
				      void *data, void *vec),
		    void *rdata,
		    backtrace_error_callback error_callback, void *data,
		    void *vec)
{
  uint64_t lowpc;
  uint64_t highpc;

  /* Values given as .debug_addr indexes must be resolved to real
     addresses first.  */
  lowpc = pcrange->lowpc;
  if (pcrange->lowpc_is_addr_index)
    {
      if (!resolve_addr_index (dwarf_sections, u->addr_base, u->addrsize,
			       is_bigendian, lowpc, error_callback, data,
			       &lowpc))
	return 0;
    }

  highpc = pcrange->highpc;
  if (pcrange->highpc_is_addr_index)
    {
      if (!resolve_addr_index (dwarf_sections, u->addr_base, u->addrsize,
			       is_bigendian, highpc, error_callback, data,
			       &highpc))
	return 0;
    }
  /* DW_AT_high_pc given as a constant is an offset from lowpc.  */
  if (pcrange->highpc_is_relative)
    highpc += lowpc;

  /* Add in the base address of the module when recording PC values,
     so that we can look up the PC directly.  */
  lowpc += base_address;
  highpc += base_address;

  return add_range (state, rdata, lowpc, highpc, error_callback, data, vec);
}
+
/* Call ADD_RANGE for each range read from .debug_ranges, as used in
   DWARF versions 2 through 4.  BASE is the current base address for
   the range list entries.  Returns 1 on success, 0 on error.  */

static int
add_ranges_from_ranges (
    struct backtrace_state *state,
    const struct dwarf_sections *dwarf_sections,
    uintptr_t base_address, int is_bigendian,
    struct unit *u, uint64_t base,
    const struct pcrange *pcrange,
    int (*add_range) (struct backtrace_state *state, void *rdata,
		      uint64_t lowpc, uint64_t highpc,
		      backtrace_error_callback error_callback, void *data,
		      void *vec),
    void *rdata,
    backtrace_error_callback error_callback, void *data,
    void *vec)
{
  struct dwarf_buf ranges_buf;

  if (pcrange->ranges >= dwarf_sections->size[DEBUG_RANGES])
    {
      error_callback (data, "ranges offset out of range", 0);
      return 0;
    }

  ranges_buf.name = ".debug_ranges";
  ranges_buf.start = dwarf_sections->data[DEBUG_RANGES];
  ranges_buf.buf = dwarf_sections->data[DEBUG_RANGES] + pcrange->ranges;
  ranges_buf.left = dwarf_sections->size[DEBUG_RANGES] - pcrange->ranges;
  ranges_buf.is_bigendian = is_bigendian;
  ranges_buf.error_callback = error_callback;
  ranges_buf.data = data;
  ranges_buf.reported_underflow = 0;

  while (1)
    {
      uint64_t low;
      uint64_t high;

      if (ranges_buf.reported_underflow)
	return 0;

      low = read_address (&ranges_buf, u->addrsize);
      high = read_address (&ranges_buf, u->addrsize);

      /* A 0/0 pair terminates the list.  */
      if (low == 0 && high == 0)
	break;

      /* An all-ones first value is a base address selection entry:
	 the second value becomes the new base.  */
      if (is_highest_address (low, u->addrsize))
	base = high;
      else
	{
	  if (!add_range (state, rdata,
			  low + base + base_address,
			  high + base + base_address,
			  error_callback, data, vec))
	    return 0;
	}
    }

  if (ranges_buf.reported_underflow)
    return 0;

  return 1;
}
+
+/* Call ADD_RANGE for each range read from .debug_rnglists, as used in
+ DWARF version 5. */
+
+static int
+add_ranges_from_rnglists (
+ struct backtrace_state *state,
+ const struct dwarf_sections *dwarf_sections,
+ uintptr_t base_address, int is_bigendian,
+ struct unit *u, uint64_t base,
+ const struct pcrange *pcrange,
+ int (*add_range) (struct backtrace_state *state, void *rdata,
+ uint64_t lowpc, uint64_t highpc,
+ backtrace_error_callback error_callback, void *data,
+ void *vec),
+ void *rdata,
+ backtrace_error_callback error_callback, void *data,
+ void *vec)
+{
+ uint64_t offset;
+ struct dwarf_buf rnglists_buf;
+
+ /* PCRANGE->ranges is either a direct offset into .debug_rnglists, or
+ (ranges_is_index) an index into the offset table that starts at
+ u->rnglists_base; each table entry is an offset-sized value.  */
+ if (!pcrange->ranges_is_index)
+ offset = pcrange->ranges;
+ else
+ offset = u->rnglists_base + pcrange->ranges * (u->is_dwarf64 ? 8 : 4);
+ if (offset >= dwarf_sections->size[DEBUG_RNGLISTS])
+ {
+ error_callback (data, "rnglists offset out of range", 0);
+ return 0;
+ }
+
+ rnglists_buf.name = ".debug_rnglists";
+ rnglists_buf.start = dwarf_sections->data[DEBUG_RNGLISTS];
+ rnglists_buf.buf = dwarf_sections->data[DEBUG_RNGLISTS] + offset;
+ rnglists_buf.left = dwarf_sections->size[DEBUG_RNGLISTS] - offset;
+ rnglists_buf.is_bigendian = is_bigendian;
+ rnglists_buf.error_callback = error_callback;
+ rnglists_buf.data = data;
+ rnglists_buf.reported_underflow = 0;
+
+ if (pcrange->ranges_is_index)
+ {
+ /* Dereference the index: read the table entry and re-point the
+ buffer at the range list it refers to.  The entry is relative
+ to rnglists_base.  */
+ offset = read_offset (&rnglists_buf, u->is_dwarf64);
+ offset += u->rnglists_base;
+ if (offset >= dwarf_sections->size[DEBUG_RNGLISTS])
+ {
+ error_callback (data, "rnglists index offset out of range", 0);
+ return 0;
+ }
+ rnglists_buf.buf = dwarf_sections->data[DEBUG_RNGLISTS] + offset;
+ rnglists_buf.left = dwarf_sections->size[DEBUG_RNGLISTS] - offset;
+ }
+
+ /* Walk the range list entries until DW_RLE_end_of_list.  Entries
+ either update BASE or produce a low/high pair passed to ADD_RANGE.  */
+ while (1)
+ {
+ unsigned char rle;
+
+ rle = read_byte (&rnglists_buf);
+ if (rle == DW_RLE_end_of_list)
+ break;
+ switch (rle)
+ {
+ case DW_RLE_base_addressx:
+ {
+ /* New base address given as an index into .debug_addr.  */
+ uint64_t index;
+
+ index = read_uleb128 (&rnglists_buf);
+ if (!resolve_addr_index (dwarf_sections, u->addr_base,
+ u->addrsize, is_bigendian, index,
+ error_callback, data, &base))
+ return 0;
+ }
+ break;
+
+ case DW_RLE_startx_endx:
+ {
+ /* Start and end addresses, both as .debug_addr indices.  */
+ uint64_t index;
+ uint64_t low;
+ uint64_t high;
+
+ index = read_uleb128 (&rnglists_buf);
+ if (!resolve_addr_index (dwarf_sections, u->addr_base,
+ u->addrsize, is_bigendian, index,
+ error_callback, data, &low))
+ return 0;
+ index = read_uleb128 (&rnglists_buf);
+ if (!resolve_addr_index (dwarf_sections, u->addr_base,
+ u->addrsize, is_bigendian, index,
+ error_callback, data, &high))
+ return 0;
+ if (!add_range (state, rdata, low + base_address,
+ high + base_address, error_callback, data,
+ vec))
+ return 0;
+ }
+ break;
+
+ case DW_RLE_startx_length:
+ {
+ /* Start address as a .debug_addr index, plus a length.  */
+ uint64_t index;
+ uint64_t low;
+ uint64_t length;
+
+ index = read_uleb128 (&rnglists_buf);
+ if (!resolve_addr_index (dwarf_sections, u->addr_base,
+ u->addrsize, is_bigendian, index,
+ error_callback, data, &low))
+ return 0;
+ length = read_uleb128 (&rnglists_buf);
+ low += base_address;
+ if (!add_range (state, rdata, low, low + length,
+ error_callback, data, vec))
+ return 0;
+ }
+ break;
+
+ case DW_RLE_offset_pair:
+ {
+ /* Two offsets relative to the current BASE address.  */
+ uint64_t low;
+ uint64_t high;
+
+ low = read_uleb128 (&rnglists_buf);
+ high = read_uleb128 (&rnglists_buf);
+ if (!add_range (state, rdata, low + base + base_address,
+ high + base + base_address,
+ error_callback, data, vec))
+ return 0;
+ }
+ break;
+
+ case DW_RLE_base_address:
+ /* New base address given directly as an address.  */
+ base = read_address (&rnglists_buf, u->addrsize);
+ break;
+
+ case DW_RLE_start_end:
+ {
+ /* Start and end given directly as addresses.  */
+ uint64_t low;
+ uint64_t high;
+
+ low = read_address (&rnglists_buf, u->addrsize);
+ high = read_address (&rnglists_buf, u->addrsize);
+ if (!add_range (state, rdata, low + base_address,
+ high + base_address, error_callback, data,
+ vec))
+ return 0;
+ }
+ break;
+
+ case DW_RLE_start_length:
+ {
+ /* Start given directly as an address, plus a length.  */
+ uint64_t low;
+ uint64_t length;
+
+ low = read_address (&rnglists_buf, u->addrsize);
+ length = read_uleb128 (&rnglists_buf);
+ low += base_address;
+ if (!add_range (state, rdata, low, low + length,
+ error_callback, data, vec))
+ return 0;
+ }
+ break;
+
+ default:
+ dwarf_buf_error (&rnglists_buf, "unrecognized DW_RLE value", -1);
+ return 0;
+ }
+ }
+
+ if (rnglists_buf.reported_underflow)
+ return 0;
+
+ return 1;
+}
+
+/* Call ADD_RANGE for each lowpc/highpc pair in PCRANGE. RDATA is
+ passed to ADD_RANGE, and is either a struct unit * or a struct
+ function *. VEC is the vector we are adding ranges to, and is
+ either a struct unit_addrs_vector * or a struct function_vector *.
+ Returns 1 on success, 0 on error. */
+
+static int
+add_ranges (struct backtrace_state *state,
+ const struct dwarf_sections *dwarf_sections,
+ uintptr_t base_address, int is_bigendian,
+ struct unit *u, uint64_t base, const struct pcrange *pcrange,
+ int (*add_range) (struct backtrace_state *state, void *rdata,
+ uint64_t lowpc, uint64_t highpc,
+ backtrace_error_callback error_callback,
+ void *data, void *vec),
+ void *rdata,
+ backtrace_error_callback error_callback, void *data,
+ void *vec)
+{
+ /* A simple DW_AT_low_pc/DW_AT_high_pc pair takes precedence over a
+ DW_AT_ranges attribute.  */
+ if (pcrange->have_lowpc && pcrange->have_highpc)
+ return add_low_high_range (state, dwarf_sections, base_address,
+ is_bigendian, u, pcrange, add_range, rdata,
+ error_callback, data, vec);
+
+ if (!pcrange->have_ranges)
+ {
+ /* Did not find any address ranges to add.  */
+ return 1;
+ }
+
+ /* DWARF < 5 stores range lists in .debug_ranges; DWARF 5 uses the
+ .debug_rnglists encoding instead.  */
+ if (u->version < 5)
+ return add_ranges_from_ranges (state, dwarf_sections, base_address,
+ is_bigendian, u, base, pcrange, add_range,
+ rdata, error_callback, data, vec);
+ else
+ return add_ranges_from_rnglists (state, dwarf_sections, base_address,
+ is_bigendian, u, base, pcrange, add_range,
+ rdata, error_callback, data, vec);
+}
+
+/* Find the address range covered by a compilation unit, reading from
+ UNIT_BUF and adding values to U. Returns 1 if all data could be
+ read, 0 if there is some error. */
+
+static int
+find_address_ranges (struct backtrace_state *state, uintptr_t base_address,
+ struct dwarf_buf *unit_buf,
+ const struct dwarf_sections *dwarf_sections,
+ int is_bigendian, struct dwarf_data *altlink,
+ backtrace_error_callback error_callback, void *data,
+ struct unit *u, struct unit_addrs_vector *addrs,
+ enum dwarf_tag *unit_tag)
+{
+ /* Walk the DIEs at this nesting level; an abbrev code of 0 ends the
+ level.  Recurses for children of each DIE that has any.  */
+ while (unit_buf->left > 0)
+ {
+ uint64_t code;
+ const struct abbrev *abbrev;
+ struct pcrange pcrange;
+ struct attr_val name_val;
+ int have_name_val;
+ struct attr_val comp_dir_val;
+ int have_comp_dir_val;
+ size_t i;
+
+ code = read_uleb128 (unit_buf);
+ if (code == 0)
+ return 1;
+
+ abbrev = lookup_abbrev (&u->abbrevs, code, error_callback, data);
+ if (abbrev == NULL)
+ return 0;
+
+ /* Report the tag of the first DIE to the caller (only non-NULL on
+ the top-level call, where the first DIE is the unit DIE).  */
+ if (unit_tag != NULL)
+ *unit_tag = abbrev->tag;
+
+ memset (&pcrange, 0, sizeof pcrange);
+ memset (&name_val, 0, sizeof name_val);
+ have_name_val = 0;
+ memset (&comp_dir_val, 0, sizeof comp_dir_val);
+ have_comp_dir_val = 0;
+ /* Every attribute must be read (to advance the buffer) even if we
+ ignore its value.  */
+ for (i = 0; i < abbrev->num_attrs; ++i)
+ {
+ struct attr_val val;
+
+ if (!read_attribute (abbrev->attrs[i].form, abbrev->attrs[i].val,
+ unit_buf, u->is_dwarf64, u->version,
+ u->addrsize, dwarf_sections, altlink, &val))
+ return 0;
+
+ switch (abbrev->attrs[i].name)
+ {
+ case DW_AT_low_pc: case DW_AT_high_pc: case DW_AT_ranges:
+ update_pcrange (&abbrev->attrs[i], &val, &pcrange);
+ break;
+
+ case DW_AT_stmt_list:
+ /* Offset of this unit's line number program in .debug_line.  */
+ if ((abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ && (val.encoding == ATTR_VAL_UINT
+ || val.encoding == ATTR_VAL_REF_SECTION))
+ u->lineoff = val.u.uint;
+ break;
+
+ case DW_AT_name:
+ if (abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ {
+ name_val = val;
+ have_name_val = 1;
+ }
+ break;
+
+ case DW_AT_comp_dir:
+ if (abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ {
+ comp_dir_val = val;
+ have_comp_dir_val = 1;
+ }
+ break;
+
+ case DW_AT_str_offsets_base:
+ if ((abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ && val.encoding == ATTR_VAL_REF_SECTION)
+ u->str_offsets_base = val.u.uint;
+ break;
+
+ case DW_AT_addr_base:
+ if ((abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ && val.encoding == ATTR_VAL_REF_SECTION)
+ u->addr_base = val.u.uint;
+ break;
+
+ case DW_AT_rnglists_base:
+ if ((abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ && val.encoding == ATTR_VAL_REF_SECTION)
+ u->rnglists_base = val.u.uint;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Resolve strings after we're sure that we have seen
+ // DW_AT_str_offsets_base.
+ if (have_name_val)
+ {
+ if (!resolve_string (dwarf_sections, u->is_dwarf64, is_bigendian,
+ u->str_offsets_base, &name_val,
+ error_callback, data, &u->filename))
+ return 0;
+ }
+ if (have_comp_dir_val)
+ {
+ if (!resolve_string (dwarf_sections, u->is_dwarf64, is_bigendian,
+ u->str_offsets_base, &comp_dir_val,
+ error_callback, data, &u->comp_dir))
+ return 0;
+ }
+
+ if (abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_subprogram
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ {
+ /* NOTE: pcrange.lowpc is passed as the BASE for any range list;
+ the unit's low PC is the default base address for
+ DW_RLE_offset_pair / .debug_ranges offset entries.  */
+ if (!add_ranges (state, dwarf_sections, base_address,
+ is_bigendian, u, pcrange.lowpc, &pcrange,
+ add_unit_addr, (void *) u, error_callback, data,
+ (void *) addrs))
+ return 0;
+
+ /* If we found the PC range in the DW_TAG_compile_unit or
+ DW_TAG_skeleton_unit, we can stop now.  */
+ if ((abbrev->tag == DW_TAG_compile_unit
+ || abbrev->tag == DW_TAG_skeleton_unit)
+ && (pcrange.have_ranges
+ || (pcrange.have_lowpc && pcrange.have_highpc)))
+ return 1;
+ }
+
+ if (abbrev->has_children)
+ {
+ if (!find_address_ranges (state, base_address, unit_buf,
+ dwarf_sections, is_bigendian, altlink,
+ error_callback, data, u, addrs, NULL))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Build a mapping from address ranges to the compilation units where
+ the line number information for that range can be found. Returns 1
+ on success, 0 on failure. */
+
+static int
+build_address_map (struct backtrace_state *state, uintptr_t base_address,
+ const struct dwarf_sections *dwarf_sections,
+ int is_bigendian, struct dwarf_data *altlink,
+ backtrace_error_callback error_callback, void *data,
+ struct unit_addrs_vector *addrs,
+ struct unit_vector *unit_vec)
+{
+ struct dwarf_buf info;
+ struct backtrace_vector units;
+ size_t units_count;
+ size_t i;
+ struct unit **pu;
+ size_t unit_offset = 0;
+ struct unit_addrs *pa;
+
+ memset (&addrs->vec, 0, sizeof addrs->vec);
+ memset (&unit_vec->vec, 0, sizeof unit_vec->vec);
+ addrs->count = 0;
+ unit_vec->count = 0;
+
+ /* Read through the .debug_info section.  FIXME: Should we use the
+ .debug_aranges section?  gdb and addr2line don't use it, but I'm
+ not sure why.  */
+
+ info.name = ".debug_info";
+ info.start = dwarf_sections->data[DEBUG_INFO];
+ info.buf = info.start;
+ info.left = dwarf_sections->size[DEBUG_INFO];
+ info.is_bigendian = is_bigendian;
+ info.error_callback = error_callback;
+ info.data = data;
+ info.reported_underflow = 0;
+
+ memset (&units, 0, sizeof units);
+ units_count = 0;
+
+ /* One iteration per compilation unit header in .debug_info.  */
+ while (info.left > 0)
+ {
+ const unsigned char *unit_data_start;
+ uint64_t len;
+ int is_dwarf64;
+ struct dwarf_buf unit_buf;
+ int version;
+ int unit_type;
+ uint64_t abbrev_offset;
+ int addrsize;
+ struct unit *u;
+ enum dwarf_tag unit_tag;
+
+ if (info.reported_underflow)
+ goto fail;
+
+ unit_data_start = info.buf;
+
+ /* UNIT_BUF is a copy restricted to this unit; INFO is advanced
+ past the whole unit so the loop moves to the next header.  */
+ len = read_initial_length (&info, &is_dwarf64);
+ unit_buf = info;
+ unit_buf.left = len;
+
+ if (!advance (&info, len))
+ goto fail;
+
+ version = read_uint16 (&unit_buf);
+ if (version < 2 || version > 5)
+ {
+ dwarf_buf_error (&unit_buf, "unrecognized DWARF version", -1);
+ goto fail;
+ }
+
+ /* DWARF 5 adds a unit_type byte after the version.  */
+ if (version < 5)
+ unit_type = 0;
+ else
+ {
+ unit_type = read_byte (&unit_buf);
+ if (unit_type == DW_UT_type || unit_type == DW_UT_split_type)
+ {
+ /* This unit doesn't have anything we need.  */
+ continue;
+ }
+ }
+
+ pu = ((struct unit **)
+ backtrace_vector_grow (state, sizeof (struct unit *),
+ error_callback, data, &units));
+ if (pu == NULL)
+ goto fail;
+
+ u = ((struct unit *)
+ backtrace_alloc (state, sizeof *u, error_callback, data));
+ if (u == NULL)
+ goto fail;
+
+ *pu = u;
+ ++units_count;
+
+ /* In DWARF 5 the address size precedes the abbrev offset; before
+ that it follows it (read further below).  */
+ if (version < 5)
+ addrsize = 0; /* Set below. */
+ else
+ addrsize = read_byte (&unit_buf);
+
+ memset (&u->abbrevs, 0, sizeof u->abbrevs);
+ abbrev_offset = read_offset (&unit_buf, is_dwarf64);
+ if (!read_abbrevs (state, abbrev_offset,
+ dwarf_sections->data[DEBUG_ABBREV],
+ dwarf_sections->size[DEBUG_ABBREV],
+ is_bigendian, error_callback, data, &u->abbrevs))
+ goto fail;
+
+ if (version < 5)
+ addrsize = read_byte (&unit_buf);
+
+ /* Skeleton and split-compile units carry an 8-byte dwo_id that we
+ read past but do not use.  */
+ switch (unit_type)
+ {
+ case 0:
+ break;
+ case DW_UT_compile: case DW_UT_partial:
+ break;
+ case DW_UT_skeleton: case DW_UT_split_compile:
+ read_uint64 (&unit_buf); /* dwo_id */
+ break;
+ default:
+ break;
+ }
+
+ /* low/high_offset cover this unit's bytes in .debug_info, length
+ field included (12 bytes for DWARF64, 4 for DWARF32).  */
+ u->low_offset = unit_offset;
+ unit_offset += len + (is_dwarf64 ? 12 : 4);
+ u->high_offset = unit_offset;
+ u->unit_data = unit_buf.buf;
+ u->unit_data_len = unit_buf.left;
+ u->unit_data_offset = unit_buf.buf - unit_data_start;
+ u->version = version;
+ u->is_dwarf64 = is_dwarf64;
+ u->addrsize = addrsize;
+ u->filename = NULL;
+ u->comp_dir = NULL;
+ u->abs_filename = NULL;
+ u->lineoff = 0;
+ u->str_offsets_base = 0;
+ u->addr_base = 0;
+ u->rnglists_base = 0;
+
+ /* The actual line number mappings will be read as needed.  */
+ u->lines = NULL;
+ u->lines_count = 0;
+ u->function_addrs = NULL;
+ u->function_addrs_count = 0;
+
+ if (!find_address_ranges (state, base_address, &unit_buf, dwarf_sections,
+ is_bigendian, altlink, error_callback, data,
+ u, addrs, &unit_tag))
+ goto fail;
+
+ if (unit_buf.reported_underflow)
+ goto fail;
+ }
+ if (info.reported_underflow)
+ goto fail;
+
+ /* Add a trailing addrs entry, but don't include it in addrs->count.  */
+ pa = ((struct unit_addrs *)
+ backtrace_vector_grow (state, sizeof (struct unit_addrs),
+ error_callback, data, &addrs->vec));
+ if (pa == NULL)
+ goto fail;
+ pa->low = 0;
+ --pa->low;
+ pa->high = pa->low;
+ pa->u = NULL;
+
+ unit_vec->vec = units;
+ unit_vec->count = units_count;
+ return 1;
+
+ fail:
+ /* On failure release every unit allocated so far plus both vectors.  */
+ if (units_count > 0)
+ {
+ pu = (struct unit **) units.base;
+ for (i = 0; i < units_count; i++)
+ {
+ free_abbrevs (state, &pu[i]->abbrevs, error_callback, data);
+ backtrace_free (state, pu[i], sizeof **pu, error_callback, data);
+ }
+ backtrace_vector_free (state, &units, error_callback, data);
+ }
+ if (addrs->count > 0)
+ {
+ backtrace_vector_free (state, &addrs->vec, error_callback, data);
+ addrs->count = 0;
+ }
+ return 0;
+}
+
+/* Add a new mapping to the vector of line mappings that we are
+ building. Returns 1 on success, 0 on failure. */
+
+static int
+add_line (struct backtrace_state *state, struct dwarf_data *ddata,
+ uintptr_t pc, const char *filename, int lineno,
+ backtrace_error_callback error_callback, void *data,
+ struct line_vector *vec)
+{
+ struct line *ln;
+
+ /* If we are adding the same mapping, ignore it.  This can happen
+ when using discriminators.  */
+ if (vec->count > 0)
+ {
+ /* Pointer comparison of FILENAME is intentional: names come from
+ the shared line header tables, so equal strings share storage.  */
+ ln = (struct line *) vec->vec.base + (vec->count - 1);
+ if (pc == ln->pc && filename == ln->filename && lineno == ln->lineno)
+ return 1;
+ }
+
+ ln = ((struct line *)
+ backtrace_vector_grow (state, sizeof (struct line), error_callback,
+ data, &vec->vec));
+ if (ln == NULL)
+ return 0;
+
+ /* Add in the base address here, so that we can look up the PC
+ directly.  */
+ ln->pc = pc + ddata->base_address;
+
+ ln->filename = filename;
+ ln->lineno = lineno;
+ /* IDX preserves insertion order so the later sort can be stable.  */
+ ln->idx = vec->count;
+
+ ++vec->count;
+
+ return 1;
+}
+
+/* Free the line header information. */
+
+static void
+free_line_header (struct backtrace_state *state, struct line_header *hdr,
+ backtrace_error_callback error_callback, void *data)
+{
+ /* Only the dirs/filenames pointer arrays are freed here, not the
+ strings they point to.  dirs may be NULL with a zero count when the
+ header was only partially read.  */
+ if (hdr->dirs_count != 0)
+ backtrace_free (state, hdr->dirs, hdr->dirs_count * sizeof (const char *),
+ error_callback, data);
+ backtrace_free (state, hdr->filenames,
+ hdr->filenames_count * sizeof (char *),
+ error_callback, data);
+}
+
+/* Read the directories and file names for a line header for version
+ 2, setting fields in HDR. Return 1 on success, 0 on failure. */
+
+static int
+read_v2_paths (struct backtrace_state *state, struct unit *u,
+ struct dwarf_buf *hdr_buf, struct line_header *hdr)
+{
+ const unsigned char *p;
+ const unsigned char *pend;
+ size_t i;
+
+ /* Count the number of directory entries.  (First pass scans without
+ consuming HDR_BUF so we can size the allocation up front.)  */
+ hdr->dirs_count = 0;
+ p = hdr_buf->buf;
+ pend = p + hdr_buf->left;
+ while (p < pend && *p != '\0')
+ {
+ p += strnlen((const char *) p, pend - p) + 1;
+ ++hdr->dirs_count;
+ }
+
+ /* The index of the first entry in the list of directories is 1.  Index 0 is
+ used for the current directory of the compilation.  To simplify index
+ handling, we set entry 0 to the compilation unit directory.  */
+ ++hdr->dirs_count;
+ hdr->dirs = ((const char **)
+ backtrace_alloc (state,
+ hdr->dirs_count * sizeof (const char *),
+ hdr_buf->error_callback,
+ hdr_buf->data));
+ if (hdr->dirs == NULL)
+ return 0;
+
+ hdr->dirs[0] = u->comp_dir;
+ i = 1;
+ /* Second pass: consume the directory strings until the empty string
+ that terminates the list.  */
+ while (*hdr_buf->buf != '\0')
+ {
+ if (hdr_buf->reported_underflow)
+ return 0;
+
+ hdr->dirs[i] = read_string (hdr_buf);
+ if (hdr->dirs[i] == NULL)
+ return 0;
+ ++i;
+ }
+ if (!advance (hdr_buf, 1))
+ return 0;
+
+ /* Count the number of file entries.  Each entry is a string followed
+ by three ULEB128s (directory index, mtime, length).  */
+ hdr->filenames_count = 0;
+ p = hdr_buf->buf;
+ pend = p + hdr_buf->left;
+ while (p < pend && *p != '\0')
+ {
+ p += strnlen ((const char *) p, pend - p) + 1;
+ p += leb128_len (p);
+ p += leb128_len (p);
+ p += leb128_len (p);
+ ++hdr->filenames_count;
+ }
+
+ /* The index of the first entry in the list of file names is 1.  Index 0 is
+ used for the DW_AT_name of the compilation unit.  To simplify index
+ handling, we set entry 0 to the compilation unit file name.  */
+ ++hdr->filenames_count;
+ hdr->filenames = ((const char **)
+ backtrace_alloc (state,
+ hdr->filenames_count * sizeof (char *),
+ hdr_buf->error_callback,
+ hdr_buf->data));
+ if (hdr->filenames == NULL)
+ return 0;
+ hdr->filenames[0] = u->filename;
+ i = 1;
+ while (*hdr_buf->buf != '\0')
+ {
+ const char *filename;
+ uint64_t dir_index;
+
+ if (hdr_buf->reported_underflow)
+ return 0;
+
+ filename = read_string (hdr_buf);
+ if (filename == NULL)
+ return 0;
+ dir_index = read_uleb128 (hdr_buf);
+ /* Keep the name as-is when it is absolute, or when its directory
+ entry is NULL (e.g. comp_dir was missing).  Otherwise join
+ directory and file name with a slash.  */
+ if (IS_ABSOLUTE_PATH (filename)
+ || (dir_index < hdr->dirs_count && hdr->dirs[dir_index] == NULL))
+ hdr->filenames[i] = filename;
+ else
+ {
+ const char *dir;
+ size_t dir_len;
+ size_t filename_len;
+ char *s;
+
+ if (dir_index < hdr->dirs_count)
+ dir = hdr->dirs[dir_index];
+ else
+ {
+ dwarf_buf_error (hdr_buf,
+ ("invalid directory index in "
+ "line number program header"),
+ 0);
+ return 0;
+ }
+ dir_len = strlen (dir);
+ filename_len = strlen (filename);
+ s = ((char *) backtrace_alloc (state, dir_len + filename_len + 2,
+ hdr_buf->error_callback,
+ hdr_buf->data));
+ if (s == NULL)
+ return 0;
+ memcpy (s, dir, dir_len);
+ /* FIXME: If we are on a DOS-based file system, and the
+ directory or the file name use backslashes, then we
+ should use a backslash here.  */
+ s[dir_len] = '/';
+ memcpy (s + dir_len + 1, filename, filename_len + 1);
+ hdr->filenames[i] = s;
+ }
+
+ /* Ignore the modification time and size.  */
+ read_uleb128 (hdr_buf);
+ read_uleb128 (hdr_buf);
+
+ ++i;
+ }
+
+ return 1;
+}
+
+/* Read a single version 5 LNCT entry for a directory or file name in a
+ line header. Sets *STRING to the resulting name, ignoring other
+ data. Return 1 on success, 0 on failure. */
+
+static int
+read_lnct (struct backtrace_state *state, struct dwarf_data *ddata,
+ struct unit *u, struct dwarf_buf *hdr_buf,
+ const struct line_header *hdr, size_t formats_count,
+ const struct line_header_format *formats, const char **string)
+{
+ size_t i;
+ const char *dir;
+ const char *path;
+
+ dir = NULL;
+ path = NULL;
+ /* Read one value per format descriptor; we only keep the path and
+ the directory index, everything else is consumed and dropped.  */
+ for (i = 0; i < formats_count; i++)
+ {
+ struct attr_val val;
+
+ if (!read_attribute (formats[i].form, 0, hdr_buf, u->is_dwarf64,
+ u->version, hdr->addrsize, &ddata->dwarf_sections,
+ ddata->altlink, &val))
+ return 0;
+ switch (formats[i].lnct)
+ {
+ case DW_LNCT_path:
+ if (!resolve_string (&ddata->dwarf_sections, u->is_dwarf64,
+ ddata->is_bigendian, u->str_offsets_base,
+ &val, hdr_buf->error_callback, hdr_buf->data,
+ &path))
+ return 0;
+ break;
+ case DW_LNCT_directory_index:
+ if (val.encoding == ATTR_VAL_UINT)
+ {
+ if (val.u.uint >= hdr->dirs_count)
+ {
+ dwarf_buf_error (hdr_buf,
+ ("invalid directory index in "
+ "line number program header"),
+ 0);
+ return 0;
+ }
+ dir = hdr->dirs[val.u.uint];
+ }
+ break;
+ default:
+ /* We don't care about timestamps or sizes or hashes.  */
+ break;
+ }
+ }
+
+ if (path == NULL)
+ {
+ dwarf_buf_error (hdr_buf,
+ "missing file name in line number program header",
+ 0);
+ return 0;
+ }
+
+ /* With no directory the path stands alone; otherwise join them with
+ a slash into freshly allocated storage.  */
+ if (dir == NULL)
+ *string = path;
+ else
+ {
+ size_t dir_len;
+ size_t path_len;
+ char *s;
+
+ dir_len = strlen (dir);
+ path_len = strlen (path);
+ s = (char *) backtrace_alloc (state, dir_len + path_len + 2,
+ hdr_buf->error_callback, hdr_buf->data);
+ if (s == NULL)
+ return 0;
+ memcpy (s, dir, dir_len);
+ /* FIXME: If we are on a DOS-based file system, and the
+ directory or the path name use backslashes, then we should
+ use a backslash here.  */
+ s[dir_len] = '/';
+ memcpy (s + dir_len + 1, path, path_len + 1);
+ *string = s;
+ }
+
+ return 1;
+}
+
+/* Read a set of DWARF 5 line header format entries, setting *PCOUNT
+ and *PPATHS. Return 1 on success, 0 on failure. */
+
+static int
+read_line_header_format_entries (struct backtrace_state *state,
+ struct dwarf_data *ddata,
+ struct unit *u,
+ struct dwarf_buf *hdr_buf,
+ struct line_header *hdr,
+ size_t *pcount,
+ const char ***ppaths)
+{
+ size_t formats_count;
+ struct line_header_format *formats;
+ size_t paths_count;
+ const char **paths;
+ size_t i;
+ int ret;
+
+ /* First a table of (content type, form) descriptors...  */
+ formats_count = read_byte (hdr_buf);
+ if (formats_count == 0)
+ formats = NULL;
+ else
+ {
+ formats = ((struct line_header_format *)
+ backtrace_alloc (state,
+ (formats_count
+ * sizeof (struct line_header_format)),
+ hdr_buf->error_callback,
+ hdr_buf->data));
+ if (formats == NULL)
+ return 0;
+
+ for (i = 0; i < formats_count; i++)
+ {
+ formats[i].lnct = (int) read_uleb128(hdr_buf);
+ formats[i].form = (enum dwarf_form) read_uleb128 (hdr_buf);
+ }
+ }
+
+ /* ...then the entries themselves, each encoded per the table.  */
+ paths_count = read_uleb128 (hdr_buf);
+ if (paths_count == 0)
+ {
+ *pcount = 0;
+ *ppaths = NULL;
+ ret = 1;
+ goto exit;
+ }
+
+ paths = ((const char **)
+ backtrace_alloc (state, paths_count * sizeof (const char *),
+ hdr_buf->error_callback, hdr_buf->data));
+ if (paths == NULL)
+ {
+ ret = 0;
+ goto exit;
+ }
+ for (i = 0; i < paths_count; i++)
+ {
+ if (!read_lnct (state, ddata, u, hdr_buf, hdr, formats_count,
+ formats, &paths[i]))
+ {
+ backtrace_free (state, paths,
+ paths_count * sizeof (const char *),
+ hdr_buf->error_callback, hdr_buf->data);
+ ret = 0;
+ goto exit;
+ }
+ }
+
+ *pcount = paths_count;
+ *ppaths = paths;
+
+ ret = 1;
+
+ exit:
+ /* The format table is only needed while parsing; free it on every
+ path out.  */
+ if (formats != NULL)
+ backtrace_free (state, formats,
+ formats_count * sizeof (struct line_header_format),
+ hdr_buf->error_callback, hdr_buf->data);
+
+ return ret;
+}
+
+/* Read the line header. Return 1 on success, 0 on failure. */
+
+static int
+read_line_header (struct backtrace_state *state, struct dwarf_data *ddata,
+ struct unit *u, int is_dwarf64, struct dwarf_buf *line_buf,
+ struct line_header *hdr)
+{
+ uint64_t hdrlen;
+ struct dwarf_buf hdr_buf;
+
+ hdr->version = read_uint16 (line_buf);
+ if (hdr->version < 2 || hdr->version > 5)
+ {
+ dwarf_buf_error (line_buf, "unsupported line number version", -1);
+ return 0;
+ }
+
+ /* Before version 5 the line table has no address size of its own and
+ inherits the compilation unit's.  */
+ if (hdr->version < 5)
+ hdr->addrsize = u->addrsize;
+ else
+ {
+ hdr->addrsize = read_byte (line_buf);
+ /* We could support a non-zero segment_selector_size but I doubt
+ we'll ever see it.  */
+ if (read_byte (line_buf) != 0)
+ {
+ dwarf_buf_error (line_buf,
+ "non-zero segment_selector_size not supported",
+ -1);
+ return 0;
+ }
+ }
+
+ /* Restrict HDR_BUF to the header and advance LINE_BUF past it, so
+ the caller's buffer is left positioned at the line program.  */
+ hdrlen = read_offset (line_buf, is_dwarf64);
+
+ hdr_buf = *line_buf;
+ hdr_buf.left = hdrlen;
+
+ if (!advance (line_buf, hdrlen))
+ return 0;
+
+ hdr->min_insn_len = read_byte (&hdr_buf);
+ /* max_ops_per_insn was added in version 4 (VLIW support).  */
+ if (hdr->version < 4)
+ hdr->max_ops_per_insn = 1;
+ else
+ hdr->max_ops_per_insn = read_byte (&hdr_buf);
+
+ /* We don't care about default_is_stmt.  */
+ read_byte (&hdr_buf);
+
+ hdr->line_base = read_sbyte (&hdr_buf);
+ hdr->line_range = read_byte (&hdr_buf);
+
+ /* opcode_lengths[] has opcode_base - 1 entries, for standard
+ opcodes 1 .. opcode_base - 1.  */
+ hdr->opcode_base = read_byte (&hdr_buf);
+ hdr->opcode_lengths = hdr_buf.buf;
+ if (!advance (&hdr_buf, hdr->opcode_base - 1))
+ return 0;
+
+ if (hdr->version < 5)
+ {
+ if (!read_v2_paths (state, u, &hdr_buf, hdr))
+ return 0;
+ }
+ else
+ {
+ if (!read_line_header_format_entries (state, ddata, u, &hdr_buf, hdr,
+ &hdr->dirs_count,
+ &hdr->dirs))
+ return 0;
+ if (!read_line_header_format_entries (state, ddata, u, &hdr_buf, hdr,
+ &hdr->filenames_count,
+ &hdr->filenames))
+ return 0;
+ }
+
+ if (hdr_buf.reported_underflow)
+ return 0;
+
+ return 1;
+}
+
+/* Read the line program, adding line mappings to VEC. Return 1 on
+ success, 0 on failure. */
+
+static int
+read_line_program (struct backtrace_state *state, struct dwarf_data *ddata,
+ const struct line_header *hdr, struct dwarf_buf *line_buf,
+ struct line_vector *vec)
+{
+ /* Line number state machine registers we track; column, is_stmt and
+ discriminator are deliberately ignored.  */
+ uint64_t address;
+ unsigned int op_index;
+ const char *reset_filename;
+ const char *filename;
+ int lineno;
+
+ address = 0;
+ op_index = 0;
+ /* The initial file register is 1 (entry 0 is the compilation unit
+ name we inserted ourselves).  */
+ if (hdr->filenames_count > 1)
+ reset_filename = hdr->filenames[1];
+ else
+ reset_filename = "";
+ filename = reset_filename;
+ lineno = 1;
+ while (line_buf->left > 0)
+ {
+ unsigned int op;
+
+ op = read_byte (line_buf);
+ if (op >= hdr->opcode_base)
+ {
+ unsigned int advance;
+
+ /* Special opcode.  Decodes into an address advance and a line
+ advance, and emits a row (see DWARF spec section 6.2.5.1).  */
+ op -= hdr->opcode_base;
+ advance = op / hdr->line_range;
+ address += (hdr->min_insn_len * (op_index + advance)
+ / hdr->max_ops_per_insn);
+ op_index = (op_index + advance) % hdr->max_ops_per_insn;
+ lineno += hdr->line_base + (int) (op % hdr->line_range);
+ add_line (state, ddata, address, filename, lineno,
+ line_buf->error_callback, line_buf->data, vec);
+ }
+ else if (op == DW_LNS_extended_op)
+ {
+ uint64_t len;
+
+ /* LEN counts the opcode byte plus its operands; used below to
+ skip unrecognized extended opcodes.  */
+ len = read_uleb128 (line_buf);
+ op = read_byte (line_buf);
+ switch (op)
+ {
+ case DW_LNE_end_sequence:
+ /* FIXME: Should we mark the high PC here?  It seems
+ that we already have that information from the
+ compilation unit.  */
+ address = 0;
+ op_index = 0;
+ filename = reset_filename;
+ lineno = 1;
+ break;
+ case DW_LNE_set_address:
+ address = read_address (line_buf, hdr->addrsize);
+ break;
+ case DW_LNE_define_file:
+ {
+ const char *f;
+ unsigned int dir_index;
+
+ f = read_string (line_buf);
+ if (f == NULL)
+ return 0;
+ dir_index = read_uleb128 (line_buf);
+ /* Ignore the time and length.  */
+ read_uleb128 (line_buf);
+ read_uleb128 (line_buf);
+ if (IS_ABSOLUTE_PATH (f))
+ filename = f;
+ else
+ {
+ const char *dir;
+ size_t dir_len;
+ size_t f_len;
+ char *p;
+
+ if (dir_index < hdr->dirs_count)
+ dir = hdr->dirs[dir_index];
+ else
+ {
+ dwarf_buf_error (line_buf,
+ ("invalid directory index "
+ "in line number program"),
+ 0);
+ return 0;
+ }
+ dir_len = strlen (dir);
+ f_len = strlen (f);
+ p = ((char *)
+ backtrace_alloc (state, dir_len + f_len + 2,
+ line_buf->error_callback,
+ line_buf->data));
+ if (p == NULL)
+ return 0;
+ memcpy (p, dir, dir_len);
+ /* FIXME: If we are on a DOS-based file system,
+ and the directory or the file name use
+ backslashes, then we should use a backslash
+ here.  */
+ p[dir_len] = '/';
+ memcpy (p + dir_len + 1, f, f_len + 1);
+ filename = p;
+ }
+ }
+ break;
+ case DW_LNE_set_discriminator:
+ /* We don't care about discriminators.  */
+ read_uleb128 (line_buf);
+ break;
+ default:
+ /* Skip an extended opcode we don't recognize; LEN includes
+ the opcode byte already consumed.  */
+ if (!advance (line_buf, len - 1))
+ return 0;
+ break;
+ }
+ }
+ else
+ {
+ /* Standard opcode.  */
+ switch (op)
+ {
+ case DW_LNS_copy:
+ add_line (state, ddata, address, filename, lineno,
+ line_buf->error_callback, line_buf->data, vec);
+ break;
+ case DW_LNS_advance_pc:
+ {
+ uint64_t advance;
+
+ advance = read_uleb128 (line_buf);
+ address += (hdr->min_insn_len * (op_index + advance)
+ / hdr->max_ops_per_insn);
+ op_index = (op_index + advance) % hdr->max_ops_per_insn;
+ }
+ break;
+ case DW_LNS_advance_line:
+ lineno += (int) read_sleb128 (line_buf);
+ break;
+ case DW_LNS_set_file:
+ {
+ uint64_t fileno;
+
+ fileno = read_uleb128 (line_buf);
+ if (fileno >= hdr->filenames_count)
+ {
+ dwarf_buf_error (line_buf,
+ ("invalid file number in "
+ "line number program"),
+ 0);
+ return 0;
+ }
+ filename = hdr->filenames[fileno];
+ }
+ break;
+ case DW_LNS_set_column:
+ read_uleb128 (line_buf);
+ break;
+ case DW_LNS_negate_stmt:
+ break;
+ case DW_LNS_set_basic_block:
+ break;
+ case DW_LNS_const_add_pc:
+ {
+ unsigned int advance;
+
+ /* Advance as if by special opcode 255, but emit no row.  */
+ op = 255 - hdr->opcode_base;
+ advance = op / hdr->line_range;
+ address += (hdr->min_insn_len * (op_index + advance)
+ / hdr->max_ops_per_insn);
+ op_index = (op_index + advance) % hdr->max_ops_per_insn;
+ }
+ break;
+ case DW_LNS_fixed_advance_pc:
+ address += read_uint16 (line_buf);
+ op_index = 0;
+ break;
+ case DW_LNS_set_prologue_end:
+ break;
+ case DW_LNS_set_epilogue_begin:
+ break;
+ case DW_LNS_set_isa:
+ read_uleb128 (line_buf);
+ break;
+ default:
+ {
+ unsigned int i;
+
+ /* Unknown standard opcode: skip its operands using the
+ lengths table from the header.  */
+ for (i = hdr->opcode_lengths[op - 1]; i > 0; --i)
+ read_uleb128 (line_buf);
+ }
+ break;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Read the line number information for a compilation unit. Returns 1
+ on success, 0 on failure. */
+
+static int
+read_line_info (struct backtrace_state *state, struct dwarf_data *ddata,
+ backtrace_error_callback error_callback, void *data,
+ struct unit *u, struct line_header *hdr, struct line **lines,
+ size_t *lines_count)
+{
+ struct line_vector vec;
+ struct dwarf_buf line_buf;
+ uint64_t len;
+ int is_dwarf64;
+ struct line *ln;
+
+ memset (&vec.vec, 0, sizeof vec.vec);
+ vec.count = 0;
+
+ memset (hdr, 0, sizeof *hdr);
+
+ /* The first test guards against an off_t -> size_t truncation.  */
+ if (u->lineoff != (off_t) (size_t) u->lineoff
+ || (size_t) u->lineoff >= ddata->dwarf_sections.size[DEBUG_LINE])
+ {
+ error_callback (data, "unit line offset out of range", 0);
+ goto fail;
+ }
+
+ line_buf.name = ".debug_line";
+ line_buf.start = ddata->dwarf_sections.data[DEBUG_LINE];
+ line_buf.buf = ddata->dwarf_sections.data[DEBUG_LINE] + u->lineoff;
+ line_buf.left = ddata->dwarf_sections.size[DEBUG_LINE] - u->lineoff;
+ line_buf.is_bigendian = ddata->is_bigendian;
+ line_buf.error_callback = error_callback;
+ line_buf.data = data;
+ line_buf.reported_underflow = 0;
+
+ /* Restrict the buffer to this unit's line number program.  */
+ len = read_initial_length (&line_buf, &is_dwarf64);
+ line_buf.left = len;
+
+ if (!read_line_header (state, ddata, u, is_dwarf64, &line_buf, hdr))
+ goto fail;
+
+ if (!read_line_program (state, ddata, hdr, &line_buf, &vec))
+ goto fail;
+
+ if (line_buf.reported_underflow)
+ goto fail;
+
+ if (vec.count == 0)
+ {
+ /* This is not a failure in the sense of generating an error,
+ but it is a failure in the sense that we have no useful
+ information.  */
+ goto fail;
+ }
+
+ /* Allocate one extra entry at the end.  It acts as a sentinel for
+ address searches and is not included in *LINES_COUNT.  */
+ ln = ((struct line *)
+ backtrace_vector_grow (state, sizeof (struct line), error_callback,
+ data, &vec.vec));
+ if (ln == NULL)
+ goto fail;
+ ln->pc = (uintptr_t) -1;
+ ln->filename = NULL;
+ ln->lineno = 0;
+ ln->idx = 0;
+
+ if (!backtrace_vector_release (state, &vec.vec, error_callback, data))
+ goto fail;
+
+ ln = (struct line *) vec.vec.base;
+ backtrace_qsort (ln, vec.count, sizeof (struct line), line_compare);
+
+ *lines = ln;
+ *lines_count = vec.count;
+
+ return 1;
+
+ fail:
+ backtrace_vector_free (state, &vec.vec, error_callback, data);
+ free_line_header (state, hdr, error_callback, data);
+ /* (uintptr_t) -1 marks the unit as "line info tried and failed" so
+ the caller does not retry.  */
+ *lines = (struct line *) (uintptr_t) -1;
+ *lines_count = 0;
+ return 0;
+}
+
+static const char *read_referenced_name (struct dwarf_data *, struct unit *,
+ uint64_t, backtrace_error_callback,
+ void *);
+
+/* Read the name of a function from a DIE referenced by ATTR with VAL. */
+
+static const char *
+read_referenced_name_from_attr (struct dwarf_data *ddata, struct unit *u,
+ struct attr *attr, struct attr_val *val,
+ backtrace_error_callback error_callback,
+ void *data)
+{
+ /* Only these two attributes name another DIE we should follow.  */
+ switch (attr->name)
+ {
+ case DW_AT_abstract_origin:
+ case DW_AT_specification:
+ break;
+ default:
+ return NULL;
+ }
+
+ /* Type-signature references would require .debug_types lookup, which
+ we do not do here.  */
+ if (attr->form == DW_FORM_ref_sig8)
+ return NULL;
+
+ if (val->encoding == ATTR_VAL_REF_INFO)
+ {
+ /* Absolute .debug_info offset: may land in a different unit, so
+ find the owning unit and make the offset unit-relative.  */
+ struct unit *unit
+ = find_unit (ddata->units, ddata->units_count,
+ val->u.uint);
+ if (unit == NULL)
+ return NULL;
+
+ uint64_t offset = val->u.uint - unit->low_offset;
+ return read_referenced_name (ddata, unit, offset, error_callback, data);
+ }
+
+ /* Unit-relative reference: stays in the current unit U.  */
+ if (val->encoding == ATTR_VAL_UINT
+ || val->encoding == ATTR_VAL_REF_UNIT)
+ return read_referenced_name (ddata, u, val->u.uint, error_callback, data);
+
+ if (val->encoding == ATTR_VAL_REF_ALT_INFO)
+ {
+ /* Reference into the supplementary (gnu_debugaltlink) file.  */
+ struct unit *alt_unit
+ = find_unit (ddata->altlink->units, ddata->altlink->units_count,
+ val->u.uint);
+ if (alt_unit == NULL)
+ return NULL;
+
+ uint64_t offset = val->u.uint - alt_unit->low_offset;
+ return read_referenced_name (ddata->altlink, alt_unit, offset,
+ error_callback, data);
+ }
+
+ return NULL;
+}
+
/* Read the name of a function from a DIE referenced by a
   DW_AT_abstract_origin or DW_AT_specification tag.  OFFSET is within
   the same compilation unit.  Returns the best name found, or NULL if
   none could be read.  */

static const char *
read_referenced_name (struct dwarf_data *ddata, struct unit *u,
                      uint64_t offset, backtrace_error_callback error_callback,
                      void *data)
{
  struct dwarf_buf unit_buf;
  uint64_t code;
  const struct abbrev *abbrev;
  const char *ret;
  size_t i;

  /* OFFSET is from the start of the data for this compilation unit.
     U->unit_data is the data, but it starts U->unit_data_offset bytes
     from the beginning.  */

  if (offset < u->unit_data_offset
      || offset - u->unit_data_offset >= u->unit_data_len)
    {
      error_callback (data,
                      "abstract origin or specification out of range",
                      0);
      return NULL;
    }

  offset -= u->unit_data_offset;

  /* Set up a temporary buffer positioned at the referenced DIE.  */
  unit_buf.name = ".debug_info";
  unit_buf.start = ddata->dwarf_sections.data[DEBUG_INFO];
  unit_buf.buf = u->unit_data + offset;
  unit_buf.left = u->unit_data_len - offset;
  unit_buf.is_bigendian = ddata->is_bigendian;
  unit_buf.error_callback = error_callback;
  unit_buf.data = data;
  unit_buf.reported_underflow = 0;

  code = read_uleb128 (&unit_buf);
  /* Abbreviation code zero marks a null DIE; a name reference must
     point at a real one.  */
  if (code == 0)
    {
      dwarf_buf_error (&unit_buf,
                       "invalid abstract origin or specification",
                       0);
      return NULL;
    }

  abbrev = lookup_abbrev (&u->abbrevs, code, error_callback, data);
  if (abbrev == NULL)
    return NULL;

  ret = NULL;
  for (i = 0; i < abbrev->num_attrs; ++i)
    {
      struct attr_val val;

      if (!read_attribute (abbrev->attrs[i].form, abbrev->attrs[i].val,
                           &unit_buf, u->is_dwarf64, u->version, u->addrsize,
                           &ddata->dwarf_sections, ddata->altlink, &val))
        return NULL;

      switch (abbrev->attrs[i].name)
        {
        case DW_AT_name:
          /* Third name preference: don't override.  A name we found in some
             other way, will normally be more useful -- e.g., this name is
             normally not mangled.  */
          if (ret != NULL)
            break;
          if (!resolve_string (&ddata->dwarf_sections, u->is_dwarf64,
                               ddata->is_bigendian, u->str_offsets_base,
                               &val, error_callback, data, &ret))
            return NULL;
          break;

        case DW_AT_linkage_name:
        case DW_AT_MIPS_linkage_name:
          /* First name preference: override all.  */
          {
            const char *s;

            s = NULL;
            if (!resolve_string (&ddata->dwarf_sections, u->is_dwarf64,
                                 ddata->is_bigendian, u->str_offsets_base,
                                 &val, error_callback, data, &s))
              return NULL;
            if (s != NULL)
              return s;
          }
          break;

        case DW_AT_specification:
          /* Second name preference: override DW_AT_name, don't override
             DW_AT_linkage_name.  Recurse into the referenced DIE.  */
          {
            const char *name;

            name = read_referenced_name_from_attr (ddata, u, &abbrev->attrs[i],
                                                   &val, error_callback, data);
            if (name != NULL)
              ret = name;
          }
          break;

        default:
          break;
        }
    }

  return ret;
}
+
+/* Add a range to a unit that maps to a function. This is called via
+ add_ranges. Returns 1 on success, 0 on error. */
+
+static int
+add_function_range (struct backtrace_state *state, void *rdata,
+ uint64_t lowpc, uint64_t highpc,
+ backtrace_error_callback error_callback, void *data,
+ void *pvec)
+{
+ struct function *function = (struct function *) rdata;
+ struct function_vector *vec = (struct function_vector *) pvec;
+ struct function_addrs *p;
+
+ if (vec->count > 0)
+ {
+ p = (struct function_addrs *) vec->vec.base + (vec->count - 1);
+ if ((lowpc == p->high || lowpc == p->high + 1)
+ && function == p->function)
+ {
+ if (highpc > p->high)
+ p->high = highpc;
+ return 1;
+ }
+ }
+
+ p = ((struct function_addrs *)
+ backtrace_vector_grow (state, sizeof (struct function_addrs),
+ error_callback, data, &vec->vec));
+ if (p == NULL)
+ return 0;
+
+ p->low = lowpc;
+ p->high = highpc;
+ p->function = function;
+
+ ++vec->count;
+
+ return 1;
+}
+
/* Read one entry plus all its children.  Add function addresses to
   VEC.  Returns 1 on success, 0 on error.  */

static int
read_function_entry (struct backtrace_state *state, struct dwarf_data *ddata,
                     struct unit *u, uint64_t base, struct dwarf_buf *unit_buf,
                     const struct line_header *lhdr,
                     backtrace_error_callback error_callback, void *data,
                     struct function_vector *vec_function,
                     struct function_vector *vec_inlined)
{
  while (unit_buf->left > 0)
    {
      uint64_t code;
      const struct abbrev *abbrev;
      int is_function;
      struct function *function;
      struct function_vector *vec;
      size_t i;
      struct pcrange pcrange;
      int have_linkage_name;

      code = read_uleb128 (unit_buf);
      /* A zero abbreviation code terminates the current list of
         sibling DIEs.  */
      if (code == 0)
        return 1;

      abbrev = lookup_abbrev (&u->abbrevs, code, error_callback, data);
      if (abbrev == NULL)
        return 0;

      is_function = (abbrev->tag == DW_TAG_subprogram
                     || abbrev->tag == DW_TAG_entry_point
                     || abbrev->tag == DW_TAG_inlined_subroutine);

      /* Ranges of inlined subroutines are collected separately from
         ordinary function ranges (see the FVEC handling below).  */
      if (abbrev->tag == DW_TAG_inlined_subroutine)
        vec = vec_inlined;
      else
        vec = vec_function;

      function = NULL;
      if (is_function)
        {
          function = ((struct function *)
                      backtrace_alloc (state, sizeof *function,
                                       error_callback, data));
          if (function == NULL)
            return 0;
          memset (function, 0, sizeof *function);
        }

      memset (&pcrange, 0, sizeof pcrange);
      have_linkage_name = 0;
      for (i = 0; i < abbrev->num_attrs; ++i)
        {
          struct attr_val val;

          if (!read_attribute (abbrev->attrs[i].form, abbrev->attrs[i].val,
                               unit_buf, u->is_dwarf64, u->version,
                               u->addrsize, &ddata->dwarf_sections,
                               ddata->altlink, &val))
            return 0;

          /* The compile unit sets the base address for any address
             ranges in the function entries.  */
          if ((abbrev->tag == DW_TAG_compile_unit
               || abbrev->tag == DW_TAG_skeleton_unit)
              && abbrev->attrs[i].name == DW_AT_low_pc)
            {
              if (val.encoding == ATTR_VAL_ADDRESS)
                base = val.u.uint;
              else if (val.encoding == ATTR_VAL_ADDRESS_INDEX)
                {
                  /* The value is an index into the address table;
                     resolve it to an actual address.  */
                  if (!resolve_addr_index (&ddata->dwarf_sections,
                                           u->addr_base, u->addrsize,
                                           ddata->is_bigendian, val.u.uint,
                                           error_callback, data, &base))
                    return 0;
                }
            }

          if (is_function)
            {
              switch (abbrev->attrs[i].name)
                {
                case DW_AT_call_file:
                  if (val.encoding == ATTR_VAL_UINT)
                    {
                      /* Reject file numbers beyond the line-header
                         file table.  */
                      if (val.u.uint >= lhdr->filenames_count)
                        {
                          dwarf_buf_error (unit_buf,
                                           ("invalid file number in "
                                            "DW_AT_call_file attribute"),
                                           0);
                          return 0;
                        }
                      function->caller_filename = lhdr->filenames[val.u.uint];
                    }
                  break;

                case DW_AT_call_line:
                  if (val.encoding == ATTR_VAL_UINT)
                    function->caller_lineno = val.u.uint;
                  break;

                case DW_AT_abstract_origin:
                case DW_AT_specification:
                  /* Second name preference: override DW_AT_name, don't override
                     DW_AT_linkage_name.  */
                  if (have_linkage_name)
                    break;
                  {
                    const char *name;

                    name
                      = read_referenced_name_from_attr (ddata, u,
                                                        &abbrev->attrs[i], &val,
                                                        error_callback, data);
                    if (name != NULL)
                      function->name = name;
                  }
                  break;

                case DW_AT_name:
                  /* Third name preference: don't override.  */
                  if (function->name != NULL)
                    break;
                  if (!resolve_string (&ddata->dwarf_sections, u->is_dwarf64,
                                       ddata->is_bigendian,
                                       u->str_offsets_base, &val,
                                       error_callback, data, &function->name))
                    return 0;
                  break;

                case DW_AT_linkage_name:
                case DW_AT_MIPS_linkage_name:
                  /* First name preference: override all.  */
                  {
                    const char *s;

                    s = NULL;
                    if (!resolve_string (&ddata->dwarf_sections, u->is_dwarf64,
                                         ddata->is_bigendian,
                                         u->str_offsets_base, &val,
                                         error_callback, data, &s))
                      return 0;
                    if (s != NULL)
                      {
                        function->name = s;
                        have_linkage_name = 1;
                      }
                  }
                  break;

                case DW_AT_low_pc: case DW_AT_high_pc: case DW_AT_ranges:
                  update_pcrange (&abbrev->attrs[i], &val, &pcrange);
                  break;

                default:
                  break;
                }
            }
        }

      /* If we couldn't find a name for the function, we have no use
         for it.  */
      if (is_function && function->name == NULL)
        {
          backtrace_free (state, function, sizeof *function,
                          error_callback, data);
          is_function = 0;
        }

      if (is_function)
        {
          if (pcrange.have_ranges
              || (pcrange.have_lowpc && pcrange.have_highpc))
            {
              if (!add_ranges (state, &ddata->dwarf_sections,
                               ddata->base_address, ddata->is_bigendian,
                               u, base, &pcrange, add_function_range,
                               (void *) function, error_callback, data,
                               (void *) vec))
                return 0;
            }
          else
            {
              /* A function with no address range can never match a
                 PC lookup; discard it.  */
              backtrace_free (state, function, sizeof *function,
                              error_callback, data);
              is_function = 0;
            }
        }

      if (abbrev->has_children)
        {
          if (!is_function)
            {
              if (!read_function_entry (state, ddata, u, base, unit_buf, lhdr,
                                        error_callback, data, vec_function,
                                        vec_inlined))
                return 0;
            }
          else
            {
              struct function_vector fvec;

              /* Gather any information for inlined functions in
                 FVEC.  */

              memset (&fvec, 0, sizeof fvec);

              if (!read_function_entry (state, ddata, u, base, unit_buf, lhdr,
                                        error_callback, data, vec_function,
                                        &fvec))
                return 0;

              if (fvec.count > 0)
                {
                  struct function_addrs *p;
                  struct function_addrs *faddrs;

                  /* Allocate a trailing entry, but don't include it
                     in fvec.count.  */
                  p = ((struct function_addrs *)
                       backtrace_vector_grow (state,
                                              sizeof (struct function_addrs),
                                              error_callback, data,
                                              &fvec.vec));
                  if (p == NULL)
                    return 0;
                  /* The sentinel's low/high are all-ones; since PC
                     lookups skip pc == -1, a search cannot walk past
                     the end of the array.  */
                  p->low = 0;
                  --p->low;
                  p->high = p->low;
                  p->function = NULL;

                  if (!backtrace_vector_release (state, &fvec.vec,
                                                 error_callback, data))
                    return 0;

                  faddrs = (struct function_addrs *) fvec.vec.base;
                  backtrace_qsort (faddrs, fvec.count,
                                   sizeof (struct function_addrs),
                                   function_addrs_compare);

                  function->function_addrs = faddrs;
                  function->function_addrs_count = fvec.count;
                }
            }
        }
    }

  return 1;
}
+
/* Read function name information for a compilation unit.  We look
   through the whole unit looking for function tags.  On success the
   sorted array of function address ranges is stored through
   *RET_ADDRS and *RET_ADDRS_COUNT; on failure this returns early
   without setting them (the caller is expected to have initialized
   them).  */

static void
read_function_info (struct backtrace_state *state, struct dwarf_data *ddata,
                    const struct line_header *lhdr,
                    backtrace_error_callback error_callback, void *data,
                    struct unit *u, struct function_vector *fvec,
                    struct function_addrs **ret_addrs,
                    size_t *ret_addrs_count)
{
  struct function_vector lvec;
  struct function_vector *pfvec;
  struct dwarf_buf unit_buf;
  struct function_addrs *p;
  struct function_addrs *addrs;
  size_t addrs_count;

  /* Use FVEC if it is not NULL.  Otherwise use our own vector.  */
  if (fvec != NULL)
    pfvec = fvec;
  else
    {
      memset (&lvec, 0, sizeof lvec);
      pfvec = &lvec;
    }

  /* Walk all the DIEs of the unit.  */
  unit_buf.name = ".debug_info";
  unit_buf.start = ddata->dwarf_sections.data[DEBUG_INFO];
  unit_buf.buf = u->unit_data;
  unit_buf.left = u->unit_data_len;
  unit_buf.is_bigendian = ddata->is_bigendian;
  unit_buf.error_callback = error_callback;
  unit_buf.data = data;
  unit_buf.reported_underflow = 0;

  while (unit_buf.left > 0)
    {
      if (!read_function_entry (state, ddata, u, 0, &unit_buf, lhdr,
                                error_callback, data, pfvec, pfvec))
        return;
    }

  if (pfvec->count == 0)
    return;

  /* Allocate a trailing entry, but don't include it in
     pfvec->count.  The sentinel's all-ones low/high keeps searches
     from running off the end of the array.  */
  p = ((struct function_addrs *)
       backtrace_vector_grow (state, sizeof (struct function_addrs),
                              error_callback, data, &pfvec->vec));
  if (p == NULL)
    return;
  p->low = 0;
  --p->low;
  p->high = p->low;
  p->function = NULL;

  addrs_count = pfvec->count;

  if (fvec == NULL)
    {
      /* Release the local vector's excess memory.  */
      if (!backtrace_vector_release (state, &lvec.vec, error_callback, data))
        return;
      addrs = (struct function_addrs *) pfvec->vec.base;
    }
  else
    {
      /* Finish this list of addresses, but leave the remaining space in
         the vector available for the next function unit.  */
      addrs = ((struct function_addrs *)
               backtrace_vector_finish (state, &fvec->vec,
                                        error_callback, data));
      if (addrs == NULL)
        return;
      fvec->count = 0;
    }

  backtrace_qsort (addrs, addrs_count, sizeof (struct function_addrs),
                   function_addrs_compare);

  *ret_addrs = addrs;
  *ret_addrs_count = addrs_count;
}
+
/* See if PC is inlined in FUNCTION.  If it is, print out the inlined
   information, and update FILENAME and LINENO for the caller.
   Returns whatever CALLBACK returns, or 0 to keep going.  */

static int
report_inlined_functions (uintptr_t pc, struct function *function,
                          backtrace_full_callback callback, void *data,
                          const char **filename, int *lineno)
{
  struct function_addrs *p;
  struct function_addrs *match;
  struct function *inlined;
  int ret;

  /* Nothing to do if this function has no inlined ranges.  */
  if (function->function_addrs_count == 0)
    return 0;

  /* Our search isn't safe if pc == -1, as that is the sentinel
     value.  */
  if (pc + 1 == 0)
    return 0;

  p = ((struct function_addrs *)
       bsearch (&pc, function->function_addrs,
                function->function_addrs_count,
                sizeof (struct function_addrs),
                function_addrs_search));
  if (p == NULL)
    return 0;

  /* Here pc >= p->low && pc < (p + 1)->low.  The function_addrs are
     sorted by low, so if pc > p->low we are at the end of a range of
     function_addrs with the same low value.  If pc == p->low walk
     forward to the end of the range with that low value.  Then walk
     backward and use the first range that includes pc.  */
  while (pc == (p + 1)->low)
    ++p;
  match = NULL;
  while (1)
    {
      if (pc < p->high)
        {
          match = p;
          break;
        }
      if (p == function->function_addrs)
        break;
      if ((p - 1)->low < p->low)
        break;
      --p;
    }
  if (match == NULL)
    return 0;

  /* We found an inlined call.  */

  inlined = match->function;

  /* Report any calls inlined into this one, innermost first (via
     recursion).  */
  ret = report_inlined_functions (pc, inlined, callback, data,
                                  filename, lineno);
  if (ret != 0)
    return ret;

  /* Report this inlined call.  */
  ret = callback (data, pc, match->low, *filename, *lineno, inlined->name);
  if (ret != 0)
    return ret;

  /* Our caller will report the caller of the inlined function; tell
     it the appropriate filename and line number.  */
  *filename = inlined->caller_filename;
  *lineno = inlined->caller_lineno;

  return 0;
}
+
/* Look for a PC in the DWARF mapping for one module.  On success,
   call CALLBACK and return whatever it returns.  On error, call
   ERROR_CALLBACK and return 0.  Sets *FOUND to 1 if the PC is found,
   0 if not.  */

static int
dwarf_lookup_pc (struct backtrace_state *state, struct dwarf_data *ddata,
                 uintptr_t pc, backtrace_full_callback callback,
                 backtrace_error_callback error_callback, void *data,
                 int *found)
{
  struct unit_addrs *entry;
  int found_entry;
  struct unit *u;
  int new_data;
  struct line *lines;
  struct line *ln;
  struct function_addrs *p;
  struct function_addrs *fmatch;
  struct function *function;
  const char *filename;
  int lineno;
  int ret;

  *found = 1;

  /* Find an address range that includes PC.  Our search isn't safe if
     PC == -1, as we use that as a sentinel value, so skip the search
     in that case.  */
  entry = (ddata->addrs_count == 0 || pc + 1 == 0
           ? NULL
           : (struct unit_addrs*)bsearch (&pc, ddata->addrs, ddata->addrs_count,
                                          sizeof (struct unit_addrs), unit_addrs_search));

  if (entry == NULL)
    {
      *found = 0;
      return 0;
    }

  /* Here pc >= entry->low && pc < (entry + 1)->low.  The unit_addrs
     are sorted by low, so if pc > p->low we are at the end of a range
     of unit_addrs with the same low value.  If pc == p->low walk
     forward to the end of the range with that low value.  Then walk
     backward and use the first range that includes pc.  */
  while (pc == (entry + 1)->low)
    ++entry;
  found_entry = 0;
  while (1)
    {
      if (pc < entry->high)
        {
          found_entry = 1;
          break;
        }
      if (entry == ddata->addrs)
        break;
      if ((entry - 1)->low < entry->low)
        break;
      --entry;
    }
  if (!found_entry)
    {
      *found = 0;
      return 0;
    }

  /* We need the lines, lines_count, function_addrs,
     function_addrs_count fields of u.  If they are not set, we need
     to set them.  When running in threaded mode, we need to allow for
     the possibility that some other thread is setting them
     simultaneously.  */

  u = entry->u;
  lines = u->lines;

  /* Skip units with no useful line number information by walking
     backward.  Useless line number information is marked by setting
     lines == -1.  */
  while (entry > ddata->addrs
         && pc >= (entry - 1)->low
         && pc < (entry - 1)->high)
    {
      if (state->threaded)
        lines = (struct line *) backtrace_atomic_load_pointer (&u->lines);

      if (lines != (struct line *) (uintptr_t) -1)
        break;

      --entry;

      u = entry->u;
      lines = u->lines;
    }

  if (state->threaded)
    lines = backtrace_atomic_load_pointer (&u->lines);

  new_data = 0;
  if (lines == NULL)
    {
      struct function_addrs *function_addrs;
      size_t function_addrs_count;
      struct line_header lhdr;
      size_t count;

      /* We have never read the line information for this unit.  Read
         it now.  (On failure read_line_info sets LINES to -1 and
         COUNT to 0, so both are defined below either way.)  */

      function_addrs = NULL;
      function_addrs_count = 0;
      if (read_line_info (state, ddata, error_callback, data, entry->u, &lhdr,
                          &lines, &count))
        {
          struct function_vector *pfvec;

          /* If not threaded, reuse DDATA->FVEC for better memory
             consumption.  */
          if (state->threaded)
            pfvec = NULL;
          else
            pfvec = &ddata->fvec;
          read_function_info (state, ddata, &lhdr, error_callback, data,
                              entry->u, pfvec, &function_addrs,
                              &function_addrs_count);
          free_line_header (state, &lhdr, error_callback, data);
          new_data = 1;
        }

      /* Atomically store the information we just read into the unit.
         If another thread is simultaneously writing, it presumably
         read the same information, and we don't care which one we
         wind up with; we just leak the other one.  We do have to
         write the lines field last, so that the acquire-loads above
         ensure that the other fields are set.  */

      if (!state->threaded)
        {
          u->lines_count = count;
          u->function_addrs = function_addrs;
          u->function_addrs_count = function_addrs_count;
          u->lines = lines;
        }
      else
        {
          backtrace_atomic_store_size_t (&u->lines_count, count);
          backtrace_atomic_store_pointer (&u->function_addrs, function_addrs);
          backtrace_atomic_store_size_t (&u->function_addrs_count,
                                         function_addrs_count);
          backtrace_atomic_store_pointer (&u->lines, lines);
        }
    }

  /* Now all fields of U have been initialized.  */

  if (lines == (struct line *) (uintptr_t) -1)
    {
      /* If reading the line number information failed in some way,
         try again to see if there is a better compilation unit for
         this PC.  */
      if (new_data)
        return dwarf_lookup_pc (state, ddata, pc, callback, error_callback,
                                data, found);
      return callback (data, pc, 0, NULL, 0, NULL);
    }

  /* Search for PC within this unit.  */

  ln = (struct line *) bsearch (&pc, lines, entry->u->lines_count,
                                sizeof (struct line), line_search);
  if (ln == NULL)
    {
      /* The PC is between the low_pc and high_pc attributes of the
         compilation unit, but no entry in the line table covers it.
         This implies that the start of the compilation unit has no
         line number information.  */

      if (entry->u->abs_filename == NULL)
        {
          const char *filename;

          /* Build (and cache) the unit's absolute file name by
             joining comp_dir with the relative name when needed.  */
          filename = entry->u->filename;
          if (filename != NULL
              && !IS_ABSOLUTE_PATH (filename)
              && entry->u->comp_dir != NULL)
            {
              size_t filename_len;
              const char *dir;
              size_t dir_len;
              char *s;

              filename_len = strlen (filename);
              dir = entry->u->comp_dir;
              dir_len = strlen (dir);
              s = (char *) backtrace_alloc (state, dir_len + filename_len + 2,
                                            error_callback, data);
              if (s == NULL)
                {
                  *found = 0;
                  return 0;
                }
              memcpy (s, dir, dir_len);
              /* FIXME: Should use backslash if DOS file system.  */
              s[dir_len] = '/';
              memcpy (s + dir_len + 1, filename, filename_len + 1);
              filename = s;
            }
          entry->u->abs_filename = filename;
        }

      return callback (data, pc, 0, entry->u->abs_filename, 0, NULL);
    }

  /* Search for function name within this unit.  */

  if (entry->u->function_addrs_count == 0)
    return callback (data, pc, 0, ln->filename, ln->lineno, NULL);

  p = ((struct function_addrs *)
       bsearch (&pc, entry->u->function_addrs,
                entry->u->function_addrs_count,
                sizeof (struct function_addrs),
                function_addrs_search));
  if (p == NULL)
    return callback (data, pc, 0, ln->filename, ln->lineno, NULL);

  /* Here pc >= p->low && pc < (p + 1)->low.  The function_addrs are
     sorted by low, so if pc > p->low we are at the end of a range of
     function_addrs with the same low value.  If pc == p->low walk
     forward to the end of the range with that low value.  Then walk
     backward and use the first range that includes pc.  */
  while (pc == (p + 1)->low)
    ++p;
  fmatch = NULL;
  while (1)
    {
      if (pc < p->high)
        {
          fmatch = p;
          break;
        }
      if (p == entry->u->function_addrs)
        break;
      if ((p - 1)->low < p->low)
        break;
      --p;
    }
  if (fmatch == NULL)
    return callback (data, pc, 0, ln->filename, ln->lineno, NULL);

  function = fmatch->function;

  filename = ln->filename;
  lineno = ln->lineno;

  /* Report inlined frames first; this may update FILENAME and LINENO
     to the call site of the outermost inlined function.  */
  ret = report_inlined_functions (pc, function, callback, data,
                                  &filename, &lineno);
  if (ret != 0)
    return ret;

  return callback (data, pc, fmatch->low, filename, lineno, function->name);
}
+
+
/* Return the file/line information for a PC using the DWARF mapping
   we built earlier.  Each known module is tried in turn; if none
   claims the PC, CALLBACK is invoked with empty information.  */

static int
dwarf_fileline (struct backtrace_state *state, uintptr_t pc,
                backtrace_full_callback callback,
                backtrace_error_callback error_callback, void *data)
{
  struct dwarf_data *ddata;
  int found;
  int ret;

  if (!state->threaded)
    {
      for (ddata = (struct dwarf_data *) state->fileline_data;
           ddata != NULL;
           ddata = ddata->next)
        {
          ret = dwarf_lookup_pc (state, ddata, pc, callback, error_callback,
                                 data, &found);
          if (ret != 0 || found)
            return ret;
        }
    }
  else
    {
      struct dwarf_data **pp;

      /* In threaded mode walk the list with atomic loads, since
         another thread may be appending entries concurrently (see
         backtrace_dwarf_add).  */
      pp = (struct dwarf_data **) (void *) &state->fileline_data;
      while (1)
        {
          ddata = backtrace_atomic_load_pointer (pp);
          if (ddata == NULL)
            break;

          ret = dwarf_lookup_pc (state, ddata, pc, callback, error_callback,
                                 data, &found);
          if (ret != 0 || found)
            return ret;

          pp = &ddata->next;
        }
    }

  /* FIXME: See if any libraries have been dlopen'ed.  */

  return callback (data, pc, 0, NULL, 0, NULL);
}
+
/* Initialize our data structures from the DWARF debug info for a
   file.  Return NULL on failure.  */

static struct dwarf_data *
build_dwarf_data (struct backtrace_state *state,
                  uintptr_t base_address,
                  const struct dwarf_sections *dwarf_sections,
                  int is_bigendian,
                  struct dwarf_data *altlink,
                  backtrace_error_callback error_callback,
                  void *data)
{
  struct unit_addrs_vector addrs_vec;
  struct unit_addrs *addrs;
  size_t addrs_count;
  struct unit_vector units_vec;
  struct unit **units;
  size_t units_count;
  struct dwarf_data *fdata;

  if (!build_address_map (state, base_address, dwarf_sections, is_bigendian,
                          altlink, error_callback, data, &addrs_vec,
                          &units_vec))
    return NULL;

  /* Release the vectors' excess memory before keeping the arrays.  */
  if (!backtrace_vector_release (state, &addrs_vec.vec, error_callback, data))
    return NULL;
  if (!backtrace_vector_release (state, &units_vec.vec, error_callback, data))
    return NULL;
  addrs = (struct unit_addrs *) addrs_vec.vec.base;
  units = (struct unit **) units_vec.vec.base;
  addrs_count = addrs_vec.count;
  units_count = units_vec.count;
  backtrace_qsort (addrs, addrs_count, sizeof (struct unit_addrs),
                   unit_addrs_compare);
  /* No qsort for units required, already sorted.  */

  fdata = ((struct dwarf_data *)
           backtrace_alloc (state, sizeof (struct dwarf_data),
                            error_callback, data));
  if (fdata == NULL)
    return NULL;

  fdata->next = NULL;
  fdata->altlink = altlink;
  fdata->base_address = base_address;
  fdata->addrs = addrs;
  fdata->addrs_count = addrs_count;
  fdata->units = units;
  fdata->units_count = units_count;
  fdata->dwarf_sections = *dwarf_sections;
  fdata->is_bigendian = is_bigendian;
  /* FVEC is scratch space reused by read_function_info in the
     unthreaded case; start it out empty.  */
  memset (&fdata->fvec, 0, sizeof fdata->fvec);

  return fdata;
}
+
/* Build our data structures from the DWARF sections for a module.
   Set FILELINE_FN and STATE->FILELINE_DATA.  Return 1 on success, 0
   on failure.  */

int
backtrace_dwarf_add (struct backtrace_state *state,
                     uintptr_t base_address,
                     const struct dwarf_sections *dwarf_sections,
                     int is_bigendian,
                     struct dwarf_data *fileline_altlink,
                     backtrace_error_callback error_callback,
                     void *data, fileline *fileline_fn,
                     struct dwarf_data **fileline_entry)
{
  struct dwarf_data *fdata;

  fdata = build_dwarf_data (state, base_address, dwarf_sections, is_bigendian,
                            fileline_altlink, error_callback, data);
  if (fdata == NULL)
    return 0;

  if (fileline_entry != NULL)
    *fileline_entry = fdata;

  if (!state->threaded)
    {
      struct dwarf_data **pp;

      /* Append FDATA to the end of the module list.  */
      for (pp = (struct dwarf_data **) (void *) &state->fileline_data;
           *pp != NULL;
           pp = &(*pp)->next)
        ;
      *pp = fdata;
    }
  else
    {
      /* Lock-free append: find the current tail and try to install
         FDATA with a compare-and-swap; if another thread appended
         first, restart from the list head.  */
      while (1)
        {
          struct dwarf_data **pp;

          pp = (struct dwarf_data **) (void *) &state->fileline_data;

          while (1)
            {
              struct dwarf_data *p;

              p = backtrace_atomic_load_pointer (pp);

              if (p == NULL)
                break;

              pp = &p->next;
            }

          if (__sync_bool_compare_and_swap (pp, NULL, fdata))
            break;
        }
    }

  *fileline_fn = dwarf_fileline;

  return 1;
}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/elf.cpp b/3rdparty/tracy/tracy/libbacktrace/elf.cpp
new file mode 100644
index 0000000..50715bf
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/elf.cpp
@@ -0,0 +1,4928 @@
+/* elf.c -- Get debug data from an ELF file for backtraces.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#ifdef HAVE_DL_ITERATE_PHDR
+#include <link.h>
+#endif
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+#ifndef S_ISLNK
+ #ifndef S_IFLNK
+ #define S_IFLNK 0120000
+ #endif
+ #ifndef S_IFMT
+ #define S_IFMT 0170000
+ #endif
+ #define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#endif
+
+#ifndef __GNUC__
+#define __builtin_prefetch(p, r, l)
+#ifndef unlikely
+#define unlikely(x) (x)
+#endif
+#else
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+#endif
+
+namespace tracy
+{
+
+#if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN
+
/* If strnlen is not declared, provide our own version.  Returns the
   length of S, but never examines more than MAXLEN bytes.  */

static size_t
xstrnlen (const char *s, size_t maxlen)
{
  size_t len = 0;

  while (len < maxlen && s[len] != '\0')
    ++len;
  return len;
}
+
+#define strnlen xstrnlen
+
+#endif
+
+#ifndef HAVE_LSTAT
+
/* Dummy version of lstat for systems that don't have it.  Always
   fails (returns -1) without examining PATH or filling in ST.  */

static int
xlstat (const char *path ATTRIBUTE_UNUSED, struct stat *st ATTRIBUTE_UNUSED)
{
  return -1;
}
+
+#define lstat xlstat
+
+#endif
+
+#ifndef HAVE_READLINK
+
/* Dummy version of readlink for systems that don't have it.  Always
   fails (returns -1) without writing to BUF.  */

static ssize_t
xreadlink (const char *path ATTRIBUTE_UNUSED, char *buf ATTRIBUTE_UNUSED,
           size_t bufsz ATTRIBUTE_UNUSED)
{
  return -1;
}
+
+#define readlink xreadlink
+
+#endif
+
+#ifndef HAVE_DL_ITERATE_PHDR
+
/* Dummy version of dl_iterate_phdr for systems that don't have it.  */

#define dl_phdr_info x_dl_phdr_info
#define dl_iterate_phdr x_dl_iterate_phdr

/* Minimal stand-in for the glibc dl_phdr_info structure; only the
   fields this file uses are declared.  */
struct dl_phdr_info
{
  uintptr_t dlpi_addr;		/* Base load address of the object.  */
  const char *dlpi_name;	/* Path name of the object.  */
};

/* Never invokes CALLBACK; behaves as if no shared objects exist.  */
static int
dl_iterate_phdr (int (*callback) (struct dl_phdr_info *,
                                  size_t, void *) ATTRIBUTE_UNUSED,
                 void *data ATTRIBUTE_UNUSED)
{
  return 0;
}
+
+#endif /* ! defined (HAVE_DL_ITERATE_PHDR) */
+
+/* The configure script must tell us whether we are 32-bit or 64-bit
+ ELF. We could make this code test and support either possibility,
+ but there is no point. This code only works for the currently
+ running executable, which means that we know the ELF mode at
+ configure time. */
+
+#if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64
+#error "Unknown BACKTRACE_ELF_SIZE"
+#endif
+
+/* <link.h> might #include <elf.h> which might define our constants
+ with slightly different values. Undefine them to be safe. */
+
+#undef EI_NIDENT
+#undef EI_MAG0
+#undef EI_MAG1
+#undef EI_MAG2
+#undef EI_MAG3
+#undef EI_CLASS
+#undef EI_DATA
+#undef EI_VERSION
+#undef ELF_MAG0
+#undef ELF_MAG1
+#undef ELF_MAG2
+#undef ELF_MAG3
+#undef ELFCLASS32
+#undef ELFCLASS64
+#undef ELFDATA2LSB
+#undef ELFDATA2MSB
+#undef EV_CURRENT
+#undef ET_DYN
+#undef EM_PPC64
+#undef EF_PPC64_ABI
+#undef SHN_LORESERVE
+#undef SHN_XINDEX
+#undef SHN_UNDEF
+#undef SHT_PROGBITS
+#undef SHT_SYMTAB
+#undef SHT_STRTAB
+#undef SHT_DYNSYM
+#undef SHF_COMPRESSED
+#undef STT_OBJECT
+#undef STT_FUNC
+#undef NT_GNU_BUILD_ID
+#undef ELFCOMPRESS_ZLIB
+
+/* Basic types. */
+
+typedef uint16_t b_elf_half; /* Elf_Half. */
+typedef uint32_t b_elf_word; /* Elf_Word. */
+typedef int32_t b_elf_sword; /* Elf_Sword. */
+
+#if BACKTRACE_ELF_SIZE == 32
+
+typedef uint32_t b_elf_addr; /* Elf_Addr. */
+typedef uint32_t b_elf_off; /* Elf_Off. */
+
+typedef uint32_t b_elf_wxword; /* 32-bit Elf_Word, 64-bit ELF_Xword. */
+
+#else
+
+typedef uint64_t b_elf_addr; /* Elf_Addr. */
+typedef uint64_t b_elf_off; /* Elf_Off. */
+typedef uint64_t b_elf_xword; /* Elf_Xword. */
+typedef int64_t b_elf_sxword; /* Elf_Sxword. */
+
+typedef uint64_t b_elf_wxword; /* 32-bit Elf_Word, 64-bit ELF_Xword. */
+
+#endif
+
+/* Data structures and associated constants. */
+
+#define EI_NIDENT 16
+
/* ELF file header.  */
typedef struct {
  unsigned char	e_ident[EI_NIDENT];	/* ELF "magic number" */
  b_elf_half	e_type;			/* Identifies object file type */
  b_elf_half	e_machine;		/* Specifies required architecture */
  b_elf_word	e_version;		/* Identifies object file version */
  b_elf_addr	e_entry;		/* Entry point virtual address */
  b_elf_off	e_phoff;		/* Program header table file offset */
  b_elf_off	e_shoff;		/* Section header table file offset */
  b_elf_word	e_flags;		/* Processor-specific flags */
  b_elf_half	e_ehsize;		/* ELF header size in bytes */
  b_elf_half	e_phentsize;		/* Program header table entry size */
  b_elf_half	e_phnum;		/* Program header table entry count */
  b_elf_half	e_shentsize;		/* Section header table entry size */
  b_elf_half	e_shnum;		/* Section header table entry count */
  b_elf_half	e_shstrndx;		/* Section header string table index */
} b_elf_ehdr;  /* Elf_Ehdr.  */
+
+#define EI_MAG0 0
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+
+#define ELFMAG0 0x7f
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+#define EV_CURRENT 1
+
+#define ET_DYN 3
+
+#define EM_PPC64 21
+#define EF_PPC64_ABI 3
+
/* ELF section header.  */
typedef struct {
  b_elf_word	sh_name;		/* Section name, index in string tbl */
  b_elf_word	sh_type;		/* Type of section */
  b_elf_wxword	sh_flags;		/* Miscellaneous section attributes */
  b_elf_addr	sh_addr;		/* Section virtual addr at execution */
  b_elf_off	sh_offset;		/* Section file offset */
  b_elf_wxword	sh_size;		/* Size of section in bytes */
  b_elf_word	sh_link;		/* Index of another section */
  b_elf_word	sh_info;		/* Additional section information */
  b_elf_wxword	sh_addralign;		/* Section alignment */
  b_elf_wxword	sh_entsize;		/* Entry size if section holds table */
} b_elf_shdr;  /* Elf_Shdr.  */
+
+#define SHN_UNDEF 0x0000 /* Undefined section */
+#define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */
+#define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */
+
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_DYNSYM 11
+
+#define SHF_COMPRESSED 0x800
+
+#if BACKTRACE_ELF_SIZE == 32
+
+/* Symbol table entry, using the ELF32 field ordering.  */
+typedef struct
+{
+  b_elf_word st_name; /* Symbol name, index in string tbl */
+  b_elf_addr st_value; /* Symbol value */
+  b_elf_word st_size; /* Symbol size */
+  unsigned char st_info; /* Symbol binding and type */
+  unsigned char st_other; /* Visibility and other data */
+  b_elf_half st_shndx; /* Symbol section index */
+} b_elf_sym; /* Elf_Sym. */
+
+#else /* BACKTRACE_ELF_SIZE != 32 */
+
+/* Symbol table entry, using the ELF64 field ordering (st_value and
+   st_size moved to the end, after the byte-sized fields).  */
+typedef struct
+{
+  b_elf_word st_name; /* Symbol name, index in string tbl */
+  unsigned char st_info; /* Symbol binding and type */
+  unsigned char st_other; /* Visibility and other data */
+  b_elf_half st_shndx; /* Symbol section index */
+  b_elf_addr st_value; /* Symbol value */
+  b_elf_xword st_size; /* Symbol size */
+} b_elf_sym; /* Elf_Sym. */
+
+#endif /* BACKTRACE_ELF_SIZE != 32 */
+
+#define STT_OBJECT 1
+#define STT_FUNC 2
+
+/* An ELF note header, as found in SHT_NOTE sections; the name data
+   (e.g. "GNU" for NT_GNU_BUILD_ID) starts at NAME and is followed,
+   after padding, by DESCSZ bytes of descriptor.  */
+typedef struct
+{
+  uint32_t namesz; /* Size of the note name, including NUL */
+  uint32_t descsz; /* Size of the note descriptor */
+  uint32_t type; /* Note type (e.g. NT_GNU_BUILD_ID) */
+  char name[1]; /* Start of the note name */
+} b_elf_note;
+
+#define NT_GNU_BUILD_ID 3
+
+#if BACKTRACE_ELF_SIZE == 32
+
+typedef struct
+{
+  b_elf_word ch_type; /* Compression algorithm */
+  b_elf_word ch_size; /* Uncompressed size */
+  b_elf_word ch_addralign; /* Alignment for uncompressed data */
+} b_elf_chdr; /* Elf_Chdr */
+
+#else /* BACKTRACE_ELF_SIZE != 32 */
+
+/* Compression header for SHF_COMPRESSED sections, ELF64 layout.  */
+typedef struct
+{
+  b_elf_word ch_type; /* Compression algorithm */
+  b_elf_word ch_reserved; /* Reserved */
+  b_elf_xword ch_size; /* Uncompressed size */
+  b_elf_xword ch_addralign; /* Alignment for uncompressed data */
+} b_elf_chdr; /* Elf_Chdr */
+
+#endif /* BACKTRACE_ELF_SIZE != 32 */
+
+#define ELFCOMPRESS_ZLIB 1
+
+/* Names of sections, indexed by enum dwarf_section in internal.h. */
+
+static const char * const dwarf_section_names[DEBUG_MAX] =
+{
+  ".debug_info",
+  ".debug_line",
+  ".debug_abbrev",
+  ".debug_ranges",
+  ".debug_str",
+  ".debug_addr",
+  ".debug_str_offsets",
+  ".debug_line_str",
+  ".debug_rnglists" /* DWARF 5 replacement for .debug_ranges */
+};
+
+/* Information we gather for the sections we care about. */
+
+struct debug_section_info
+{
+  /* Section file offset. */
+  off_t offset;
+  /* Section size. */
+  size_t size;
+  /* Section contents, after read from the file or in-memory image. */
+  const unsigned char *data;
+  /* Whether the SHF_COMPRESSED flag is set for the section. */
+  int compressed;
+};
+
+/* Information we keep for an ELF symbol. */
+
+struct elf_symbol
+{
+  /* The name of the symbol (points into the string table view). */
+  const char *name;
+  /* The address of the symbol, after the module base address has been
+     added (see elf_initialize_syminfo). */
+  uintptr_t address;
+  /* The size of the symbol. */
+  size_t size;
+};
+
+/* Information to pass to elf_syminfo. */
+
+struct elf_syminfo_data
+{
+  /* Data for the next module, forming a singly linked list. */
+  struct elf_syminfo_data *next;
+  /* The ELF symbols, sorted by address. */
+  struct elf_symbol *symbols;
+  /* The number of symbols. */
+  size_t count;
+};
+
+/* A view that works for either a file or memory. */
+
+struct elf_view
+{
+  struct backtrace_view view; /* The underlying file or memory view. */
+  int release; /* If non-zero, must call backtrace_release_view. */
+};
+
+/* Information about PowerPC64 ELFv1 .opd section. */
+
+struct elf_ppc64_opd_data
+{
+  /* Address of the .opd section. */
+  b_elf_addr addr;
+  /* Section contents; function descriptors here hold the actual code
+     addresses (see elf_initialize_syminfo). */
+  const char *data;
+  /* Size of the .opd section. */
+  size_t size;
+  /* Corresponding section view. */
+  struct elf_view view;
+};
+
+/* Create a view of SIZE bytes from DESCRIPTOR/MEMORY at OFFSET. */
+
+static int
+elf_get_view (struct backtrace_state *state, int descriptor,
+	      const unsigned char *memory, size_t memory_size, off_t offset,
+	      uint64_t size, backtrace_error_callback error_callback,
+	      void *data, struct elf_view *view)
+{
+  if (memory == NULL)
+    {
+      /* File-backed: map the range and remember that it must be
+	 released with backtrace_release_view.  */
+      view->release = 1;
+      return backtrace_get_view (state, descriptor, offset, size,
+				 error_callback, data, &view->view);
+    }
+  else
+    {
+      /* In-memory image: just point into the caller's buffer.
+	 NOTE(review): offset + size could wrap for pathological
+	 values; assumes callers pass sane offsets -- confirm.  */
+      if ((uint64_t) offset + size > (uint64_t) memory_size)
+	{
+	  error_callback (data, "out of range for in-memory file", 0);
+	  return 0;
+	}
+      view->view.data = (const void *) (memory + offset);
+      view->view.base = NULL;
+      view->view.len = size;
+      view->release = 0;
+      return 1;
+    }
+}
+
+/* Release a view read by elf_get_view. */
+
+static void
+elf_release_view (struct backtrace_state *state, struct elf_view *view,
+		  backtrace_error_callback error_callback, void *data)
+{
+  /* A memory-backed view (release == 0) needs no cleanup.  */
+  if (view->release)
+    backtrace_release_view (state, &view->view, error_callback, data);
+}
+
+/* Compute the CRC-32 of BUF/LEN. This uses the CRC used for
+ .gnu_debuglink files. */
+
+static uint32_t
+elf_crc32 (uint32_t crc, const unsigned char *buf, size_t len)
+{
+  /* Table for the reflected CRC-32 polynomial 0xedb88320, the same
+     CRC used by zlib and by gdb for .gnu_debuglink checksums.  */
+  static const uint32_t crc32_table[256] =
+    {
+      0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
+      0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
+      0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
+      0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+      0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
+      0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+      0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
+      0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+      0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
+      0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
+      0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
+      0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+      0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
+      0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
+      0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
+      0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+      0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
+      0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+      0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
+      0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+      0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
+      0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
+      0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
+      0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+      0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
+      0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
+      0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
+      0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+      0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
+      0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+      0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
+      0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+      0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
+      0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
+      0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
+      0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+      0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
+      0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
+      0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
+      0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+      0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
+      0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+      0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
+      0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+      0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
+      0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
+      0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
+      0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+      0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
+      0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
+      0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
+      0x2d02ef8d
+    };
+  const unsigned char *end;
+
+  /* Standard reflected CRC update: invert on entry and exit, fold one
+     byte per table lookup.  */
+  crc = ~crc;
+  for (end = buf + len; buf < end; ++ buf)
+    crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
+  return ~crc;
+}
+
+/* Return the CRC-32 of the entire file open at DESCRIPTOR. */
+
+static uint32_t
+elf_crc32_file (struct backtrace_state *state, int descriptor,
+		backtrace_error_callback error_callback, void *data)
+{
+  struct stat st;
+  struct backtrace_view file_view;
+  uint32_t ret;
+
+  if (fstat (descriptor, &st) < 0)
+    {
+      error_callback (data, "fstat", errno);
+      return 0;
+    }
+
+  /* Map the whole file at once and checksum it.  */
+  if (!backtrace_get_view (state, descriptor, 0, st.st_size, error_callback,
+			   data, &file_view))
+    return 0;
+
+  ret = elf_crc32 (0, (const unsigned char *) file_view.data, st.st_size);
+
+  backtrace_release_view (state, &file_view, error_callback, data);
+
+  /* Returns 0 on error.  A CRC of 0 also disables the check in
+     elf_open_debugfile_by_debuglink, so an error here is harmless.  */
+  return ret;
+}
+
+/* A dummy callback function used when we can't find a symbol
+ table. */
+
+static void
+elf_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED,
+	    uintptr_t addr ATTRIBUTE_UNUSED,
+	    backtrace_syminfo_callback callback ATTRIBUTE_UNUSED,
+	    backtrace_error_callback error_callback, void *data)
+{
+  /* errnum of -1 reports "information missing" rather than a system
+     error -- presumably per the backtrace.h callback contract.  */
+  error_callback (data, "no symbol table in ELF executable", -1);
+}
+
+/* A callback function used when we can't find any debug info. */
+
+static int
+elf_nodebug (struct backtrace_state *state, uintptr_t pc,
+	     backtrace_full_callback callback,
+	     backtrace_error_callback error_callback, void *data)
+{
+  if (state->syminfo_fn != NULL && state->syminfo_fn != elf_nosyms)
+    {
+      struct backtrace_call_full bdata;
+
+      /* Fetch symbol information so that we can at least get the
+	 function name. */
+
+      bdata.full_callback = callback;
+      bdata.full_error_callback = error_callback;
+      bdata.full_data = data;
+      bdata.ret = 0;
+      state->syminfo_fn (state, pc, backtrace_syminfo_to_full_callback,
+			 backtrace_syminfo_to_full_error_callback, &bdata);
+      return bdata.ret;
+    }
+
+  error_callback (data, "no debug info in ELF executable", -1);
+  return 0;
+}
+
+/* Compare struct elf_symbol for qsort. */
+
+static int
+elf_symbol_compare (const void *v1, const void *v2)
+{
+  const struct elf_symbol *e1 = (const struct elf_symbol *) v1;
+  const struct elf_symbol *e2 = (const struct elf_symbol *) v2;
+
+  /* Order by increasing address so elf_symbol_search can bsearch.  */
+  if (e1->address < e2->address)
+    return -1;
+  else if (e1->address > e2->address)
+    return 1;
+  else
+    return 0;
+}
+
+/* Compare an ADDR against an elf_symbol for bsearch. We allocate one
+ extra entry in the array so that this can look safely at the next
+ entry. */
+
+static int
+elf_symbol_search (const void *vkey, const void *ventry)
+{
+  const uintptr_t *key = (const uintptr_t *) vkey;
+  const struct elf_symbol *entry = (const struct elf_symbol *) ventry;
+  uintptr_t addr;
+
+  /* A match is any ADDR in [address, address + size).  */
+  addr = *key;
+  if (addr < entry->address)
+    return -1;
+  else if (addr >= entry->address + entry->size)
+    return 1;
+  else
+    return 0;
+}
+
+/* Initialize the symbol table info for elf_syminfo. */
+
+static int
+elf_initialize_syminfo (struct backtrace_state *state,
+			uintptr_t base_address,
+			const unsigned char *symtab_data, size_t symtab_size,
+			const unsigned char *strtab, size_t strtab_size,
+			backtrace_error_callback error_callback,
+			void *data, struct elf_syminfo_data *sdata,
+			struct elf_ppc64_opd_data *opd)
+{
+  size_t sym_count;
+  const b_elf_sym *sym;
+  size_t elf_symbol_count;
+  size_t elf_symbol_size;
+  struct elf_symbol *elf_symbols;
+  size_t i;
+  unsigned int j;
+
+  sym_count = symtab_size / sizeof (b_elf_sym);
+
+  /* First pass: we only care about defined function (STT_FUNC) and
+     data object (STT_OBJECT) symbols.  Count them.  */
+  sym = (const b_elf_sym *) symtab_data;
+  elf_symbol_count = 0;
+  for (i = 0; i < sym_count; ++i, ++sym)
+    {
+      int info;
+
+      info = sym->st_info & 0xf;
+      if ((info == STT_FUNC || info == STT_OBJECT)
+	  && sym->st_shndx != SHN_UNDEF)
+	++elf_symbol_count;
+    }
+
+  elf_symbol_size = elf_symbol_count * sizeof (struct elf_symbol);
+  elf_symbols = ((struct elf_symbol *)
+		 backtrace_alloc (state, elf_symbol_size, error_callback,
+				  data));
+  if (elf_symbols == NULL)
+    return 0;
+
+  /* Second pass: fill in the array, applying the same filter.  */
+  sym = (const b_elf_sym *) symtab_data;
+  j = 0;
+  for (i = 0; i < sym_count; ++i, ++sym)
+    {
+      int info;
+
+      info = sym->st_info & 0xf;
+      if (info != STT_FUNC && info != STT_OBJECT)
+	continue;
+      if (sym->st_shndx == SHN_UNDEF)
+	continue;
+      if (sym->st_name >= strtab_size)
+	{
+	  error_callback (data, "symbol string index out of range", 0);
+	  backtrace_free (state, elf_symbols, elf_symbol_size, error_callback,
+			  data);
+	  return 0;
+	}
+      elf_symbols[j].name = (const char *) strtab + sym->st_name;
+      /* Special case PowerPC64 ELFv1 symbols in .opd section, if the symbol
+	 is a function descriptor, read the actual code address from the
+	 descriptor. */
+      if (opd
+	  && sym->st_value >= opd->addr
+	  && sym->st_value < opd->addr + opd->size)
+	elf_symbols[j].address
+	  = *(const b_elf_addr *) (opd->data + (sym->st_value - opd->addr));
+      else
+	elf_symbols[j].address = sym->st_value;
+      elf_symbols[j].address += base_address;
+      elf_symbols[j].size = sym->st_size;
+      ++j;
+    }
+
+  /* Sort by address so elf_syminfo can binary-search.  */
+  backtrace_qsort (elf_symbols, elf_symbol_count, sizeof (struct elf_symbol),
+		   elf_symbol_compare);
+
+  sdata->next = NULL;
+  sdata->symbols = elf_symbols;
+  sdata->count = elf_symbol_count;
+
+  return 1;
+}
+
+/* Add EDATA to the list in STATE. */
+
+static void
+elf_add_syminfo_data (struct backtrace_state *state,
+		      struct elf_syminfo_data *edata)
+{
+  if (!state->threaded)
+    {
+      /* Single-threaded: plain append to the end of the list.  */
+      struct elf_syminfo_data **pp;
+
+      for (pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
+	   *pp != NULL;
+	   pp = &(*pp)->next)
+	;
+      *pp = edata;
+    }
+  else
+    {
+      /* Threaded: lock-free append.  Walk to the current tail and try
+	 to CAS EDATA in; retry from the head if another thread
+	 appended first.  */
+      while (1)
+	{
+	  struct elf_syminfo_data **pp;
+
+	  pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
+
+	  while (1)
+	    {
+	      struct elf_syminfo_data *p;
+
+	      p = backtrace_atomic_load_pointer (pp);
+
+	      if (p == NULL)
+		break;
+
+	      pp = &p->next;
+	    }
+
+	  if (__sync_bool_compare_and_swap (pp, NULL, edata))
+	    break;
+	}
+    }
+}
+
+/* Return the symbol name and value for an ADDR. */
+
+static void
+elf_syminfo (struct backtrace_state *state, uintptr_t addr,
+	     backtrace_syminfo_callback callback,
+	     backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
+	     void *data)
+{
+  struct elf_syminfo_data *edata;
+  struct elf_symbol *sym = NULL;
+
+  /* Search each module's sorted symbol array in turn until one
+     contains ADDR.  */
+  if (!state->threaded)
+    {
+      for (edata = (struct elf_syminfo_data *) state->syminfo_data;
+	   edata != NULL;
+	   edata = edata->next)
+	{
+	  sym = ((struct elf_symbol *)
+		 bsearch (&addr, edata->symbols, edata->count,
+			  sizeof (struct elf_symbol), elf_symbol_search));
+	  if (sym != NULL)
+	    break;
+	}
+    }
+  else
+    {
+      /* Threaded: traverse with atomic loads; the list is append-only
+	 so nodes never disappear under us.  */
+      struct elf_syminfo_data **pp;
+
+      pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
+      while (1)
+	{
+	  edata = backtrace_atomic_load_pointer (pp);
+	  if (edata == NULL)
+	    break;
+
+	  sym = ((struct elf_symbol *)
+		 bsearch (&addr, edata->symbols, edata->count,
+			  sizeof (struct elf_symbol), elf_symbol_search));
+	  if (sym != NULL)
+	    break;
+
+	  pp = &edata->next;
+	}
+    }
+
+  if (sym == NULL)
+    callback (data, addr, NULL, 0, 0);
+  else
+    callback (data, addr, sym->name, sym->address, sym->size);
+}
+
+/* Return whether FILENAME is a symlink. */
+
+static int
+elf_is_symlink (const char *filename)
+{
+  struct stat st;
+
+  /* Treat an lstat failure as "not a symlink".  */
+  if (lstat (filename, &st) < 0)
+    return 0;
+  return S_ISLNK (st.st_mode);
+}
+
+/* Return the results of reading the symlink FILENAME in a buffer
+ allocated by backtrace_alloc. Return the length of the buffer in
+ *LEN. */
+
+static char *
+elf_readlink (struct backtrace_state *state, const char *filename,
+	      backtrace_error_callback error_callback, void *data,
+	      size_t *plen)
+{
+  size_t len;
+  char *buf;
+
+  /* readlink does not tell us the required size, so double the buffer
+     until the result fits with room for the terminating NUL.  */
+  len = 128;
+  while (1)
+    {
+      ssize_t rl;
+
+      buf = (char*)backtrace_alloc (state, len, error_callback, data);
+      if (buf == NULL)
+	return NULL;
+      rl = readlink (filename, buf, len);
+      if (rl < 0)
+	{
+	  backtrace_free (state, buf, len, error_callback, data);
+	  return NULL;
+	}
+      if ((size_t) rl < len - 1)
+	{
+	  buf[rl] = '\0';
+	  /* *PLEN is the allocated buffer length (for the later
+	     backtrace_free), not the string length.  */
+	  *plen = len;
+	  return buf;
+	}
+      backtrace_free (state, buf, len, error_callback, data);
+      len *= 2;
+    }
+}
+
+#define SYSTEM_BUILD_ID_DIR "/usr/lib/debug/.build-id/"
+
+/* Open a separate debug info file, using the build ID to find it.
+ Returns an open file descriptor, or -1.
+
+ The GDB manual says that the only place gdb looks for a debug file
+ when the build ID is known is in /usr/lib/debug/.build-id. */
+
+static int
+elf_open_debugfile_by_buildid (struct backtrace_state *state,
+			       const char *buildid_data, size_t buildid_size,
+			       backtrace_error_callback error_callback,
+			       void *data)
+{
+  const char * const prefix = SYSTEM_BUILD_ID_DIR;
+  const size_t prefix_len = strlen (prefix);
+  const char * const suffix = ".debug";
+  const size_t suffix_len = strlen (suffix);
+  size_t len;
+  char *bd_filename;
+  char *t;
+  size_t i;
+  int ret;
+  int does_not_exist;
+
+  /* Two hex digits per build ID byte, plus the '/' after the first
+     byte and the trailing NUL.  */
+  len = prefix_len + buildid_size * 2 + suffix_len + 2;
+  bd_filename = (char*)backtrace_alloc (state, len, error_callback, data);
+  if (bd_filename == NULL)
+    return -1;
+
+  /* Build PREFIX/xx/yyyy...yy.debug, where xx is the first build ID
+     byte in hex and the rest follow after the slash.  */
+  t = bd_filename;
+  memcpy (t, prefix, prefix_len);
+  t += prefix_len;
+  for (i = 0; i < buildid_size; i++)
+    {
+      unsigned char b;
+      unsigned char nib;
+
+      b = (unsigned char) buildid_data[i];
+      nib = (b & 0xf0) >> 4;
+      *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10;
+      nib = b & 0x0f;
+      *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10;
+      if (i == 0)
+	*t++ = '/';
+    }
+  memcpy (t, suffix, suffix_len);
+  t[suffix_len] = '\0';
+
+  ret = backtrace_open (bd_filename, error_callback, data, &does_not_exist);
+
+  backtrace_free (state, bd_filename, len, error_callback, data);
+
+  /* gdb checks that the debuginfo file has the same build ID note.
+     That seems kind of pointless to me--why would it have the right
+     name but not the right build ID?--so skipping the check. */
+
+  return ret;
+}
+
+/* Try to open a file whose name is PREFIX (length PREFIX_LEN)
+ concatenated with PREFIX2 (length PREFIX2_LEN) concatenated with
+ DEBUGLINK_NAME. Returns an open file descriptor, or -1. */
+
+static int
+elf_try_debugfile (struct backtrace_state *state, const char *prefix,
+		   size_t prefix_len, const char *prefix2, size_t prefix2_len,
+		   const char *debuglink_name,
+		   backtrace_error_callback error_callback, void *data)
+{
+  size_t debuglink_len;
+  size_t try_len;
+  char *Try;			/* Presumably capitalized to avoid the
+				   C++ "try" keyword -- confirm.  */
+  int does_not_exist;
+  int ret;
+
+  debuglink_len = strlen (debuglink_name);
+  try_len = prefix_len + prefix2_len + debuglink_len + 1;
+  Try = (char*)backtrace_alloc (state, try_len, error_callback, data);
+  if (Try == NULL)
+    return -1;
+
+  /* Concatenate PREFIX + PREFIX2 + DEBUGLINK_NAME + NUL.  */
+  memcpy (Try, prefix, prefix_len);
+  memcpy (Try + prefix_len, prefix2, prefix2_len);
+  memcpy (Try + prefix_len + prefix2_len, debuglink_name, debuglink_len);
+  Try[prefix_len + prefix2_len + debuglink_len] = '\0';
+
+  ret = backtrace_open (Try, error_callback, data, &does_not_exist);
+
+  backtrace_free (state, Try, try_len, error_callback, data);
+
+  return ret;
+}
+
+/* Find a separate debug info file, using the debuglink section data
+ to find it. Returns an open file descriptor, or -1. */
+
+static int
+elf_find_debugfile_by_debuglink (struct backtrace_state *state,
+				 const char *filename,
+				 const char *debuglink_name,
+				 backtrace_error_callback error_callback,
+				 void *data)
+{
+  int ret;
+  char *alc;
+  size_t alc_len;
+  const char *slash;
+  int ddescriptor;
+  const char *prefix;
+  size_t prefix_len;
+
+  /* Resolve symlinks in FILENAME.  Since FILENAME is fairly likely to
+     be /proc/self/exe, symlinks are common.  We don't try to resolve
+     the whole path name, just the base name.
+     NOTE(review): a cyclic symlink chain would loop here forever;
+     presumably not a concern for real executables -- confirm.  */
+  ret = -1;
+  alc = NULL;
+  alc_len = 0;
+  while (elf_is_symlink (filename))
+    {
+      char *new_buf;
+      size_t new_len;
+
+      new_buf = elf_readlink (state, filename, error_callback, data, &new_len);
+      if (new_buf == NULL)
+	break;
+
+      if (new_buf[0] == '/')
+	filename = new_buf;
+      else
+	{
+	  slash = strrchr (filename, '/');
+	  if (slash == NULL)
+	    filename = new_buf;
+	  else
+	    {
+	      /* Relative link target: splice it onto FILENAME's
+		 directory part.  */
+	      size_t clen;
+	      char *c;
+
+	      slash++;
+	      clen = slash - filename + strlen (new_buf) + 1;
+	      c = (char*)backtrace_alloc (state, clen, error_callback, data);
+	      if (c == NULL)
+		goto done;
+
+	      memcpy (c, filename, slash - filename);
+	      memcpy (c + (slash - filename), new_buf, strlen (new_buf));
+	      c[slash - filename + strlen (new_buf)] = '\0';
+	      backtrace_free (state, new_buf, new_len, error_callback, data);
+	      filename = c;
+	      new_buf = c;
+	      new_len = clen;
+	    }
+	}
+
+      /* Keep only the most recent resolved name allocated.  */
+      if (alc != NULL)
+	backtrace_free (state, alc, alc_len, error_callback, data);
+      alc = new_buf;
+      alc_len = new_len;
+    }
+
+  /* Look for DEBUGLINK_NAME in the same directory as FILENAME. */
+
+  slash = strrchr (filename, '/');
+  if (slash == NULL)
+    {
+      prefix = "";
+      prefix_len = 0;
+    }
+  else
+    {
+      slash++;
+      prefix = filename;
+      prefix_len = slash - filename;
+    }
+
+  ddescriptor = elf_try_debugfile (state, prefix, prefix_len, "", 0,
+				   debuglink_name, error_callback, data);
+  if (ddescriptor >= 0)
+    {
+      ret = ddescriptor;
+      goto done;
+    }
+
+  /* Look for DEBUGLINK_NAME in a .debug subdirectory of FILENAME. */
+
+  ddescriptor = elf_try_debugfile (state, prefix, prefix_len, ".debug/",
+				   strlen (".debug/"), debuglink_name,
+				   error_callback, data);
+  if (ddescriptor >= 0)
+    {
+      ret = ddescriptor;
+      goto done;
+    }
+
+  /* Look for DEBUGLINK_NAME in /usr/lib/debug. */
+
+  ddescriptor = elf_try_debugfile (state, "/usr/lib/debug/",
+				   strlen ("/usr/lib/debug/"), prefix,
+				   prefix_len, debuglink_name,
+				   error_callback, data);
+  if (ddescriptor >= 0)
+    ret = ddescriptor;
+
+ done:
+  if (alc != NULL && alc_len > 0)
+    backtrace_free (state, alc, alc_len, error_callback, data);
+  return ret;
+}
+
+/* Open a separate debug info file, using the debuglink section data
+ to find it. Returns an open file descriptor, or -1. */
+
+static int
+elf_open_debugfile_by_debuglink (struct backtrace_state *state,
+				 const char *filename,
+				 const char *debuglink_name,
+				 uint32_t debuglink_crc,
+				 backtrace_error_callback error_callback,
+				 void *data)
+{
+  int ddescriptor;
+
+  ddescriptor = elf_find_debugfile_by_debuglink (state, filename,
+						 debuglink_name,
+						 error_callback, data);
+  if (ddescriptor < 0)
+    return -1;
+
+  /* A CRC of 0 means "no checksum available"; skip the check.  */
+  if (debuglink_crc != 0)
+    {
+      uint32_t got_crc;
+
+      got_crc = elf_crc32_file (state, ddescriptor, error_callback, data);
+      if (got_crc != debuglink_crc)
+	{
+	  backtrace_close (ddescriptor, error_callback, data);
+	  return -1;
+	}
+    }
+
+  return ddescriptor;
+}
+
+/* A function useful for setting a breakpoint for an inflation failure
+ when this code is compiled with -g. */
+
+static void
+elf_uncompress_failed(void)
+{
+  /* Intentionally empty; exists only as a breakpoint target.  */
+}
+
+/* *PVAL is the current value being read from the stream, and *PBITS
+ is the number of valid bits. Ensure that *PVAL holds at least 15
+ bits by reading additional bits from *PPIN, up to PINEND, as
+ needed. Updates *PPIN, *PVAL and *PBITS. Returns 1 on success, 0
+ on error. */
+
+static int
+elf_zlib_fetch (const unsigned char **ppin, const unsigned char *pinend,
+		uint64_t *pval, unsigned int *pbits)
+{
+  unsigned int bits;
+  const unsigned char *pin;
+  uint64_t val;
+  uint32_t next;
+
+  bits = *pbits;
+  if (bits >= 15)
+    return 1;
+  pin = *ppin;
+  val = *pval;
+
+  /* We always consume 32 bits (4 bytes) at a time below.  */
+  if (unlikely (pinend - pin < 4))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
+    && defined(__ORDER_BIG_ENDIAN__) \
+    && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
+        || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+  /* We've ensured that PIN is aligned.
+     NOTE(review): the alignment guarantee is established by the
+     caller, not visible here -- confirm in elf_zlib_inflate.  */
+  next = *(const uint32_t *)pin;
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  next = __builtin_bswap32 (next);
+#endif
+#else
+  /* Portable little-endian assembly of the next 32 bits.  */
+  next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24);
+#endif
+
+  /* Deflate streams are read least-significant-bit first, so new bits
+     are appended above the ones already held.  */
+  val |= (uint64_t)next << bits;
+  bits += 32;
+  pin += 4;
+
+  /* We will need the next four bytes soon. */
+  __builtin_prefetch (pin, 0, 0);
+
+  *ppin = pin;
+  *pval = val;
+  *pbits = bits;
+  return 1;
+}
+
+/* Huffman code tables, like the rest of the zlib format, are defined
+ by RFC 1951. We store a Huffman code table as a series of tables
+ stored sequentially in memory. Each entry in a table is 16 bits.
+ The first, main, table has 256 entries. It is followed by a set of
+ secondary tables of length 2 to 128 entries. The maximum length of
+ a code sequence in the deflate format is 15 bits, so that is all we
+ need. Each secondary table has an index, which is the offset of
+ the table in the overall memory storage.
+
+ The deflate format says that all codes of a given bit length are
+ lexicographically consecutive. Perhaps we could have 130 values
+ that require a 15-bit code, perhaps requiring three secondary
+ tables of size 128. I don't know if this is actually possible, but
+ it suggests that the maximum size required for secondary tables is
+ 3 * 128 + 3 * 64 ... == 768. The zlib enough program reports 660
+ as the maximum. We permit 768, since in addition to the 256 for
+ the primary table, with two bytes per entry, and with the two
+ tables we need, that gives us a page.
+
+ A single table entry needs to store a value or (for the main table
+ only) the index and size of a secondary table. Values range from 0
+ to 285, inclusive. Secondary table indexes, per above, range from
+ 0 to 510. For a value we need to store the number of bits we need
+ to determine that value (one value may appear multiple times in the
+ table), which is 1 to 8. For a secondary table we need to store
+ the number of bits used to index into the table, which is 1 to 7.
+ And of course we need 1 bit to decide whether we have a value or a
+ secondary table index. So each entry needs 9 bits for value/table
+ index, 3 bits for size, 1 bit what it is. For simplicity we use 16
+ bits per entry. */
+
+/* Number of entries we allocate for one code table. We get a page
+   for the two code tables we need. */
+
+#define HUFFMAN_TABLE_SIZE (1024)
+
+/* Bit masks and shifts for the values in the table. */
+
+#define HUFFMAN_VALUE_MASK 0x01ff
+#define HUFFMAN_BITS_SHIFT 9
+#define HUFFMAN_BITS_MASK 0x7
+#define HUFFMAN_SECONDARY_SHIFT 12
+
+/* For working memory while inflating we need two code tables, we need
+ an array of code lengths (max value 15, so we use unsigned char),
+ and an array of unsigned shorts used while building a table. The
+ latter two arrays must be large enough to hold the maximum number
+ of code lengths, which RFC 1951 defines as 286 + 30. */
+
+#define ZDEBUG_TABLE_SIZE \
+ (2 * HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
+ + (286 + 30) * sizeof (uint16_t) \
+ + (286 + 30) * sizeof (unsigned char))
+
+#define ZDEBUG_TABLE_CODELEN_OFFSET \
+ (2 * HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
+ + (286 + 30) * sizeof (uint16_t))
+
+#define ZDEBUG_TABLE_WORK_OFFSET \
+ (2 * HUFFMAN_TABLE_SIZE * sizeof (uint16_t))
+
+#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
+
+/* Used by the main function that generates the fixed table to learn
+ the table size. */
+static size_t final_next_secondary;
+
+#endif
+
+/* Build a Huffman code table from an array of lengths in CODES of
+ length CODES_LEN. The table is stored into *TABLE. ZDEBUG_TABLE
+ is the same as for elf_zlib_inflate, used to find some work space.
+ Returns 1 on success, 0 on error. */
+
+static int
+elf_zlib_inflate_table (unsigned char *codes, size_t codes_len,
+ uint16_t *zdebug_table, uint16_t *table)
+{
+ uint16_t count[16];
+ uint16_t start[16];
+ uint16_t prev[16];
+ uint16_t firstcode[7];
+ uint16_t *next;
+ size_t i;
+ size_t j;
+ unsigned int code;
+ size_t next_secondary;
+
+ /* Count the number of code of each length. Set NEXT[val] to be the
+ next value after VAL with the same bit length. */
+
+ next = (uint16_t *) (((unsigned char *) zdebug_table)
+ + ZDEBUG_TABLE_WORK_OFFSET);
+
+ memset (&count[0], 0, 16 * sizeof (uint16_t));
+ for (i = 0; i < codes_len; ++i)
+ {
+ if (unlikely (codes[i] >= 16))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+
+ if (count[codes[i]] == 0)
+ {
+ start[codes[i]] = i;
+ prev[codes[i]] = i;
+ }
+ else
+ {
+ next[prev[codes[i]]] = i;
+ prev[codes[i]] = i;
+ }
+
+ ++count[codes[i]];
+ }
+
+ /* For each length, fill in the table for the codes of that
+ length. */
+
+ memset (table, 0, HUFFMAN_TABLE_SIZE * sizeof (uint16_t));
+
+ /* Handle the values that do not require a secondary table. */
+
+ code = 0;
+ for (j = 1; j <= 8; ++j)
+ {
+ unsigned int jcnt;
+ unsigned int val;
+
+ jcnt = count[j];
+ if (jcnt == 0)
+ continue;
+
+ if (unlikely (jcnt > (1U << j)))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+
+ /* There are JCNT values that have this length, the values
+ starting from START[j] continuing through NEXT[VAL]. Those
+ values are assigned consecutive values starting at CODE. */
+
+ val = start[j];
+ for (i = 0; i < jcnt; ++i)
+ {
+ uint16_t tval;
+ size_t ind;
+ unsigned int incr;
+
+ /* In the compressed bit stream, the value VAL is encoded as
+ J bits with the value C. */
+
+ if (unlikely ((val & ~HUFFMAN_VALUE_MASK) != 0))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+
+ tval = val | ((j - 1) << HUFFMAN_BITS_SHIFT);
+
+ /* The table lookup uses 8 bits. If J is less than 8, we
+ don't know what the other bits will be. We need to fill
+ in all possibilities in the table. Since the Huffman
+ code is unambiguous, those entries can't be used for any
+ other code. */
+
+ for (ind = code; ind < 0x100; ind += 1 << j)
+ {
+ if (unlikely (table[ind] != 0))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+ table[ind] = tval;
+ }
+
+ /* Advance to the next value with this length. */
+ if (i + 1 < jcnt)
+ val = next[val];
+
+ /* The Huffman codes are stored in the bitstream with the
+ most significant bit first, as is required to make them
+ unambiguous. The effect is that when we read them from
+ the bitstream we see the bit sequence in reverse order:
+ the most significant bit of the Huffman code is the least
+ significant bit of the value we read from the bitstream.
+ That means that to make our table lookups work, we need
+ to reverse the bits of CODE. Since reversing bits is
+ tedious and in general requires using a table, we instead
+ increment CODE in reverse order. That is, if the number
+ of bits we are currently using, here named J, is 3, we
+ count as 000, 100, 010, 110, 001, 101, 011, 111, which is
+ to say the numbers from 0 to 7 but with the bits
+ reversed. Going to more bits, aka incrementing J,
+ effectively just adds more zero bits as the beginning,
+ and as such does not change the numeric value of CODE.
+
+ To increment CODE of length J in reverse order, find the
+ most significant zero bit and set it to one while
+ clearing all higher bits. In other words, add 1 modulo
+ 2^J, only reversed. */
+
+ incr = 1U << (j - 1);
+ while ((code & incr) != 0)
+ incr >>= 1;
+ if (incr == 0)
+ code = 0;
+ else
+ {
+ code &= incr - 1;
+ code += incr;
+ }
+ }
+ }
+
+ /* Handle the values that require a secondary table. */
+
+ /* Set FIRSTCODE, the number at which the codes start, for each
+ length. */
+
+ for (j = 9; j < 16; j++)
+ {
+ unsigned int jcnt;
+ unsigned int k;
+
+ jcnt = count[j];
+ if (jcnt == 0)
+ continue;
+
+ /* There are JCNT values that have this length, the values
+ starting from START[j]. Those values are assigned
+ consecutive values starting at CODE. */
+
+ firstcode[j - 9] = code;
+
+ /* Reverse add JCNT to CODE modulo 2^J. */
+ for (k = 0; k < j; ++k)
+ {
+ if ((jcnt & (1U << k)) != 0)
+ {
+ unsigned int m;
+ unsigned int bit;
+
+ bit = 1U << (j - k - 1);
+ for (m = 0; m < j - k; ++m, bit >>= 1)
+ {
+ if ((code & bit) == 0)
+ {
+ code += bit;
+ break;
+ }
+ code &= ~bit;
+ }
+ jcnt &= ~(1U << k);
+ }
+ }
+ if (unlikely (jcnt != 0))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+ }
+
+ /* For J from 9 to 15, inclusive, we store COUNT[J] consecutive
+ values starting at START[J] with consecutive codes starting at
+ FIRSTCODE[J - 9]. In the primary table we need to point to the
+ secondary table, and the secondary table will be indexed by J - 9
+ bits. We count down from 15 so that we install the larger
+ secondary tables first, as the smaller ones may be embedded in
+ the larger ones. */
+
+ next_secondary = 0; /* Index of next secondary table (after primary). */
+ for (j = 15; j >= 9; j--)
+ {
+ unsigned int jcnt;
+ unsigned int val;
+ size_t primary; /* Current primary index. */
+ size_t secondary; /* Offset to current secondary table. */
+ size_t secondary_bits; /* Bit size of current secondary table. */
+
+ jcnt = count[j];
+ if (jcnt == 0)
+ continue;
+
+ val = start[j];
+ code = firstcode[j - 9];
+ primary = 0x100;
+ secondary = 0;
+ secondary_bits = 0;
+ for (i = 0; i < jcnt; ++i)
+ {
+ uint16_t tval;
+ size_t ind;
+ unsigned int incr;
+
+ if ((code & 0xff) != primary)
+ {
+ uint16_t tprimary;
+
+ /* Fill in a new primary table entry. */
+
+ primary = code & 0xff;
+
+ tprimary = table[primary];
+ if (tprimary == 0)
+ {
+ /* Start a new secondary table. */
+
+ if (unlikely ((next_secondary & HUFFMAN_VALUE_MASK)
+ != next_secondary))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+
+ secondary = next_secondary;
+ secondary_bits = j - 8;
+ next_secondary += 1 << secondary_bits;
+ table[primary] = (secondary
+ + ((j - 8) << HUFFMAN_BITS_SHIFT)
+ + (1U << HUFFMAN_SECONDARY_SHIFT));
+ }
+ else
+ {
+ /* There is an existing entry. It had better be a
+ secondary table with enough bits. */
+ if (unlikely ((tprimary & (1U << HUFFMAN_SECONDARY_SHIFT))
+ == 0))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+ secondary = tprimary & HUFFMAN_VALUE_MASK;
+ secondary_bits = ((tprimary >> HUFFMAN_BITS_SHIFT)
+ & HUFFMAN_BITS_MASK);
+ if (unlikely (secondary_bits < j - 8))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+ }
+ }
+
+ /* Fill in secondary table entries. */
+
+ tval = val | ((j - 8) << HUFFMAN_BITS_SHIFT);
+
+ for (ind = code >> 8;
+ ind < (1U << secondary_bits);
+ ind += 1U << (j - 8))
+ {
+ if (unlikely (table[secondary + 0x100 + ind] != 0))
+ {
+ elf_uncompress_failed ();
+ return 0;
+ }
+ table[secondary + 0x100 + ind] = tval;
+ }
+
+ if (i + 1 < jcnt)
+ val = next[val];
+
+ incr = 1U << (j - 1);
+ while ((code & incr) != 0)
+ incr >>= 1;
+ if (incr == 0)
+ code = 0;
+ else
+ {
+ code &= incr - 1;
+ code += incr;
+ }
+ }
+ }
+
+#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
+ final_next_secondary = next_secondary;
+#endif
+
+ return 1;
+}
+
+#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
+
+/* Used to generate the fixed Huffman table for block type 1. */
+
+#include <stdio.h>
+
+static uint16_t table[ZDEBUG_TABLE_SIZE];
+static unsigned char codes[288];
+
/* Standalone generator: emits C source for the fixed literal/length and
   distance Huffman tables required by DEFLATE block type 1 (RFC 1951
   3.2.6).  The generated arrays are pasted below in this file as
   elf_zlib_default_table and elf_zlib_default_dist_table.
   NOTE(review): exit/EXIT_FAILURE need <stdlib.h>, which is not included
   in this #ifdef section -- presumably included earlier in the file;
   confirm.  */
+int
+main ()
+{
+  size_t i;
+
+  /* Code lengths for the fixed literal/length alphabet (RFC 1951). */
+  for (i = 0; i <= 143; ++i)
+    codes[i] = 8;
+  for (i = 144; i <= 255; ++i)
+    codes[i] = 9;
+  for (i = 256; i <= 279; ++i)
+    codes[i] = 7;
+  for (i = 280; i <= 287; ++i)
+    codes[i] = 8;
+  if (!elf_zlib_inflate_table (&codes[0], 288, &table[0], &table[0]))
+    {
+      fprintf (stderr, "elf_zlib_inflate_table failed\n");
+      exit (EXIT_FAILURE);
+    }
+
+  printf ("static const uint16_t elf_zlib_default_table[%#zx] =\n",
+          final_next_secondary + 0x100);
+  printf ("{\n");
+  for (i = 0; i < final_next_secondary + 0x100; i += 8)
+    {
+      size_t j;
+
+      printf (" ");
+      for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j)
+        printf (" %#x,", table[j]);
+      printf ("\n");
+    }
+  printf ("};\n");
+  printf ("\n");
+
+  /* All 32 distance codes use 5 bits in the fixed tables. */
+  for (i = 0; i < 32; ++i)
+    codes[i] = 5;
+  if (!elf_zlib_inflate_table (&codes[0], 32, &table[0], &table[0]))
+    {
+      fprintf (stderr, "elf_zlib_inflate_table failed\n");
+      exit (EXIT_FAILURE);
+    }
+
+  printf ("static const uint16_t elf_zlib_default_dist_table[%#zx] =\n",
+          final_next_secondary + 0x100);
+  printf ("{\n");
+  for (i = 0; i < final_next_secondary + 0x100; i += 8)
+    {
+      size_t j;
+
+      printf (" ");
+      for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j)
+        printf (" %#x,", table[j]);
+      printf ("\n");
+    }
+  printf ("};\n");
+
+  return 0;
+}
+
+#endif
+
+/* The fixed tables generated by the #ifdef'ed out main function
+   above. */
+
/* Fixed literal/length decode table: a 0x100-entry primary table indexed
   by 8 input bits, followed by secondary tables for 9-bit codes (total
   size 0x170).  Each uint16_t packs value, bits-minus-one, and a
   secondary-table flag as consumed by elf_zlib_inflate below. */
+static const uint16_t elf_zlib_default_table[0x170] =
+{
+  0xd00, 0xe50, 0xe10, 0xf18, 0xd10, 0xe70, 0xe30, 0x1230,
+  0xd08, 0xe60, 0xe20, 0x1210, 0xe00, 0xe80, 0xe40, 0x1250,
+  0xd04, 0xe58, 0xe18, 0x1200, 0xd14, 0xe78, 0xe38, 0x1240,
+  0xd0c, 0xe68, 0xe28, 0x1220, 0xe08, 0xe88, 0xe48, 0x1260,
+  0xd02, 0xe54, 0xe14, 0xf1c, 0xd12, 0xe74, 0xe34, 0x1238,
+  0xd0a, 0xe64, 0xe24, 0x1218, 0xe04, 0xe84, 0xe44, 0x1258,
+  0xd06, 0xe5c, 0xe1c, 0x1208, 0xd16, 0xe7c, 0xe3c, 0x1248,
+  0xd0e, 0xe6c, 0xe2c, 0x1228, 0xe0c, 0xe8c, 0xe4c, 0x1268,
+  0xd01, 0xe52, 0xe12, 0xf1a, 0xd11, 0xe72, 0xe32, 0x1234,
+  0xd09, 0xe62, 0xe22, 0x1214, 0xe02, 0xe82, 0xe42, 0x1254,
+  0xd05, 0xe5a, 0xe1a, 0x1204, 0xd15, 0xe7a, 0xe3a, 0x1244,
+  0xd0d, 0xe6a, 0xe2a, 0x1224, 0xe0a, 0xe8a, 0xe4a, 0x1264,
+  0xd03, 0xe56, 0xe16, 0xf1e, 0xd13, 0xe76, 0xe36, 0x123c,
+  0xd0b, 0xe66, 0xe26, 0x121c, 0xe06, 0xe86, 0xe46, 0x125c,
+  0xd07, 0xe5e, 0xe1e, 0x120c, 0xd17, 0xe7e, 0xe3e, 0x124c,
+  0xd0f, 0xe6e, 0xe2e, 0x122c, 0xe0e, 0xe8e, 0xe4e, 0x126c,
+  0xd00, 0xe51, 0xe11, 0xf19, 0xd10, 0xe71, 0xe31, 0x1232,
+  0xd08, 0xe61, 0xe21, 0x1212, 0xe01, 0xe81, 0xe41, 0x1252,
+  0xd04, 0xe59, 0xe19, 0x1202, 0xd14, 0xe79, 0xe39, 0x1242,
+  0xd0c, 0xe69, 0xe29, 0x1222, 0xe09, 0xe89, 0xe49, 0x1262,
+  0xd02, 0xe55, 0xe15, 0xf1d, 0xd12, 0xe75, 0xe35, 0x123a,
+  0xd0a, 0xe65, 0xe25, 0x121a, 0xe05, 0xe85, 0xe45, 0x125a,
+  0xd06, 0xe5d, 0xe1d, 0x120a, 0xd16, 0xe7d, 0xe3d, 0x124a,
+  0xd0e, 0xe6d, 0xe2d, 0x122a, 0xe0d, 0xe8d, 0xe4d, 0x126a,
+  0xd01, 0xe53, 0xe13, 0xf1b, 0xd11, 0xe73, 0xe33, 0x1236,
+  0xd09, 0xe63, 0xe23, 0x1216, 0xe03, 0xe83, 0xe43, 0x1256,
+  0xd05, 0xe5b, 0xe1b, 0x1206, 0xd15, 0xe7b, 0xe3b, 0x1246,
+  0xd0d, 0xe6b, 0xe2b, 0x1226, 0xe0b, 0xe8b, 0xe4b, 0x1266,
+  0xd03, 0xe57, 0xe17, 0xf1f, 0xd13, 0xe77, 0xe37, 0x123e,
+  0xd0b, 0xe67, 0xe27, 0x121e, 0xe07, 0xe87, 0xe47, 0x125e,
+  0xd07, 0xe5f, 0xe1f, 0x120e, 0xd17, 0xe7f, 0xe3f, 0x124e,
+  0xd0f, 0xe6f, 0xe2f, 0x122e, 0xe0f, 0xe8f, 0xe4f, 0x126e,
+  0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297,
+  0x298, 0x299, 0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f,
+  0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x2a4, 0x2a5, 0x2a6, 0x2a7,
+  0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad, 0x2ae, 0x2af,
+  0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7,
+  0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf,
+  0x2c0, 0x2c1, 0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7,
+  0x2c8, 0x2c9, 0x2ca, 0x2cb, 0x2cc, 0x2cd, 0x2ce, 0x2cf,
+  0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5, 0x2d6, 0x2d7,
+  0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df,
+  0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7,
+  0x2e8, 0x2e9, 0x2ea, 0x2eb, 0x2ec, 0x2ed, 0x2ee, 0x2ef,
+  0x2f0, 0x2f1, 0x2f2, 0x2f3, 0x2f4, 0x2f5, 0x2f6, 0x2f7,
+  0x2f8, 0x2f9, 0x2fa, 0x2fb, 0x2fc, 0x2fd, 0x2fe, 0x2ff,
+};
+
/* Fixed distance decode table: 256 primary entries, no secondary tables
   (all fixed distance codes are 5 bits, RFC 1951 3.2.6).  The 32-entry
   pattern repeats eight times because only the low 5 bits of the 8-bit
   index are significant. */
+static const uint16_t elf_zlib_default_dist_table[0x100] =
+{
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
+  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
+  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
+  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
+};
+
+/* Inflate a zlib stream from PIN/SIN to POUT/SOUT. Return 1 on
+   success, 0 on some error parsing the stream. */
+
/* RFC 1950/1951 decoder specialized for debug sections: ZDEBUG_TABLE is
   caller-provided scratch for dynamic Huffman tables; the Adler-32
   trailer is NOT verified here (see elf_zlib_verify_checksum). */
+static int
+elf_zlib_inflate (const unsigned char *pin, size_t sin, uint16_t *zdebug_table,
+                  unsigned char *pout, size_t sout)
+{
+  unsigned char *porigout;
+  const unsigned char *pinend;
+  unsigned char *poutend;
+
+  /* We can apparently see multiple zlib streams concatenated
+     together, so keep going as long as there is something to read.
+     The last 4 bytes are the checksum. */
+  porigout = pout;
+  pinend = pin + sin;
+  poutend = pout + sout;
+  while ((pinend - pin) > 4)
+    {
+      uint64_t val;
+      unsigned int bits;
+      int last;
+
+      /* Read the two byte zlib header. */
+
+      if (unlikely ((pin[0] & 0xf) != 8)) /* 8 is zlib encoding. */
+        {
+          /* Unknown compression method. */
+          elf_uncompress_failed ();
+          return 0;
+        }
+      if (unlikely ((pin[0] >> 4) > 7))
+        {
+          /* Window size too large. Other than this check, we don't
+             care about the window size. */
+          elf_uncompress_failed ();
+          return 0;
+        }
+      if (unlikely ((pin[1] & 0x20) != 0))
+        {
+          /* Stream expects a predefined dictionary, but we have no
+             dictionary. */
+          elf_uncompress_failed ();
+          return 0;
+        }
      /* RFC 1950: the 16-bit CMF/FLG header is a multiple of 31. */
+      val = (pin[0] << 8) | pin[1];
+      if (unlikely (val % 31 != 0))
+        {
+          /* Header check failure. */
+          elf_uncompress_failed ();
+          return 0;
+        }
+      pin += 2;
+
+      /* Align PIN to a 32-bit boundary. */
+
+      val = 0;
+      bits = 0;
+      while ((((uintptr_t) pin) & 3) != 0)
+        {
+          val |= (uint64_t)*pin << bits;
+          bits += 8;
+          ++pin;
+        }
+
+      /* Read blocks until one is marked last. */
+
+      last = 0;
+
+      while (!last)
+        {
+          unsigned int type;
+          const uint16_t *tlit;
+          const uint16_t *tdist;
+
+          if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+            return 0;
+
+          last = val & 1;
+          type = (val >> 1) & 3;
+          val >>= 3;
+          bits -= 3;
+
+          if (unlikely (type == 3))
+            {
+              /* Invalid block type. */
+              elf_uncompress_failed ();
+              return 0;
+            }
+
+          if (type == 0)
+            {
+              uint16_t len;
+              uint16_t lenc;
+
+              /* An uncompressed block. */
+
+              /* If we've read ahead more than a byte, back up. */
+              while (bits > 8)
+                {
+                  --pin;
+                  bits -= 8;
+                }
+
+              val = 0;
+              bits = 0;
+              if (unlikely ((pinend - pin) < 4))
+                {
+                  /* Missing length. */
+                  elf_uncompress_failed ();
+                  return 0;
+                }
              /* LEN and its one's complement NLEN, little-endian. */
+              len = pin[0] | (pin[1] << 8);
+              lenc = pin[2] | (pin[3] << 8);
+              pin += 4;
+              lenc = ~lenc;
+              if (unlikely (len != lenc))
+                {
+                  /* Corrupt data. */
+                  elf_uncompress_failed ();
+                  return 0;
+                }
+              if (unlikely (len > (unsigned int) (pinend - pin)
+                            || len > (unsigned int) (poutend - pout)))
+                {
+                  /* Not enough space in buffers. */
+                  elf_uncompress_failed ();
+                  return 0;
+                }
+              memcpy (pout, pin, len);
+              pout += len;
+              pin += len;
+
+              /* Align PIN. */
+              while ((((uintptr_t) pin) & 3) != 0)
+                {
+                  val |= (uint64_t)*pin << bits;
+                  bits += 8;
+                  ++pin;
+                }
+
+              /* Go around to read the next block. */
+              continue;
+            }
+
+          if (type == 1)
+            {
+              tlit = elf_zlib_default_table;
+              tdist = elf_zlib_default_dist_table;
+            }
+          else
+            {
+              unsigned int nlit;
+              unsigned int ndist;
+              unsigned int nclen;
+              unsigned char codebits[19];
+              unsigned char *plenbase;
+              unsigned char *plen;
+              unsigned char *plenend;
+
+              /* Read a Huffman encoding table. The various magic
+                 numbers here are from RFC 1951. */
+
+              if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                return 0;
+
+              nlit = (val & 0x1f) + 257;
+              val >>= 5;
+              ndist = (val & 0x1f) + 1;
+              val >>= 5;
+              nclen = (val & 0xf) + 4;
+              val >>= 4;
+              bits -= 14;
+              if (unlikely (nlit > 286 || ndist > 30))
+                {
+                  /* Values out of range. */
+                  elf_uncompress_failed ();
+                  return 0;
+                }
+
+              /* Read and build the table used to compress the
+                 literal, length, and distance codes. */
+
+              memset(&codebits[0], 0, 19);
+
+              /* There are always at least 4 elements in the
+                 table. */
+
+              if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                return 0;
+
              /* The 3-bit code lengths arrive in the fixed permuted
                 order 16 17 18 0 8 7 9 6 ... (RFC 1951 3.2.7); this
                 ladder unrolls that order, stopping after NCLEN. */
+              codebits[16] = val & 7;
+              codebits[17] = (val >> 3) & 7;
+              codebits[18] = (val >> 6) & 7;
+              codebits[0] = (val >> 9) & 7;
+              val >>= 12;
+              bits -= 12;
+
+              if (nclen == 4)
+                goto codebitsdone;
+
+              codebits[8] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 5)
+                goto codebitsdone;
+
+              if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                return 0;
+
+              codebits[7] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 6)
+                goto codebitsdone;
+
+              codebits[9] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 7)
+                goto codebitsdone;
+
+              codebits[6] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 8)
+                goto codebitsdone;
+
+              codebits[10] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 9)
+                goto codebitsdone;
+
+              codebits[5] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 10)
+                goto codebitsdone;
+
+              if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                return 0;
+
+              codebits[11] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 11)
+                goto codebitsdone;
+
+              codebits[4] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 12)
+                goto codebitsdone;
+
+              codebits[12] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 13)
+                goto codebitsdone;
+
+              codebits[3] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 14)
+                goto codebitsdone;
+
+              codebits[13] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 15)
+                goto codebitsdone;
+
+              if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                return 0;
+
+              codebits[2] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 16)
+                goto codebitsdone;
+
+              codebits[14] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 17)
+                goto codebitsdone;
+
+              codebits[1] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+              if (nclen == 18)
+                goto codebitsdone;
+
+              codebits[15] = val & 7;
+              val >>= 3;
+              bits -= 3;
+
+            codebitsdone:
+
+              if (!elf_zlib_inflate_table (codebits, 19, zdebug_table,
+                                           zdebug_table))
+                return 0;
+
+              /* Read the compressed bit lengths of the literal,
+                 length, and distance codes. We have allocated space
+                 at the end of zdebug_table to hold them. */
+
+              plenbase = (((unsigned char *) zdebug_table)
+                          + ZDEBUG_TABLE_CODELEN_OFFSET);
+              plen = plenbase;
+              plenend = plen + nlit + ndist;
+              while (plen < plenend)
+                {
+                  uint16_t t;
+                  unsigned int b;
+                  uint16_t v;
+
+                  if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                    return 0;
+
+                  t = zdebug_table[val & 0xff];
+
+                  /* The compression here uses bit lengths up to 7, so
+                     a secondary table is never necessary. */
+                  if (unlikely ((t & (1U << HUFFMAN_SECONDARY_SHIFT)) != 0))
+                    {
+                      elf_uncompress_failed ();
+                      return 0;
+                    }
+
+                  b = (t >> HUFFMAN_BITS_SHIFT) & HUFFMAN_BITS_MASK;
+                  val >>= b + 1;
+                  bits -= b + 1;
+
+                  v = t & HUFFMAN_VALUE_MASK;
+                  if (v < 16)
+                    *plen++ = v;
+                  else if (v == 16)
+                    {
+                      unsigned int c;
+                      unsigned int prev;
+
+                      /* Copy previous entry 3 to 6 times. */
+
+                      if (unlikely (plen == plenbase))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      /* We used up to 7 bits since the last
+                         elf_zlib_fetch, so we have at least 8 bits
+                         available here. */
+
+                      c = 3 + (val & 0x3);
+                      val >>= 2;
+                      bits -= 2;
+                      if (unlikely ((unsigned int) (plenend - plen) < c))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      prev = plen[-1];
+                      switch (c)
+                        {
+                        case 6:
+                          *plen++ = prev;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 5:
+                          *plen++ = prev;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 4:
+                          *plen++ = prev;
+                        }
+                      *plen++ = prev;
+                      *plen++ = prev;
+                      *plen++ = prev;
+                    }
+                  else if (v == 17)
+                    {
+                      unsigned int c;
+
+                      /* Store zero 3 to 10 times. */
+
+                      /* We used up to 7 bits since the last
+                         elf_zlib_fetch, so we have at least 8 bits
+                         available here. */
+
+                      c = 3 + (val & 0x7);
+                      val >>= 3;
+                      bits -= 3;
+                      if (unlikely ((unsigned int) (plenend - plen) < c))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      switch (c)
+                        {
+                        case 10:
+                          *plen++ = 0;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 9:
+                          *plen++ = 0;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 8:
+                          *plen++ = 0;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 7:
+                          *plen++ = 0;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 6:
+                          *plen++ = 0;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 5:
+                          *plen++ = 0;
+                          ATTRIBUTE_FALLTHROUGH;
+                        case 4:
+                          *plen++ = 0;
+                        }
+                      *plen++ = 0;
+                      *plen++ = 0;
+                      *plen++ = 0;
+                    }
+                  else if (v == 18)
+                    {
+                      unsigned int c;
+
+                      /* Store zero 11 to 138 times. */
+
+                      /* We used up to 7 bits since the last
+                         elf_zlib_fetch, so we have at least 8 bits
+                         available here. */
+
+                      c = 11 + (val & 0x7f);
+                      val >>= 7;
+                      bits -= 7;
+                      if (unlikely ((unsigned int) (plenend - plen) < c))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      memset (plen, 0, c);
+                      plen += c;
+                    }
+                  else
+                    {
+                      elf_uncompress_failed ();
+                      return 0;
+                    }
+                }
+
+              /* Make sure that the stop code can appear. */
+
+              plen = plenbase;
+              if (unlikely (plen[256] == 0))
+                {
+                  elf_uncompress_failed ();
+                  return 0;
+                }
+
+              /* Build the decompression tables. */
+
+              if (!elf_zlib_inflate_table (plen, nlit, zdebug_table,
+                                           zdebug_table))
+                return 0;
+              if (!elf_zlib_inflate_table (plen + nlit, ndist, zdebug_table,
+                                           zdebug_table + HUFFMAN_TABLE_SIZE))
+                return 0;
+              tlit = zdebug_table;
+              tdist = zdebug_table + HUFFMAN_TABLE_SIZE;
+            }
+
+          /* Inflate values until the end of the block. This is the
+             main loop of the inflation code. */
+
+          while (1)
+            {
+              uint16_t t;
+              unsigned int b;
+              uint16_t v;
+              unsigned int lit;
+
+              if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                return 0;
+
              /* A table entry packs the symbol (HUFFMAN_VALUE_MASK),
                 the code length minus one (HUFFMAN_BITS field), and a
                 flag selecting a secondary table for codes longer than
                 8 bits. */
+              t = tlit[val & 0xff];
+              b = (t >> HUFFMAN_BITS_SHIFT) & HUFFMAN_BITS_MASK;
+              v = t & HUFFMAN_VALUE_MASK;
+
+              if ((t & (1U << HUFFMAN_SECONDARY_SHIFT)) == 0)
+                {
+                  lit = v;
+                  val >>= b + 1;
+                  bits -= b + 1;
+                }
+              else
+                {
                  /* Long code: V is the secondary table offset and B
                     the extra index width; consume 8 + B bits. */
+                  t = tlit[v + 0x100 + ((val >> 8) & ((1U << b) - 1))];
+                  b = (t >> HUFFMAN_BITS_SHIFT) & HUFFMAN_BITS_MASK;
+                  lit = t & HUFFMAN_VALUE_MASK;
+                  val >>= b + 8;
+                  bits -= b + 8;
+                }
+
+              if (lit < 256)
+                {
+                  if (unlikely (pout == poutend))
+                    {
+                      elf_uncompress_failed ();
+                      return 0;
+                    }
+
+                  *pout++ = lit;
+
+                  /* We will need to write the next byte soon. We ask
+                     for high temporal locality because we will write
+                     to the whole cache line soon. */
+                  __builtin_prefetch (pout, 1, 3);
+                }
+              else if (lit == 256)
+                {
+                  /* The end of the block. */
+                  break;
+                }
+              else
+                {
+                  unsigned int dist;
+                  unsigned int len;
+
+                  /* Convert lit into a length. */
+
+                  if (lit < 265)
+                    len = lit - 257 + 3;
+                  else if (lit == 285)
+                    len = 258;
+                  else if (unlikely (lit > 285))
+                    {
+                      elf_uncompress_failed ();
+                      return 0;
+                    }
+                  else
+                    {
+                      unsigned int extra;
+
+                      if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                        return 0;
+
+                      /* This is an expression for the table of length
+                         codes in RFC 1951 3.2.5. */
+                      lit -= 265;
+                      extra = (lit >> 2) + 1;
+                      len = (lit & 3) << extra;
+                      len += 11;
+                      len += ((1U << (extra - 1)) - 1) << 3;
+                      len += val & ((1U << extra) - 1);
+                      val >>= extra;
+                      bits -= extra;
+                    }
+
+                  if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                    return 0;
+
+                  t = tdist[val & 0xff];
+                  b = (t >> HUFFMAN_BITS_SHIFT) & HUFFMAN_BITS_MASK;
+                  v = t & HUFFMAN_VALUE_MASK;
+
+                  if ((t & (1U << HUFFMAN_SECONDARY_SHIFT)) == 0)
+                    {
+                      dist = v;
+                      val >>= b + 1;
+                      bits -= b + 1;
+                    }
+                  else
+                    {
+                      t = tdist[v + 0x100 + ((val >> 8) & ((1U << b) - 1))];
+                      b = (t >> HUFFMAN_BITS_SHIFT) & HUFFMAN_BITS_MASK;
+                      dist = t & HUFFMAN_VALUE_MASK;
+                      val >>= b + 8;
+                      bits -= b + 8;
+                    }
+
+                  /* Convert dist to a distance. */
+
+                  if (dist == 0)
+                    {
+                      /* A distance of 1. A common case, meaning
+                         repeat the last character LEN times. */
+
+                      if (unlikely (pout == porigout))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      if (unlikely ((unsigned int) (poutend - pout) < len))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      memset (pout, pout[-1], len);
+                      pout += len;
+                    }
+                  else if (unlikely (dist > 29))
+                    {
+                      elf_uncompress_failed ();
+                      return 0;
+                    }
+                  else
+                    {
+                      if (dist < 4)
+                        dist = dist + 1;
+                      else
+                        {
+                          unsigned int extra;
+
+                          if (!elf_zlib_fetch (&pin, pinend, &val, &bits))
+                            return 0;
+
+                          /* This is an expression for the table of
+                             distance codes in RFC 1951 3.2.5. */
+                          dist -= 4;
+                          extra = (dist >> 1) + 1;
+                          dist = (dist & 1) << extra;
+                          dist += 5;
+                          dist += ((1U << (extra - 1)) - 1) << 2;
+                          dist += val & ((1U << extra) - 1);
+                          val >>= extra;
+                          bits -= extra;
+                        }
+
+                      /* Go back dist bytes, and copy len bytes from
+                         there. */
+
+                      if (unlikely ((unsigned int) (pout - porigout) < dist))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      if (unlikely ((unsigned int) (poutend - pout) < len))
+                        {
+                          elf_uncompress_failed ();
+                          return 0;
+                        }
+
+                      if (dist >= len)
+                        {
+                          memcpy (pout, pout - dist, len);
+                          pout += len;
+                        }
+                      else
+                        {
                          /* Overlapping copy: the match repeats, so
                             copy in DIST-sized chunks (memcpy with
                             overlap would be undefined). */
+                          while (len > 0)
+                            {
+                              unsigned int copy;
+
+                              copy = len < dist ? len : dist;
+                              memcpy (pout, pout - dist, copy);
+                              len -= copy;
+                              pout += copy;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+  /* We should have filled the output buffer. */
+  if (unlikely (pout != poutend))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  return 1;
+}
+
+/* Verify the zlib checksum. The checksum is in the 4 bytes at
+   CHECKBYTES, and the uncompressed data is at UNCOMPRESSED /
+   UNCOMPRESSED_SIZE. Returns 1 on success, 0 on failure. */
+
+static int
+elf_zlib_verify_checksum (const unsigned char *checkbytes,
+                          const unsigned char *uncompressed,
+                          size_t uncompressed_size)
+{
+  unsigned int i;
+  unsigned int cksum;
+  const unsigned char *p;
+  uint32_t s1;
+  uint32_t s2;
+  size_t hsz;
+
  /* The stored Adler-32 checksum is big-endian (RFC 1950). */
+  cksum = 0;
+  for (i = 0; i < 4; i++)
+    cksum = (cksum << 8) | checkbytes[i];
+
  /* Adler-32: S1 is the running byte sum (seeded with 1) and S2 the
     running sum of S1 values, both reduced modulo 65521. */
+  s1 = 1;
+  s2 = 0;
+
+  /* Minimize modulo operations. */
+
+  p = uncompressed;
+  hsz = uncompressed_size;
  /* 5552 is the largest block length for which the 32-bit accumulators
     cannot overflow before the reduction (zlib's NMAX). */
+  while (hsz >= 5552)
+    {
+      for (i = 0; i < 5552; i += 16)
+        {
+          /* Manually unroll loop 16 times. */
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+          s1 = s1 + *p++;
+          s2 = s2 + s1;
+        }
+      hsz -= 5552;
+      s1 %= 65521;
+      s2 %= 65521;
+    }
+
+  while (hsz >= 16)
+    {
+      /* Manually unroll loop 16 times. */
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+
+      hsz -= 16;
+    }
+
+  for (i = 0; i < hsz; ++i)
+    {
+      s1 = s1 + *p++;
+      s2 = s2 + s1;
+    }
+
+  s1 %= 65521;
+  s2 %= 65521;
+
+  if (unlikely ((s2 << 16) + s1 != cksum))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  return 1;
+}
+
/* Decompress the zlib stream at PIN/SIN into POUT/SOUT using
   ZDEBUG_TABLE as Huffman scratch space, then check the Adler-32
   checksum held in the last four input bytes against the decompressed
   data.  Returns 1 if both inflation and verification succeed, 0 on
   any failure.  */

static int
elf_zlib_inflate_and_verify (const unsigned char *pin, size_t sin,
                             uint16_t *zdebug_table, unsigned char *pout,
                             size_t sout)
{
  int ok;

  ok = elf_zlib_inflate (pin, sin, zdebug_table, pout, sout);
  if (ok)
    ok = elf_zlib_verify_checksum (pin + sin - 4, pout, sout);
  return ok;
}
+
+/* Uncompress the old compressed debug format, the one emitted by
+   --compress-debug-sections=zlib-gnu. The compressed data is in
+   COMPRESSED / COMPRESSED_SIZE, and the function writes to
+   *UNCOMPRESSED / *UNCOMPRESSED_SIZE. ZDEBUG_TABLE is work space to
+   hold Huffman tables. Returns 0 on error, 1 on successful
+   decompression or if something goes wrong. In general we try to
+   carry on, by returning 1, even if we can't decompress. */
+
+static int
+elf_uncompress_zdebug (struct backtrace_state *state,
+                       const unsigned char *compressed, size_t compressed_size,
+                       uint16_t *zdebug_table,
+                       backtrace_error_callback error_callback, void *data,
+                       unsigned char **uncompressed, size_t *uncompressed_size)
+{
+  size_t sz;
+  size_t i;
+  unsigned char *po;
+
+  *uncompressed = NULL;
+  *uncompressed_size = 0;
+
+  /* The format starts with the four bytes ZLIB, followed by the 8
+     byte length of the uncompressed data in big-endian order,
+     followed by a zlib stream. */
+
+  if (compressed_size < 12 || memcmp (compressed, "ZLIB", 4) != 0)
+    return 1;
+
+  sz = 0;
+  for (i = 0; i < 8; i++)
+    sz = (sz << 8) | compressed[i + 4];
+
  /* NOTE(review): *uncompressed was set to NULL above, so this branch
     can never be taken here; it looks like leftover support for a
     caller-supplied buffer -- confirm before removing.  */
+  if (*uncompressed != NULL && *uncompressed_size >= sz)
+    po = *uncompressed;
+  else
+    {
+      po = (unsigned char *) backtrace_alloc (state, sz, error_callback, data);
+      if (po == NULL)
+        return 0;
+    }
+
  /* NOTE(review): on failure PO is not freed before the carry-on
     return; presumably acceptable for this best-effort path --
     confirm. */
+  if (!elf_zlib_inflate_and_verify (compressed + 12, compressed_size - 12,
+                                    zdebug_table, po, sz))
+    return 1;
+
+  *uncompressed = po;
+  *uncompressed_size = sz;
+
+  return 1;
+}
+
+/* Uncompress the new compressed debug format, the official standard
+   ELF approach emitted by --compress-debug-sections=zlib-gabi. The
+   compressed data is in COMPRESSED / COMPRESSED_SIZE, and the
+   function writes to *UNCOMPRESSED / *UNCOMPRESSED_SIZE.
+   ZDEBUG_TABLE is work space as for elf_uncompress_zdebug. Returns 0
+   on error, 1 on successful decompression or if something goes wrong.
+   In general we try to carry on, by returning 1, even if we can't
+   decompress. */
+
+static int
+elf_uncompress_chdr (struct backtrace_state *state,
+                     const unsigned char *compressed, size_t compressed_size,
+                     uint16_t *zdebug_table,
+                     backtrace_error_callback error_callback, void *data,
+                     unsigned char **uncompressed, size_t *uncompressed_size)
+{
+  const b_elf_chdr *chdr;
+  unsigned char *po;
+
+  *uncompressed = NULL;
+  *uncompressed_size = 0;
+
+  /* The format starts with an ELF compression header. */
+  if (compressed_size < sizeof (b_elf_chdr))
+    return 1;
+
+  chdr = (const b_elf_chdr *) compressed;
+
+  if (chdr->ch_type != ELFCOMPRESS_ZLIB)
+    {
+      /* Unsupported compression algorithm. */
+      return 1;
+    }
+
  /* NOTE(review): *uncompressed was set to NULL above, so this branch
     can never be taken here; appears to mirror elf_uncompress_zdebug's
     dead caller-buffer path -- confirm.  */
+  if (*uncompressed != NULL && *uncompressed_size >= chdr->ch_size)
+    po = *uncompressed;
+  else
+    {
+      po = (unsigned char *) backtrace_alloc (state, chdr->ch_size,
+                                              error_callback, data);
+      if (po == NULL)
+        return 0;
+    }
+
  /* NOTE(review): PO is not freed on this failure return; presumably
     acceptable for the carry-on policy -- confirm. */
+  if (!elf_zlib_inflate_and_verify (compressed + sizeof (b_elf_chdr),
+                                    compressed_size - sizeof (b_elf_chdr),
+                                    zdebug_table, po, chdr->ch_size))
+    return 1;
+
+  *uncompressed = po;
+  *uncompressed_size = chdr->ch_size;
+
+  return 1;
+}
+
+/* This function is a hook for testing the zlib support. It is only
+ used by tests. */
+
+int
+backtrace_uncompress_zdebug (struct backtrace_state *state,
+ const unsigned char *compressed,
+ size_t compressed_size,
+ backtrace_error_callback error_callback,
+ void *data, unsigned char **uncompressed,
+ size_t *uncompressed_size)
+{
+ uint16_t *zdebug_table;
+ int ret;
+
+ zdebug_table = ((uint16_t *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE,
+ error_callback, data));
+ if (zdebug_table == NULL)
+ return 0;
+ ret = elf_uncompress_zdebug (state, compressed, compressed_size,
+ zdebug_table, error_callback, data,
+ uncompressed, uncompressed_size);
+ backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
+ error_callback, data);
+ return ret;
+}
+
/* LZMA model sizing constants.  The layout mirrors the Linux kernel xz
   decoder's probability array; the static check further below against
   1846 + (1 << 4) * 0x300 verifies the agreement. */
+/* Number of LZMA states. */
+#define LZMA_STATES (12)
+
+/* Number of LZMA position states. The pb value of the property byte
+   is the number of bits to include in these states, and the maximum
+   value of pb is 4. */
+#define LZMA_POS_STATES (16)
+
+/* Number of LZMA distance states. These are used match distances
+   with a short match length: up to 4 bytes. */
+#define LZMA_DIST_STATES (4)
+
+/* Number of LZMA distance slots. LZMA uses six bits to encode larger
+   match lengths, so 1 << 6 possible probabilities. */
+#define LZMA_DIST_SLOTS (64)
+
+/* LZMA distances 0 to 3 are encoded directly, larger values use a
+   probability model. */
+#define LZMA_DIST_MODEL_START (4)
+
+/* The LZMA probability model ends at 14. */
+#define LZMA_DIST_MODEL_END (14)
+
+/* LZMA distance slots for distances less than 127. */
+#define LZMA_FULL_DISTANCES (128)
+
+/* LZMA uses four alignment bits. */
+#define LZMA_ALIGN_SIZE (16)
+
+/* LZMA match length is encoded with 4, 5, or 10 bits, some of which
+   are already known. */
+#define LZMA_LEN_LOW_SYMBOLS (8)
+#define LZMA_LEN_MID_SYMBOLS (8)
+#define LZMA_LEN_HIGH_SYMBOLS (256)
+
+/* LZMA literal encoding. */
+#define LZMA_LITERAL_CODERS_MAX (16)
+#define LZMA_LITERAL_CODER_SIZE (0x300)
+
+/* LZMA is based on a large set of probabilities, each managed
+   independently. Each probability is an 11 bit number that we store
+   in a uint16_t. We use a single large array of probabilities. */
+
+/* Lengths of entries in the LZMA probabilities array. The names used
+   here are copied from the Linux kernel implementation. */
+
+#define LZMA_PROB_IS_MATCH_LEN (LZMA_STATES * LZMA_POS_STATES)
+#define LZMA_PROB_IS_REP_LEN LZMA_STATES
+#define LZMA_PROB_IS_REP0_LEN LZMA_STATES
+#define LZMA_PROB_IS_REP1_LEN LZMA_STATES
+#define LZMA_PROB_IS_REP2_LEN LZMA_STATES
+#define LZMA_PROB_IS_REP0_LONG_LEN (LZMA_STATES * LZMA_POS_STATES)
+#define LZMA_PROB_DIST_SLOT_LEN (LZMA_DIST_STATES * LZMA_DIST_SLOTS)
+#define LZMA_PROB_DIST_SPECIAL_LEN (LZMA_FULL_DISTANCES - LZMA_DIST_MODEL_END)
+#define LZMA_PROB_DIST_ALIGN_LEN LZMA_ALIGN_SIZE
+#define LZMA_PROB_MATCH_LEN_CHOICE_LEN 1
+#define LZMA_PROB_MATCH_LEN_CHOICE2_LEN 1
+#define LZMA_PROB_MATCH_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
+#define LZMA_PROB_MATCH_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
+#define LZMA_PROB_MATCH_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
+#define LZMA_PROB_REP_LEN_CHOICE_LEN 1
+#define LZMA_PROB_REP_LEN_CHOICE2_LEN 1
+#define LZMA_PROB_REP_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
+#define LZMA_PROB_REP_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
+#define LZMA_PROB_REP_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
+#define LZMA_PROB_LITERAL_LEN \
+  (LZMA_LITERAL_CODERS_MAX * LZMA_LITERAL_CODER_SIZE)
+
+/* Offsets into the LZMA probabilities array. This is mechanically
+   generated from the above lengths. */
+
+#define LZMA_PROB_IS_MATCH_OFFSET 0
+#define LZMA_PROB_IS_REP_OFFSET \
+  (LZMA_PROB_IS_MATCH_OFFSET + LZMA_PROB_IS_MATCH_LEN)
+#define LZMA_PROB_IS_REP0_OFFSET \
+  (LZMA_PROB_IS_REP_OFFSET + LZMA_PROB_IS_REP_LEN)
+#define LZMA_PROB_IS_REP1_OFFSET \
+  (LZMA_PROB_IS_REP0_OFFSET + LZMA_PROB_IS_REP0_LEN)
+#define LZMA_PROB_IS_REP2_OFFSET \
+  (LZMA_PROB_IS_REP1_OFFSET + LZMA_PROB_IS_REP1_LEN)
+#define LZMA_PROB_IS_REP0_LONG_OFFSET \
+  (LZMA_PROB_IS_REP2_OFFSET + LZMA_PROB_IS_REP2_LEN)
+#define LZMA_PROB_DIST_SLOT_OFFSET \
+  (LZMA_PROB_IS_REP0_LONG_OFFSET + LZMA_PROB_IS_REP0_LONG_LEN)
+#define LZMA_PROB_DIST_SPECIAL_OFFSET \
+  (LZMA_PROB_DIST_SLOT_OFFSET + LZMA_PROB_DIST_SLOT_LEN)
+#define LZMA_PROB_DIST_ALIGN_OFFSET \
+  (LZMA_PROB_DIST_SPECIAL_OFFSET + LZMA_PROB_DIST_SPECIAL_LEN)
+#define LZMA_PROB_MATCH_LEN_CHOICE_OFFSET \
+  (LZMA_PROB_DIST_ALIGN_OFFSET + LZMA_PROB_DIST_ALIGN_LEN)
+#define LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET \
+  (LZMA_PROB_MATCH_LEN_CHOICE_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE_LEN)
+#define LZMA_PROB_MATCH_LEN_LOW_OFFSET \
+  (LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE2_LEN)
+#define LZMA_PROB_MATCH_LEN_MID_OFFSET \
+  (LZMA_PROB_MATCH_LEN_LOW_OFFSET + LZMA_PROB_MATCH_LEN_LOW_LEN)
+#define LZMA_PROB_MATCH_LEN_HIGH_OFFSET \
+  (LZMA_PROB_MATCH_LEN_MID_OFFSET + LZMA_PROB_MATCH_LEN_MID_LEN)
+#define LZMA_PROB_REP_LEN_CHOICE_OFFSET \
+  (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + LZMA_PROB_MATCH_LEN_HIGH_LEN)
+#define LZMA_PROB_REP_LEN_CHOICE2_OFFSET \
+  (LZMA_PROB_REP_LEN_CHOICE_OFFSET + LZMA_PROB_REP_LEN_CHOICE_LEN)
+#define LZMA_PROB_REP_LEN_LOW_OFFSET \
+  (LZMA_PROB_REP_LEN_CHOICE2_OFFSET + LZMA_PROB_REP_LEN_CHOICE2_LEN)
+#define LZMA_PROB_REP_LEN_MID_OFFSET \
+  (LZMA_PROB_REP_LEN_LOW_OFFSET + LZMA_PROB_REP_LEN_LOW_LEN)
+#define LZMA_PROB_REP_LEN_HIGH_OFFSET \
+  (LZMA_PROB_REP_LEN_MID_OFFSET + LZMA_PROB_REP_LEN_MID_LEN)
+#define LZMA_PROB_LITERAL_OFFSET \
+  (LZMA_PROB_REP_LEN_HIGH_OFFSET + LZMA_PROB_REP_LEN_HIGH_LEN)
+
+#define LZMA_PROB_TOTAL_COUNT \
+  (LZMA_PROB_LITERAL_OFFSET + LZMA_PROB_LITERAL_LEN)
+
+/* Check that the number of LZMA probabilities is the same as the
+   Linux kernel implementation. */
+
+#if LZMA_PROB_TOTAL_COUNT != 1846 + (1 << 4) * 0x300
+  #error Wrong number of LZMA probabilities
+#endif
+
+/* Expressions for the offset in the LZMA probabilities array of a
+ specific probability. */
+
+#define LZMA_IS_MATCH(state, pos) \
+ (LZMA_PROB_IS_MATCH_OFFSET + (state) * LZMA_POS_STATES + (pos))
+#define LZMA_IS_REP(state) \
+ (LZMA_PROB_IS_REP_OFFSET + (state))
+#define LZMA_IS_REP0(state) \
+ (LZMA_PROB_IS_REP0_OFFSET + (state))
+#define LZMA_IS_REP1(state) \
+ (LZMA_PROB_IS_REP1_OFFSET + (state))
+#define LZMA_IS_REP2(state) \
+ (LZMA_PROB_IS_REP2_OFFSET + (state))
+#define LZMA_IS_REP0_LONG(state, pos) \
+ (LZMA_PROB_IS_REP0_LONG_OFFSET + (state) * LZMA_POS_STATES + (pos))
+#define LZMA_DIST_SLOT(dist, slot) \
+ (LZMA_PROB_DIST_SLOT_OFFSET + (dist) * LZMA_DIST_SLOTS + (slot))
+#define LZMA_DIST_SPECIAL(dist) \
+ (LZMA_PROB_DIST_SPECIAL_OFFSET + (dist))
+#define LZMA_DIST_ALIGN(dist) \
+ (LZMA_PROB_DIST_ALIGN_OFFSET + (dist))
+#define LZMA_MATCH_LEN_CHOICE \
+ LZMA_PROB_MATCH_LEN_CHOICE_OFFSET
+#define LZMA_MATCH_LEN_CHOICE2 \
+ LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET
+#define LZMA_MATCH_LEN_LOW(pos, sym) \
+ (LZMA_PROB_MATCH_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
+#define LZMA_MATCH_LEN_MID(pos, sym) \
+ (LZMA_PROB_MATCH_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
+#define LZMA_MATCH_LEN_HIGH(sym) \
+ (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + (sym))
+#define LZMA_REP_LEN_CHOICE \
+ LZMA_PROB_REP_LEN_CHOICE_OFFSET
+#define LZMA_REP_LEN_CHOICE2 \
+ LZMA_PROB_REP_LEN_CHOICE2_OFFSET
+#define LZMA_REP_LEN_LOW(pos, sym) \
+ (LZMA_PROB_REP_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
+#define LZMA_REP_LEN_MID(pos, sym) \
+ (LZMA_PROB_REP_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
+#define LZMA_REP_LEN_HIGH(sym) \
+ (LZMA_PROB_REP_LEN_HIGH_OFFSET + (sym))
+#define LZMA_LITERAL(code, size) \
+ (LZMA_PROB_LITERAL_OFFSET + (code) * LZMA_LITERAL_CODER_SIZE + (size))
+
+/* Read an LZMA varint from BUF, reading and updating *POFFSET,
+   setting *VAL.  A varint is a sequence of up to 9 bytes, 7 payload
+   bits per byte, least significant group first; the high bit of each
+   byte indicates continuation.  Returns 0 on error, 1 on success.  */
+
+static int
+elf_lzma_varint (const unsigned char *compressed, size_t compressed_size,
+		 size_t *poffset, uint64_t *val)
+{
+  size_t off;
+  int i;
+  uint64_t v;
+  unsigned char b;
+
+  off = *poffset;
+  i = 0;
+  v = 0;
+  while (1)
+    {
+      if (unlikely (off >= compressed_size))
+	{
+	  elf_uncompress_failed ();
+	  return 0;
+	}
+      b = compressed[off];
+      /* Cast to uint64_t before shifting: B is only promoted to int,
+	 so without the cast a shift count of 32 or more (i >= 5)
+	 would be undefined behavior and the high-order payload bits
+	 would be lost.  */
+      v |= (uint64_t) (b & 0x7f) << (i * 7);
+      ++off;
+      if ((b & 0x80) == 0)
+	{
+	  *poffset = off;
+	  *val = v;
+	  return 1;
+	}
+      ++i;
+      if (unlikely (i >= 9))
+	{
+	  elf_uncompress_failed ();
+	  return 0;
+	}
+    }
+}
+
+/* Normalize the LZMA range decoder so that the range stays large
+   enough for precise probability arithmetic: whenever it drops below
+   2^24, shift in one more byte of compressed input, updating
+   *POFFSET, *PRANGE and *PCODE.  */
+
+static void
+elf_lzma_range_normalize (const unsigned char *compressed,
+			  size_t compressed_size, size_t *poffset,
+			  uint32_t *prange, uint32_t *pcode)
+{
+  if (*prange >= (1U << 24))
+    return;
+
+  if (unlikely (*poffset >= compressed_size))
+    {
+      /* We assume this will be caught elsewhere. */
+      elf_uncompress_failed ();
+      return;
+    }
+
+  *prange <<= 8;
+  *pcode = (*pcode << 8) + compressed[*poffset];
+  ++*poffset;
+}
+
+/* Read and return a single bit from the LZMA range coder, using and
+   adapting the probability at *PROB.  Returns the decoded bit, 0 or
+   1.  */
+
+static int
+elf_lzma_bit (const unsigned char *compressed, size_t compressed_size,
+	      uint16_t *prob, size_t *poffset, uint32_t *prange,
+	      uint32_t *pcode)
+{
+  uint32_t threshold;
+
+  elf_lzma_range_normalize (compressed, compressed_size, poffset,
+			    prange, pcode);
+  threshold = (*prange >> 11) * (uint32_t) *prob;
+  if (*pcode >= threshold)
+    {
+      /* A 1 bit: keep the upper part of the range and decay the
+	 probability of the next 0.  */
+      *prange -= threshold;
+      *pcode -= threshold;
+      *prob -= *prob >> 5;
+      return 1;
+    }
+
+  /* A 0 bit: keep the lower part of the range and boost the
+     probability of the next 0.  */
+  *prange = threshold;
+  *prob += ((1U << 11) - *prob) >> 5;
+  return 0;
+}
+
+/* Read an integer of size BITS from the LZMA stream, most significant
+   bit first.  The bits are predicted using the binary tree of
+   probabilities rooted at PROBS[1].  */
+
+static uint32_t
+elf_lzma_integer (const unsigned char *compressed, size_t compressed_size,
+		  uint16_t *probs, uint32_t bits, size_t *poffset,
+		  uint32_t *prange, uint32_t *pcode)
+{
+  uint32_t tree;
+  uint32_t remaining;
+
+  /* TREE holds a leading 1 bit followed by the bits decoded so far;
+     it doubles as the index of the next tree node.  */
+  tree = 1;
+  for (remaining = bits; remaining > 0; --remaining)
+    tree = (tree << 1) + elf_lzma_bit (compressed, compressed_size,
+				       probs + tree, poffset, prange,
+				       pcode);
+
+  /* Strip the leading 1 bit to recover the BITS-bit value.  */
+  return tree - (1 << bits);
+}
+
+/* Read an integer of size BITS from the LZMA stream, least
+   significant bit first.  The bits are predicted using the binary
+   tree of probabilities rooted at PROBS[1].  */
+
+static uint32_t
+elf_lzma_reverse_integer (const unsigned char *compressed,
+			  size_t compressed_size, uint16_t *probs,
+			  uint32_t bits, size_t *poffset, uint32_t *prange,
+			  uint32_t *pcode)
+{
+  uint32_t node;
+  uint32_t result;
+  uint32_t shift;
+
+  /* NODE walks the probability tree; RESULT accumulates the decoded
+     bits from least significant upward.  */
+  node = 1;
+  result = 0;
+  for (shift = 0; shift < bits; ++shift)
+    {
+      int b;
+
+      b = elf_lzma_bit (compressed, compressed_size, probs + node, poffset,
+			prange, pcode);
+      node = (node << 1) + b;
+      result |= (uint32_t) b << shift;
+    }
+  return result;
+}
+
+/* Read a match length from the LZMA stream.  IS_REP picks either the
+   LZMA_REP or LZMA_MATCH probability tables.  Two choice bits select
+   one of three ranges: low (2-9), mid (10-17) or high (18-273); the
+   remaining bits are tree-coded within the chosen range.  */
+
+static uint32_t
+elf_lzma_len (const unsigned char *compressed, size_t compressed_size,
+	      uint16_t *probs, int is_rep, unsigned int pos_state,
+	      size_t *poffset, uint32_t *prange, uint32_t *pcode)
+{
+  uint16_t *choice_prob;
+  uint16_t *sym_probs;
+  uint32_t nbits;
+  uint32_t base;
+
+  /* First choice bit: 0 selects the low range.  */
+  choice_prob = probs + (is_rep
+			 ? LZMA_REP_LEN_CHOICE
+			 : LZMA_MATCH_LEN_CHOICE);
+  if (!elf_lzma_bit (compressed, compressed_size, choice_prob, poffset,
+		     prange, pcode))
+    {
+      sym_probs = probs + (is_rep
+			   ? LZMA_REP_LEN_LOW (pos_state, 0)
+			   : LZMA_MATCH_LEN_LOW (pos_state, 0));
+      nbits = 3;
+      base = 2;
+    }
+  else
+    {
+      /* Second choice bit: 0 selects the mid range, 1 the high
+	 range.  */
+      choice_prob = probs + (is_rep
+			     ? LZMA_REP_LEN_CHOICE2
+			     : LZMA_MATCH_LEN_CHOICE2);
+      if (!elf_lzma_bit (compressed, compressed_size, choice_prob,
+			 poffset, prange, pcode))
+	{
+	  sym_probs = probs + (is_rep
+			       ? LZMA_REP_LEN_MID (pos_state, 0)
+			       : LZMA_MATCH_LEN_MID (pos_state, 0));
+	  nbits = 3;
+	  base = 2 + 8;
+	}
+      else
+	{
+	  sym_probs = probs + (is_rep
+			       ? LZMA_REP_LEN_HIGH (0)
+			       : LZMA_MATCH_LEN_HIGH (0));
+	  nbits = 8;
+	  base = 2 + 8 + 8;
+	}
+    }
+
+  return base + elf_lzma_integer (compressed, compressed_size, sym_probs,
+				  nbits, poffset, prange, pcode);
+}
+
+/* Uncompress one LZMA block from a minidebug file.  The compressed
+   data is at COMPRESSED + *POFFSET.  Update *POFFSET.  Store the data
+   into the memory at UNCOMPRESSED, size UNCOMPRESSED_SIZE.  CHECK is
+   the stream flag from the xz header.  PROBS is caller-provided
+   scratch space for LZMA_PROB_TOTAL_COUNT probability entries.
+   Return 1 on successful decompression.  */
+
+static int
+elf_uncompress_lzma_block (const unsigned char *compressed,
+			   size_t compressed_size, unsigned char check,
+			   uint16_t *probs, unsigned char *uncompressed,
+			   size_t uncompressed_size, size_t *poffset)
+{
+  size_t off;
+  size_t block_header_offset;
+  size_t block_header_size;
+  unsigned char block_flags;
+  uint64_t header_compressed_size;
+  uint64_t header_uncompressed_size;
+  unsigned char lzma2_properties;
+  uint32_t computed_crc;
+  uint32_t stream_crc;
+  size_t uncompressed_offset;
+  size_t dict_start_offset;
+  unsigned int lc;
+  unsigned int lp;
+  unsigned int pb;
+  uint32_t range;
+  uint32_t code;
+  uint32_t lstate;
+  uint32_t dist[4];
+
+  off = *poffset;
+  block_header_offset = off;
+
+  /* Block header size is a single byte. */
+  if (unlikely (off >= compressed_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  block_header_size = (compressed[off] + 1) * 4;
+  if (unlikely (off + block_header_size > compressed_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* Block flags. */
+  block_flags = compressed[off + 1];
+  if (unlikely ((block_flags & 0x3c) != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  off += 2;
+
+  /* Optional compressed size. */
+  header_compressed_size = 0;
+  if ((block_flags & 0x40) != 0)
+    {
+      *poffset = off;
+      if (!elf_lzma_varint (compressed, compressed_size, poffset,
+			    &header_compressed_size))
+	return 0;
+      off = *poffset;
+    }
+
+  /* Optional uncompressed size. */
+  header_uncompressed_size = 0;
+  if ((block_flags & 0x80) != 0)
+    {
+      *poffset = off;
+      if (!elf_lzma_varint (compressed, compressed_size, poffset,
+			    &header_uncompressed_size))
+	return 0;
+      off = *poffset;
+    }
+
+  /* The recipe for creating a minidebug file is to run the xz program
+     with no arguments, so we expect exactly one filter: lzma2. */
+
+  if (unlikely ((block_flags & 0x3) != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  if (unlikely (off + 2 >= block_header_offset + block_header_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* The filter ID for LZMA2 is 0x21. */
+  if (unlikely (compressed[off] != 0x21))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  ++off;
+
+  /* The size of the filter properties for LZMA2 is 1. */
+  if (unlikely (compressed[off] != 1))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  ++off;
+
+  lzma2_properties = compressed[off];
+  ++off;
+
+  if (unlikely (lzma2_properties > 40))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* The properties describe the dictionary size, but we don't care
+     what that is. */
+
+  /* Block header padding. */
+  if (unlikely (off + 4 > compressed_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  off = (off + 3) &~ (size_t) 3;
+
+  if (unlikely (off + 4 > compressed_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* Block header CRC. */
+  computed_crc = elf_crc32 (0, compressed + block_header_offset,
+			    block_header_size - 4);
+  stream_crc = (compressed[off]
+		| (compressed[off + 1] << 8)
+		| (compressed[off + 2] << 16)
+		| (compressed[off + 3] << 24));
+  if (unlikely (computed_crc != stream_crc))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  off += 4;
+
+  /* Read a sequence of LZMA2 packets. */
+
+  uncompressed_offset = 0;
+  dict_start_offset = 0;
+  lc = 0;
+  lp = 0;
+  pb = 0;
+  lstate = 0;
+  while (off < compressed_size)
+    {
+      unsigned char control;
+
+      range = 0xffffffff;
+      code = 0;
+
+      control = compressed[off];
+      ++off;
+      if (unlikely (control == 0))
+	{
+	  /* End of packets. */
+	  break;
+	}
+
+      if (control == 1 || control >= 0xe0)
+	{
+	  /* Reset dictionary to empty. */
+	  dict_start_offset = uncompressed_offset;
+	}
+
+      if (control < 0x80)
+	{
+	  size_t chunk_size;
+
+	  /* The only valid values here are 1 or 2.  A 1 means to
+	     reset the dictionary (done above).  Then we see an
+	     uncompressed chunk. */
+
+	  if (unlikely (control > 2))
+	    {
+	      elf_uncompress_failed ();
+	      return 0;
+	    }
+
+	  /* An uncompressed chunk is a two byte size followed by
+	     data. */
+
+	  if (unlikely (off + 2 > compressed_size))
+	    {
+	      elf_uncompress_failed ();
+	      return 0;
+	    }
+
+	  chunk_size = compressed[off] << 8;
+	  chunk_size += compressed[off + 1];
+	  ++chunk_size;
+
+	  off += 2;
+
+	  if (unlikely (off + chunk_size > compressed_size))
+	    {
+	      elf_uncompress_failed ();
+	      return 0;
+	    }
+	  if (unlikely (uncompressed_offset + chunk_size > uncompressed_size))
+	    {
+	      elf_uncompress_failed ();
+	      return 0;
+	    }
+
+	  memcpy (uncompressed + uncompressed_offset, compressed + off,
+		  chunk_size);
+	  uncompressed_offset += chunk_size;
+	  off += chunk_size;
+	}
+      else
+	{
+	  size_t uncompressed_chunk_start;
+	  size_t uncompressed_chunk_size;
+	  size_t compressed_chunk_size;
+	  size_t limit;
+
+	  /* An LZMA chunk.  This starts with an uncompressed size and
+	     a compressed size. */
+
+	  if (unlikely (off + 4 >= compressed_size))
+	    {
+	      elf_uncompress_failed ();
+	      return 0;
+	    }
+
+	  uncompressed_chunk_start = uncompressed_offset;
+
+	  uncompressed_chunk_size = (control & 0x1f) << 16;
+	  uncompressed_chunk_size += compressed[off] << 8;
+	  uncompressed_chunk_size += compressed[off + 1];
+	  ++uncompressed_chunk_size;
+
+	  compressed_chunk_size = compressed[off + 2] << 8;
+	  compressed_chunk_size += compressed[off + 3];
+	  ++compressed_chunk_size;
+
+	  off += 4;
+
+	  /* Bit 7 (0x80) is set.
+	     Bits 6 and 5 (0x40 and 0x20) are as follows:
+	     0: don't reset anything
+	     1: reset state
+	     2: reset state, read properties
+	     3: reset state, read properties, reset dictionary (done above) */
+
+	  if (control >= 0xc0)
+	    {
+	      unsigned char props;
+
+	      /* Bit 6 is set, read properties.  The properties byte
+		 encodes (pb * 5 + lp) * 9 + lc with lc in [0, 8]. */
+
+	      if (unlikely (off >= compressed_size))
+		{
+		  elf_uncompress_failed ();
+		  return 0;
+		}
+	      props = compressed[off];
+	      ++off;
+	      if (unlikely (props > (4 * 5 + 4) * 9 + 8))
+		{
+		  elf_uncompress_failed ();
+		  return 0;
+		}
+	      pb = 0;
+	      while (props >= 9 * 5)
+		{
+		  props -= 9 * 5;
+		  ++pb;
+		}
+	      lp = 0;
+	      /* Use >= here: a remainder that is an exact multiple of
+		 9 means lc == 0 with a nonzero lp.  With a strict >
+		 comparison the loop stopped one step early, leaving
+		 lc == 9 and wrongly rejecting valid property
+		 bytes. */
+	      while (props >= 9)
+		{
+		  props -= 9;
+		  ++lp;
+		}
+	      lc = props;
+	      if (unlikely (lc + lp > 4))
+		{
+		  elf_uncompress_failed ();
+		  return 0;
+		}
+	    }
+
+	  if (control >= 0xa0)
+	    {
+	      size_t i;
+
+	      /* Bit 5 or 6 is set, reset LZMA state. */
+
+	      lstate = 0;
+	      memset (&dist, 0, sizeof dist);
+	      for (i = 0; i < LZMA_PROB_TOTAL_COUNT; i++)
+		probs[i] = 1 << 10;
+	      range = 0xffffffff;
+	      code = 0;
+	    }
+
+	  /* Read the range code. */
+
+	  if (unlikely (off + 5 > compressed_size))
+	    {
+	      elf_uncompress_failed ();
+	      return 0;
+	    }
+
+	  /* The byte at compressed[off] is ignored for some
+	     reason. */
+
+	  code = ((compressed[off + 1] << 24)
+		  + (compressed[off + 2] << 16)
+		  + (compressed[off + 3] << 8)
+		  + compressed[off + 4]);
+	  off += 5;
+
+	  /* This is the main LZMA decode loop. */
+
+	  limit = off + compressed_chunk_size;
+	  *poffset = off;
+	  while (*poffset < limit)
+	    {
+	      unsigned int pos_state;
+
+	      if (unlikely (uncompressed_offset
+			    == (uncompressed_chunk_start
+				+ uncompressed_chunk_size)))
+		{
+		  /* We've decompressed all the expected bytes. */
+		  break;
+		}
+
+	      pos_state = ((uncompressed_offset - dict_start_offset)
+			   & ((1 << pb) - 1));
+
+	      if (elf_lzma_bit (compressed, compressed_size,
+				probs + LZMA_IS_MATCH (lstate, pos_state),
+				poffset, &range, &code))
+		{
+		  uint32_t len;
+
+		  if (elf_lzma_bit (compressed, compressed_size,
+				    probs + LZMA_IS_REP (lstate),
+				    poffset, &range, &code))
+		    {
+		      int short_rep;
+		      uint32_t next_dist;
+
+		      /* Repeated match. */
+
+		      short_rep = 0;
+		      if (elf_lzma_bit (compressed, compressed_size,
+					probs + LZMA_IS_REP0 (lstate),
+					poffset, &range, &code))
+			{
+			  if (elf_lzma_bit (compressed, compressed_size,
+					    probs + LZMA_IS_REP1 (lstate),
+					    poffset, &range, &code))
+			    {
+			      if (elf_lzma_bit (compressed, compressed_size,
+						probs + LZMA_IS_REP2 (lstate),
+						poffset, &range, &code))
+				{
+				  next_dist = dist[3];
+				  dist[3] = dist[2];
+				}
+			      else
+				{
+				  next_dist = dist[2];
+				}
+			      dist[2] = dist[1];
+			    }
+			  else
+			    {
+			      next_dist = dist[1];
+			    }
+
+			  dist[1] = dist[0];
+			  dist[0] = next_dist;
+			}
+		      else
+			{
+			  if (!elf_lzma_bit (compressed, compressed_size,
+					     (probs
+					      + LZMA_IS_REP0_LONG (lstate,
+								   pos_state)),
+					     poffset, &range, &code))
+			    short_rep = 1;
+			}
+
+		      if (lstate < 7)
+			lstate = short_rep ? 9 : 8;
+		      else
+			lstate = 11;
+
+		      if (short_rep)
+			len = 1;
+		      else
+			len = elf_lzma_len (compressed, compressed_size,
+					    probs, 1, pos_state, poffset,
+					    &range, &code);
+		    }
+		  else
+		    {
+		      uint32_t dist_state;
+		      uint32_t dist_slot;
+		      uint16_t *probs_dist;
+
+		      /* Match. */
+
+		      if (lstate < 7)
+			lstate = 7;
+		      else
+			lstate = 10;
+		      dist[3] = dist[2];
+		      dist[2] = dist[1];
+		      dist[1] = dist[0];
+		      len = elf_lzma_len (compressed, compressed_size,
+					  probs, 0, pos_state, poffset,
+					  &range, &code);
+
+		      if (len < 4 + 2)
+			dist_state = len - 2;
+		      else
+			dist_state = 3;
+		      probs_dist = probs + LZMA_DIST_SLOT (dist_state, 0);
+		      dist_slot = elf_lzma_integer (compressed,
+						    compressed_size,
+						    probs_dist, 6,
+						    poffset, &range,
+						    &code);
+		      if (dist_slot < LZMA_DIST_MODEL_START)
+			dist[0] = dist_slot;
+		      else
+			{
+			  uint32_t limit;
+
+			  limit = (dist_slot >> 1) - 1;
+			  dist[0] = 2 + (dist_slot & 1);
+			  if (dist_slot < LZMA_DIST_MODEL_END)
+			    {
+			      dist[0] <<= limit;
+			      probs_dist = (probs
+					    + LZMA_DIST_SPECIAL(dist[0]
+								- dist_slot
+								- 1));
+			      dist[0] +=
+				elf_lzma_reverse_integer (compressed,
+							  compressed_size,
+							  probs_dist,
+							  limit, poffset,
+							  &range, &code);
+			    }
+			  else
+			    {
+			      uint32_t dist0;
+			      uint32_t i;
+
+			      dist0 = dist[0];
+			      for (i = 0; i < limit - 4; i++)
+				{
+				  uint32_t mask;
+
+				  elf_lzma_range_normalize (compressed,
+							    compressed_size,
+							    poffset,
+							    &range, &code);
+				  range >>= 1;
+				  code -= range;
+				  mask = -(code >> 31);
+				  code += range & mask;
+				  dist0 <<= 1;
+				  dist0 += mask + 1;
+				}
+			      dist0 <<= 4;
+			      probs_dist = probs + LZMA_DIST_ALIGN (0);
+			      dist0 +=
+				elf_lzma_reverse_integer (compressed,
+							  compressed_size,
+							  probs_dist, 4,
+							  poffset,
+							  &range, &code);
+			      dist[0] = dist0;
+			    }
+			}
+		    }
+
+		  if (unlikely (uncompressed_offset
+				- dict_start_offset < dist[0] + 1))
+		    {
+		      elf_uncompress_failed ();
+		      return 0;
+		    }
+		  if (unlikely (uncompressed_offset + len > uncompressed_size))
+		    {
+		      elf_uncompress_failed ();
+		      return 0;
+		    }
+
+		  if (dist[0] == 0)
+		    {
+		      /* A common case, meaning repeat the last
+			 character LEN times. */
+		      memset (uncompressed + uncompressed_offset,
+			      uncompressed[uncompressed_offset - 1],
+			      len);
+		      uncompressed_offset += len;
+		    }
+		  else if (dist[0] + 1 >= len)
+		    {
+		      memcpy (uncompressed + uncompressed_offset,
+			      uncompressed + uncompressed_offset - dist[0] - 1,
+			      len);
+		      uncompressed_offset += len;
+		    }
+		  else
+		    {
+		      while (len > 0)
+			{
+			  uint32_t copy;
+
+			  copy = len < dist[0] + 1 ? len : dist[0] + 1;
+			  memcpy (uncompressed + uncompressed_offset,
+				  (uncompressed + uncompressed_offset
+				   - dist[0] - 1),
+				  copy);
+			  len -= copy;
+			  uncompressed_offset += copy;
+			}
+		    }
+		}
+	      else
+		{
+		  unsigned char prev;
+		  unsigned char low;
+		  size_t high;
+		  uint16_t *lit_probs;
+		  unsigned int sym;
+
+		  /* Literal value. */
+
+		  if (uncompressed_offset > 0)
+		    prev = uncompressed[uncompressed_offset - 1];
+		  else
+		    prev = 0;
+		  low = prev >> (8 - lc);
+		  high = (((uncompressed_offset - dict_start_offset)
+			   & ((1 << lp) - 1))
+			  << lc);
+		  lit_probs = probs + LZMA_LITERAL (low + high, 0);
+		  if (lstate < 7)
+		    sym = elf_lzma_integer (compressed, compressed_size,
+					    lit_probs, 8, poffset, &range,
+					    &code);
+		  else
+		    {
+		      unsigned int match;
+		      unsigned int bit;
+		      unsigned int match_bit;
+		      unsigned int idx;
+
+		      sym = 1;
+		      if (uncompressed_offset >= dist[0] + 1)
+			match = uncompressed[uncompressed_offset - dist[0] - 1];
+		      else
+			match = 0;
+		      match <<= 1;
+		      bit = 0x100;
+		      do
+			{
+			  match_bit = match & bit;
+			  match <<= 1;
+			  idx = bit + match_bit + sym;
+			  sym <<= 1;
+			  if (elf_lzma_bit (compressed, compressed_size,
+					    lit_probs + idx, poffset,
+					    &range, &code))
+			    {
+			      ++sym;
+			      bit &= match_bit;
+			    }
+			  else
+			    {
+			      bit &= ~ match_bit;
+			    }
+			}
+		      while (sym < 0x100);
+		    }
+
+		  if (unlikely (uncompressed_offset >= uncompressed_size))
+		    {
+		      elf_uncompress_failed ();
+		      return 0;
+		    }
+
+		  uncompressed[uncompressed_offset] = (unsigned char) sym;
+		  ++uncompressed_offset;
+		  if (lstate <= 3)
+		    lstate = 0;
+		  else if (lstate <= 9)
+		    lstate -= 3;
+		  else
+		    lstate -= 6;
+		}
+	    }
+
+	  elf_lzma_range_normalize (compressed, compressed_size, poffset,
+				    &range, &code);
+
+	  off = *poffset;
+	}
+    }
+
+  /* We have reached the end of the block.  Pad to four byte
+     boundary. */
+  off = (off + 3) &~ (size_t) 3;
+  if (unlikely (off > compressed_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  switch (check)
+    {
+    case 0:
+      /* No check. */
+      break;
+
+    case 1:
+      /* CRC32 */
+      if (unlikely (off + 4 > compressed_size))
+	{
+	  elf_uncompress_failed ();
+	  return 0;
+	}
+      computed_crc = elf_crc32 (0, uncompressed, uncompressed_offset);
+      stream_crc = (compressed[off]
+		    | (compressed[off + 1] << 8)
+		    | (compressed[off + 2] << 16)
+		    | (compressed[off + 3] << 24));
+      if (computed_crc != stream_crc)
+	{
+	  elf_uncompress_failed ();
+	  return 0;
+	}
+      off += 4;
+      break;
+
+    case 4:
+      /* CRC64.  We don't bother computing a CRC64 checksum. */
+      if (unlikely (off + 8 > compressed_size))
+	{
+	  elf_uncompress_failed ();
+	  return 0;
+	}
+      off += 8;
+      break;
+
+    case 10:
+      /* SHA.  We don't bother computing a SHA checksum. */
+      if (unlikely (off + 32 > compressed_size))
+	{
+	  elf_uncompress_failed ();
+	  return 0;
+	}
+      off += 32;
+      break;
+
+    default:
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  *poffset = off;
+
+  return 1;
+}
+
+/* Uncompress LZMA data found in a minidebug file.  The minidebug
+   format is described at
+   https://sourceware.org/gdb/current/onlinedocs/gdb/MiniDebugInfo.html.
+   Returns 0 on error, 1 on successful decompression.  For this
+   function we return 0 on failure to decompress, as the calling code
+   will carry on in that case.  On success *UNCOMPRESSED is set to a
+   newly allocated buffer and *UNCOMPRESSED_SIZE to its length; an
+   empty stream yields NULL and 0.  */
+
+static int
+elf_uncompress_lzma (struct backtrace_state *state,
+		     const unsigned char *compressed, size_t compressed_size,
+		     backtrace_error_callback error_callback, void *data,
+		     unsigned char **uncompressed, size_t *uncompressed_size)
+{
+  size_t header_size;
+  size_t footer_size;
+  unsigned char check;
+  uint32_t computed_crc;
+  uint32_t stream_crc;
+  size_t offset;
+  size_t index_size;
+  size_t footer_offset;
+  size_t index_offset;
+  uint64_t index_compressed_size;
+  uint64_t index_uncompressed_size;
+  unsigned char *mem;
+  uint16_t *probs;
+  size_t compressed_block_size;
+
+  /* The format starts with a stream header and ends with a stream
+     footer. */
+  header_size = 12;
+  footer_size = 12;
+  if (unlikely (compressed_size < header_size + footer_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* The stream header starts with a magic string. */
+  if (unlikely (memcmp (compressed, "\375" "7zXZ\0", 6) != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* Next come stream flags.  The first byte is zero, the second byte
+     is the check. */
+  if (unlikely (compressed[6] != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  check = compressed[7];
+  if (unlikely ((check & 0xf8) != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* Next comes a CRC of the stream flags. */
+  computed_crc = elf_crc32 (0, compressed + 6, 2);
+  stream_crc = (compressed[8]
+		| (compressed[9] << 8)
+		| (compressed[10] << 16)
+		| (compressed[11] << 24));
+  if (unlikely (computed_crc != stream_crc))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* Now that we've parsed the header, parse the footer, so that we
+     can get the uncompressed size. */
+
+  /* The footer ends with two magic bytes. */
+
+  offset = compressed_size;
+  if (unlikely (memcmp (compressed + offset - 2, "YZ", 2) != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  offset -= 2;
+
+  /* Before that are the stream flags, which should be the same as the
+     flags in the header. */
+  if (unlikely (compressed[offset - 2] != 0
+		|| compressed[offset - 1] != check))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  offset -= 2;
+
+  /* Before that is the size of the index field, which precedes the
+     footer.  The stored value is (real size / 4) - 1. */
+  index_size = (compressed[offset - 4]
+		| (compressed[offset - 3] << 8)
+		| (compressed[offset - 2] << 16)
+		| (compressed[offset - 1] << 24));
+  index_size = (index_size + 1) * 4;
+  offset -= 4;
+
+  /* Before that is a footer CRC, covering the 4 index-size bytes and
+     the 2 stream-flag bytes that follow it. */
+  computed_crc = elf_crc32 (0, compressed + offset, 6);
+  stream_crc = (compressed[offset - 4]
+		| (compressed[offset - 3] << 8)
+		| (compressed[offset - 2] << 16)
+		| (compressed[offset - 1] << 24));
+  if (unlikely (computed_crc != stream_crc))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  offset -= 4;
+
+  /* The index comes just before the footer. */
+  if (unlikely (offset < index_size + header_size))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  footer_offset = offset;
+  offset -= index_size;
+  index_offset = offset;
+
+  /* The index starts with a zero byte. */
+  if (unlikely (compressed[offset] != 0))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  ++offset;
+
+  /* Next is the number of blocks.  We expect zero blocks for an empty
+     stream, and otherwise a single block. */
+  if (unlikely (compressed[offset] == 0))
+    {
+      *uncompressed = NULL;
+      *uncompressed_size = 0;
+      return 1;
+    }
+  if (unlikely (compressed[offset] != 1))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  ++offset;
+
+  /* Next is the compressed size and the uncompressed size. */
+  if (!elf_lzma_varint (compressed, compressed_size, &offset,
+			&index_compressed_size))
+    return 0;
+  if (!elf_lzma_varint (compressed, compressed_size, &offset,
+			&index_uncompressed_size))
+    return 0;
+
+  /* Pad to a four byte boundary. */
+  offset = (offset + 3) &~ (size_t) 3;
+
+  /* Next is a CRC of the index. */
+  computed_crc = elf_crc32 (0, compressed + index_offset,
+			    offset - index_offset);
+  stream_crc = (compressed[offset]
+		| (compressed[offset + 1] << 8)
+		| (compressed[offset + 2] << 16)
+		| (compressed[offset + 3] << 24));
+  if (unlikely (computed_crc != stream_crc))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+  offset += 4;
+
+  /* We should now be back at the footer. */
+  if (unlikely (offset != footer_offset))
+    {
+      elf_uncompress_failed ();
+      return 0;
+    }
+
+  /* Allocate space to hold the uncompressed data.  If we succeed in
+     uncompressing the LZMA data, we never free this memory. */
+  mem = (unsigned char *) backtrace_alloc (state, index_uncompressed_size,
+					   error_callback, data);
+  if (unlikely (mem == NULL))
+    return 0;
+  *uncompressed = mem;
+  *uncompressed_size = index_uncompressed_size;
+
+  /* Allocate space for probabilities. */
+  probs = ((uint16_t *)
+	   backtrace_alloc (state,
+			    LZMA_PROB_TOTAL_COUNT * sizeof (uint16_t),
+			    error_callback, data));
+  if (unlikely (probs == NULL))
+    {
+      backtrace_free (state, mem, index_uncompressed_size, error_callback,
+		      data);
+      return 0;
+    }
+
+  /* Uncompress the block, which follows the header. */
+  offset = 12;
+  if (!elf_uncompress_lzma_block (compressed, compressed_size, check, probs,
+				  mem, index_uncompressed_size, &offset))
+    {
+      backtrace_free (state, mem, index_uncompressed_size, error_callback,
+		      data);
+      return 0;
+    }
+
+  compressed_block_size = offset - 12;
+  if (unlikely (compressed_block_size
+		!= ((index_compressed_size + 3) &~ (size_t) 3)))
+    {
+      elf_uncompress_failed ();
+      backtrace_free (state, mem, index_uncompressed_size, error_callback,
+		      data);
+      return 0;
+    }
+
+  offset = (offset + 3) &~ (size_t) 3;
+  if (unlikely (offset != index_offset))
+    {
+      elf_uncompress_failed ();
+      backtrace_free (state, mem, index_uncompressed_size, error_callback,
+		      data);
+      return 0;
+    }
+
+  /* NOTE(review): PROBS is not freed on success or on the failure
+     paths following its allocation -- confirm whether this is an
+     intentional leak (backtrace_alloc memory may be pooled) or an
+     oversight. */
+
+  return 1;
+}
+
+/* This function is a hook for testing the LZMA support.  It is only
+   used by tests; it simply forwards all arguments to the internal
+   implementation.  */
+
+int
+backtrace_uncompress_lzma (struct backtrace_state *state,
+			   const unsigned char *compressed,
+			   size_t compressed_size,
+			   backtrace_error_callback error_callback,
+			   void *data, unsigned char **uncompressed,
+			   size_t *uncompressed_size)
+{
+  int ret;
+
+  ret = elf_uncompress_lzma (state, compressed, compressed_size,
+			     error_callback, data, uncompressed,
+			     uncompressed_size);
+  return ret;
+}
+
+/* Add the backtrace data for one ELF file. Returns 1 on success,
+ 0 on failure (in both cases descriptor is closed) or -1 if exe
+ is non-zero and the ELF file is ET_DYN, which tells the caller that
+ elf_add will need to be called on the descriptor again after
+ base_address is determined. */
+
+static int
+elf_add (struct backtrace_state *state, const char *filename, int descriptor,
+ const unsigned char *memory, size_t memory_size,
+ uintptr_t base_address, backtrace_error_callback error_callback,
+ void *data, fileline *fileline_fn, int *found_sym, int *found_dwarf,
+ struct dwarf_data **fileline_entry, int exe, int debuginfo,
+ const char *with_buildid_data, uint32_t with_buildid_size)
+{
+ struct elf_view ehdr_view;
+ b_elf_ehdr ehdr;
+ off_t shoff;
+ unsigned int shnum;
+ unsigned int shstrndx;
+ struct elf_view shdrs_view;
+ int shdrs_view_valid;
+ const b_elf_shdr *shdrs;
+ const b_elf_shdr *shstrhdr;
+ size_t shstr_size;
+ off_t shstr_off;
+ struct elf_view names_view;
+ int names_view_valid;
+ const char *names;
+ unsigned int symtab_shndx;
+ unsigned int dynsym_shndx;
+ unsigned int i;
+ struct debug_section_info sections[DEBUG_MAX];
+ struct debug_section_info zsections[DEBUG_MAX];
+ struct elf_view symtab_view;
+ int symtab_view_valid;
+ struct elf_view strtab_view;
+ int strtab_view_valid;
+ struct elf_view buildid_view;
+ int buildid_view_valid;
+ const char *buildid_data;
+ uint32_t buildid_size;
+ struct elf_view debuglink_view;
+ int debuglink_view_valid;
+ const char *debuglink_name;
+ uint32_t debuglink_crc;
+ struct elf_view debugaltlink_view;
+ int debugaltlink_view_valid;
+ const char *debugaltlink_name;
+ const char *debugaltlink_buildid_data;
+ uint32_t debugaltlink_buildid_size;
+ struct elf_view gnu_debugdata_view;
+ int gnu_debugdata_view_valid;
+ size_t gnu_debugdata_size;
+ unsigned char *gnu_debugdata_uncompressed;
+ size_t gnu_debugdata_uncompressed_size;
+ off_t min_offset;
+ off_t max_offset;
+ off_t debug_size;
+ struct elf_view debug_view;
+ int debug_view_valid;
+ unsigned int using_debug_view;
+ uint16_t *zdebug_table;
+ struct elf_view split_debug_view[DEBUG_MAX];
+ unsigned char split_debug_view_valid[DEBUG_MAX];
+ struct elf_ppc64_opd_data opd_data, *opd;
+ struct dwarf_sections dwarf_sections;
+ struct dwarf_data *fileline_altlink = NULL;
+
+ if (!debuginfo)
+ {
+ *found_sym = 0;
+ *found_dwarf = 0;
+ }
+
+ shdrs_view_valid = 0;
+ names_view_valid = 0;
+ symtab_view_valid = 0;
+ strtab_view_valid = 0;
+ buildid_view_valid = 0;
+ buildid_data = NULL;
+ buildid_size = 0;
+ debuglink_view_valid = 0;
+ debuglink_name = NULL;
+ debuglink_crc = 0;
+ debugaltlink_view_valid = 0;
+ debugaltlink_name = NULL;
+ debugaltlink_buildid_data = NULL;
+ debugaltlink_buildid_size = 0;
+ gnu_debugdata_view_valid = 0;
+ gnu_debugdata_size = 0;
+ debug_view_valid = 0;
+ memset (&split_debug_view_valid[0], 0, sizeof split_debug_view_valid);
+ opd = NULL;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size, 0, sizeof ehdr,
+ error_callback, data, &ehdr_view))
+ goto fail;
+
+ memcpy (&ehdr, ehdr_view.view.data, sizeof ehdr);
+
+ elf_release_view (state, &ehdr_view, error_callback, data);
+
+ if (ehdr.e_ident[EI_MAG0] != ELFMAG0
+ || ehdr.e_ident[EI_MAG1] != ELFMAG1
+ || ehdr.e_ident[EI_MAG2] != ELFMAG2
+ || ehdr.e_ident[EI_MAG3] != ELFMAG3)
+ {
+ error_callback (data, "executable file is not ELF", 0);
+ goto fail;
+ }
+ if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
+ {
+ error_callback (data, "executable file is unrecognized ELF version", 0);
+ goto fail;
+ }
+
+#if BACKTRACE_ELF_SIZE == 32
+#define BACKTRACE_ELFCLASS ELFCLASS32
+#else
+#define BACKTRACE_ELFCLASS ELFCLASS64
+#endif
+
+ if (ehdr.e_ident[EI_CLASS] != BACKTRACE_ELFCLASS)
+ {
+ error_callback (data, "executable file is unexpected ELF class", 0);
+ goto fail;
+ }
+
+ if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB
+ && ehdr.e_ident[EI_DATA] != ELFDATA2MSB)
+ {
+ error_callback (data, "executable file has unknown endianness", 0);
+ goto fail;
+ }
+
+ /* If the executable is ET_DYN, it is either a PIE, or we are running
+ directly a shared library with .interp. We need to wait for
+ dl_iterate_phdr in that case to determine the actual base_address. */
+ if (exe && ehdr.e_type == ET_DYN)
+ return -1;
+
+ shoff = ehdr.e_shoff;
+ shnum = ehdr.e_shnum;
+ shstrndx = ehdr.e_shstrndx;
+
+ if ((shnum == 0 || shstrndx == SHN_XINDEX)
+ && shoff != 0)
+ {
+ struct elf_view shdr_view;
+ const b_elf_shdr *shdr;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size, shoff,
+ sizeof shdr, error_callback, data, &shdr_view))
+ goto fail;
+
+ shdr = (const b_elf_shdr *) shdr_view.view.data;
+
+ if (shnum == 0)
+ shnum = shdr->sh_size;
+
+ if (shstrndx == SHN_XINDEX)
+ {
+ shstrndx = shdr->sh_link;
+
+ /* Versions of the GNU binutils between 2.12 and 2.18 did
+ not handle objects with more than SHN_LORESERVE sections
+ correctly. All large section indexes were offset by
+ 0x100. There is more information at
+ http://sourceware.org/bugzilla/show_bug.cgi?id-5900 .
+ Fortunately these object files are easy to detect, as the
+ GNU binutils always put the section header string table
+ near the end of the list of sections. Thus if the
+ section header string table index is larger than the
+ number of sections, then we know we have to subtract
+ 0x100 to get the real section index. */
+ if (shstrndx >= shnum && shstrndx >= SHN_LORESERVE + 0x100)
+ shstrndx -= 0x100;
+ }
+
+ elf_release_view (state, &shdr_view, error_callback, data);
+ }
+
+ if (shnum == 0 || shstrndx == 0)
+ goto fail;
+
+ /* To translate PC to file/line when using DWARF, we need to find
+ the .debug_info and .debug_line sections. */
+
+ /* Read the section headers, skipping the first one. */
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ shoff + sizeof (b_elf_shdr),
+ (shnum - 1) * sizeof (b_elf_shdr),
+ error_callback, data, &shdrs_view))
+ goto fail;
+ shdrs_view_valid = 1;
+ shdrs = (const b_elf_shdr *) shdrs_view.view.data;
+
+ /* Read the section names. */
+
+ shstrhdr = &shdrs[shstrndx - 1];
+ shstr_size = shstrhdr->sh_size;
+ shstr_off = shstrhdr->sh_offset;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size, shstr_off,
+ shstrhdr->sh_size, error_callback, data, &names_view))
+ goto fail;
+ names_view_valid = 1;
+ names = (const char *) names_view.view.data;
+
+ symtab_shndx = 0;
+ dynsym_shndx = 0;
+
+ memset (sections, 0, sizeof sections);
+ memset (zsections, 0, sizeof zsections);
+
+ /* Look for the symbol table. */
+ for (i = 1; i < shnum; ++i)
+ {
+ const b_elf_shdr *shdr;
+ unsigned int sh_name;
+ const char *name;
+ int j;
+
+ shdr = &shdrs[i - 1];
+
+ if (shdr->sh_type == SHT_SYMTAB)
+ symtab_shndx = i;
+ else if (shdr->sh_type == SHT_DYNSYM)
+ dynsym_shndx = i;
+
+ sh_name = shdr->sh_name;
+ if (sh_name >= shstr_size)
+ {
+ error_callback (data, "ELF section name out of range", 0);
+ goto fail;
+ }
+
+ name = names + sh_name;
+
+ for (j = 0; j < (int) DEBUG_MAX; ++j)
+ {
+ if (strcmp (name, dwarf_section_names[j]) == 0)
+ {
+ sections[j].offset = shdr->sh_offset;
+ sections[j].size = shdr->sh_size;
+ sections[j].compressed = (shdr->sh_flags & SHF_COMPRESSED) != 0;
+ break;
+ }
+ }
+
+ if (name[0] == '.' && name[1] == 'z')
+ {
+ for (j = 0; j < (int) DEBUG_MAX; ++j)
+ {
+ if (strcmp (name + 2, dwarf_section_names[j] + 1) == 0)
+ {
+ zsections[j].offset = shdr->sh_offset;
+ zsections[j].size = shdr->sh_size;
+ break;
+ }
+ }
+ }
+
+ /* Read the build ID if present. This could check for any
+ SHT_NOTE section with the right note name and type, but gdb
+ looks for a specific section name. */
+ if ((!debuginfo || with_buildid_data != NULL)
+ && !buildid_view_valid
+ && strcmp (name, ".note.gnu.build-id") == 0)
+ {
+ const b_elf_note *note;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ shdr->sh_offset, shdr->sh_size, error_callback,
+ data, &buildid_view))
+ goto fail;
+
+ buildid_view_valid = 1;
+ note = (const b_elf_note *) buildid_view.view.data;
+ if (note->type == NT_GNU_BUILD_ID
+ && note->namesz == 4
+ && strncmp (note->name, "GNU", 4) == 0
+ && shdr->sh_size <= 12 + ((note->namesz + 3) & ~ 3) + note->descsz)
+ {
+ buildid_data = &note->name[0] + ((note->namesz + 3) & ~ 3);
+ buildid_size = note->descsz;
+ }
+
+ if (with_buildid_size != 0)
+ {
+ if (buildid_size != with_buildid_size)
+ goto fail;
+
+ if (memcmp (buildid_data, with_buildid_data, buildid_size) != 0)
+ goto fail;
+ }
+ }
+
+ /* Read the debuglink file if present. */
+ if (!debuginfo
+ && !debuglink_view_valid
+ && strcmp (name, ".gnu_debuglink") == 0)
+ {
+ const char *debuglink_data;
+ size_t crc_offset;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ shdr->sh_offset, shdr->sh_size, error_callback,
+ data, &debuglink_view))
+ goto fail;
+
+ debuglink_view_valid = 1;
+ debuglink_data = (const char *) debuglink_view.view.data;
+ crc_offset = strnlen (debuglink_data, shdr->sh_size);
+ crc_offset = (crc_offset + 3) & ~3;
+ if (crc_offset + 4 <= shdr->sh_size)
+ {
+ debuglink_name = debuglink_data;
+ debuglink_crc = *(const uint32_t*)(debuglink_data + crc_offset);
+ }
+ }
+
+ if (!debugaltlink_view_valid
+ && strcmp (name, ".gnu_debugaltlink") == 0)
+ {
+ const char *debugaltlink_data;
+ size_t debugaltlink_name_len;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ shdr->sh_offset, shdr->sh_size, error_callback,
+ data, &debugaltlink_view))
+ goto fail;
+
+ debugaltlink_view_valid = 1;
+ debugaltlink_data = (const char *) debugaltlink_view.view.data;
+ debugaltlink_name = debugaltlink_data;
+ debugaltlink_name_len = strnlen (debugaltlink_data, shdr->sh_size);
+ if (debugaltlink_name_len < shdr->sh_size)
+ {
+ /* Include terminating zero. */
+ debugaltlink_name_len += 1;
+
+ debugaltlink_buildid_data
+ = debugaltlink_data + debugaltlink_name_len;
+ debugaltlink_buildid_size = shdr->sh_size - debugaltlink_name_len;
+ }
+ }
+
+ if (!gnu_debugdata_view_valid
+ && strcmp (name, ".gnu_debugdata") == 0)
+ {
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ shdr->sh_offset, shdr->sh_size, error_callback,
+ data, &gnu_debugdata_view))
+ goto fail;
+
+ gnu_debugdata_size = shdr->sh_size;
+ gnu_debugdata_view_valid = 1;
+ }
+
+ /* Read the .opd section on PowerPC64 ELFv1. */
+ if (ehdr.e_machine == EM_PPC64
+ && (ehdr.e_flags & EF_PPC64_ABI) < 2
+ && shdr->sh_type == SHT_PROGBITS
+ && strcmp (name, ".opd") == 0)
+ {
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ shdr->sh_offset, shdr->sh_size, error_callback,
+ data, &opd_data.view))
+ goto fail;
+
+ opd = &opd_data;
+ opd->addr = shdr->sh_addr;
+ opd->data = (const char *) opd_data.view.view.data;
+ opd->size = shdr->sh_size;
+ }
+ }
+
+ if (symtab_shndx == 0)
+ symtab_shndx = dynsym_shndx;
+ if (symtab_shndx != 0 && !debuginfo)
+ {
+ const b_elf_shdr *symtab_shdr;
+ unsigned int strtab_shndx;
+ const b_elf_shdr *strtab_shdr;
+ struct elf_syminfo_data *sdata;
+
+ symtab_shdr = &shdrs[symtab_shndx - 1];
+ strtab_shndx = symtab_shdr->sh_link;
+ if (strtab_shndx >= shnum)
+ {
+ error_callback (data,
+ "ELF symbol table strtab link out of range", 0);
+ goto fail;
+ }
+ strtab_shdr = &shdrs[strtab_shndx - 1];
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ symtab_shdr->sh_offset, symtab_shdr->sh_size,
+ error_callback, data, &symtab_view))
+ goto fail;
+ symtab_view_valid = 1;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ strtab_shdr->sh_offset, strtab_shdr->sh_size,
+ error_callback, data, &strtab_view))
+ goto fail;
+ strtab_view_valid = 1;
+
+ sdata = ((struct elf_syminfo_data *)
+ backtrace_alloc (state, sizeof *sdata, error_callback, data));
+ if (sdata == NULL)
+ goto fail;
+
+ if (!elf_initialize_syminfo (state, base_address,
+ (const unsigned char*)symtab_view.view.data, symtab_shdr->sh_size,
+ (const unsigned char*)strtab_view.view.data, strtab_shdr->sh_size,
+ error_callback, data, sdata, opd))
+ {
+ backtrace_free (state, sdata, sizeof *sdata, error_callback, data);
+ goto fail;
+ }
+
+ /* We no longer need the symbol table, but we hold on to the
+ string table permanently. */
+ elf_release_view (state, &symtab_view, error_callback, data);
+ symtab_view_valid = 0;
+ strtab_view_valid = 0;
+
+ *found_sym = 1;
+
+ elf_add_syminfo_data (state, sdata);
+ }
+
+ elf_release_view (state, &shdrs_view, error_callback, data);
+ shdrs_view_valid = 0;
+ elf_release_view (state, &names_view, error_callback, data);
+ names_view_valid = 0;
+
+ /* If the debug info is in a separate file, read that one instead. */
+
+ if (buildid_data != NULL)
+ {
+ int d;
+
+ d = elf_open_debugfile_by_buildid (state, buildid_data, buildid_size,
+ error_callback, data);
+ if (d >= 0)
+ {
+ int ret;
+
+ elf_release_view (state, &buildid_view, error_callback, data);
+ if (debuglink_view_valid)
+ elf_release_view (state, &debuglink_view, error_callback, data);
+ if (debugaltlink_view_valid)
+ elf_release_view (state, &debugaltlink_view, error_callback, data);
+ ret = elf_add (state, "", d, NULL, 0, base_address, error_callback,
+ data, fileline_fn, found_sym, found_dwarf, NULL, 0,
+ 1, NULL, 0);
+ if (ret < 0)
+ backtrace_close (d, error_callback, data);
+ else if (descriptor >= 0)
+ backtrace_close (descriptor, error_callback, data);
+ return ret;
+ }
+ }
+
+ if (buildid_view_valid)
+ {
+ elf_release_view (state, &buildid_view, error_callback, data);
+ buildid_view_valid = 0;
+ }
+
+ if (opd)
+ {
+ elf_release_view (state, &opd->view, error_callback, data);
+ opd = NULL;
+ }
+
+ if (debuglink_name != NULL)
+ {
+ int d;
+
+ d = elf_open_debugfile_by_debuglink (state, filename, debuglink_name,
+ debuglink_crc, error_callback,
+ data);
+ if (d >= 0)
+ {
+ int ret;
+
+ elf_release_view (state, &debuglink_view, error_callback, data);
+ if (debugaltlink_view_valid)
+ elf_release_view (state, &debugaltlink_view, error_callback, data);
+ ret = elf_add (state, "", d, NULL, 0, base_address, error_callback,
+ data, fileline_fn, found_sym, found_dwarf, NULL, 0,
+ 1, NULL, 0);
+ if (ret < 0)
+ backtrace_close (d, error_callback, data);
+ else if (descriptor >= 0)
+ backtrace_close(descriptor, error_callback, data);
+ return ret;
+ }
+ }
+
+ if (debuglink_view_valid)
+ {
+ elf_release_view (state, &debuglink_view, error_callback, data);
+ debuglink_view_valid = 0;
+ }
+
+ if (debugaltlink_name != NULL)
+ {
+ int d;
+
+ d = elf_open_debugfile_by_debuglink (state, filename, debugaltlink_name,
+ 0, error_callback, data);
+ if (d >= 0)
+ {
+ int ret;
+
+ ret = elf_add (state, filename, d, NULL, 0, base_address,
+ error_callback, data, fileline_fn, found_sym,
+ found_dwarf, &fileline_altlink, 0, 1,
+ debugaltlink_buildid_data, debugaltlink_buildid_size);
+ elf_release_view (state, &debugaltlink_view, error_callback, data);
+ debugaltlink_view_valid = 0;
+ if (ret < 0)
+ {
+ backtrace_close (d, error_callback, data);
+ return ret;
+ }
+ }
+ }
+
+ if (debugaltlink_view_valid)
+ {
+ elf_release_view (state, &debugaltlink_view, error_callback, data);
+ debugaltlink_view_valid = 0;
+ }
+
+ if (gnu_debugdata_view_valid)
+ {
+ int ret;
+
+ ret = elf_uncompress_lzma (state,
+ ((const unsigned char *)
+ gnu_debugdata_view.view.data),
+ gnu_debugdata_size, error_callback, data,
+ &gnu_debugdata_uncompressed,
+ &gnu_debugdata_uncompressed_size);
+
+ elf_release_view (state, &gnu_debugdata_view, error_callback, data);
+ gnu_debugdata_view_valid = 0;
+
+ if (ret)
+ {
+ ret = elf_add (state, filename, -1, gnu_debugdata_uncompressed,
+ gnu_debugdata_uncompressed_size, base_address,
+ error_callback, data, fileline_fn, found_sym,
+ found_dwarf, NULL, 0, 0, NULL, 0);
+ if (ret >= 0 && descriptor >= 0)
+ backtrace_close(descriptor, error_callback, data);
+ return ret;
+ }
+ }
+
+ /* Read all the debug sections in a single view, since they are
+ probably adjacent in the file. If any of sections are
+ uncompressed, we never release this view. */
+
+ min_offset = 0;
+ max_offset = 0;
+ debug_size = 0;
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ off_t end;
+
+ if (sections[i].size != 0)
+ {
+ if (min_offset == 0 || sections[i].offset < min_offset)
+ min_offset = sections[i].offset;
+ end = sections[i].offset + sections[i].size;
+ if (end > max_offset)
+ max_offset = end;
+ debug_size += sections[i].size;
+ }
+ if (zsections[i].size != 0)
+ {
+ if (min_offset == 0 || zsections[i].offset < min_offset)
+ min_offset = zsections[i].offset;
+ end = zsections[i].offset + zsections[i].size;
+ if (end > max_offset)
+ max_offset = end;
+ debug_size += zsections[i].size;
+ }
+ }
+ if (min_offset == 0 || max_offset == 0)
+ {
+ if (descriptor >= 0)
+ {
+ if (!backtrace_close (descriptor, error_callback, data))
+ goto fail;
+ }
+ return 1;
+ }
+
+ /* If the total debug section size is large, assume that there are
+ gaps between the sections, and read them individually. */
+
+ if (max_offset - min_offset < 0x20000000
+ || max_offset - min_offset < debug_size + 0x10000)
+ {
+ if (!elf_get_view (state, descriptor, memory, memory_size, min_offset,
+ max_offset - min_offset, error_callback, data,
+ &debug_view))
+ goto fail;
+ debug_view_valid = 1;
+ }
+ else
+ {
+ memset (&split_debug_view[0], 0, sizeof split_debug_view);
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ struct debug_section_info *dsec;
+
+ if (sections[i].size != 0)
+ dsec = &sections[i];
+ else if (zsections[i].size != 0)
+ dsec = &zsections[i];
+ else
+ continue;
+
+ if (!elf_get_view (state, descriptor, memory, memory_size,
+ dsec->offset, dsec->size, error_callback, data,
+ &split_debug_view[i]))
+ goto fail;
+ split_debug_view_valid[i] = 1;
+
+ if (sections[i].size != 0)
+ sections[i].data = ((const unsigned char *)
+ split_debug_view[i].view.data);
+ else
+ zsections[i].data = ((const unsigned char *)
+ split_debug_view[i].view.data);
+ }
+ }
+
+ /* We've read all we need from the executable. */
+ if (descriptor >= 0)
+ {
+ if (!backtrace_close (descriptor, error_callback, data))
+ goto fail;
+ descriptor = -1;
+ }
+
+ using_debug_view = 0;
+ if (debug_view_valid)
+ {
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ if (sections[i].size == 0)
+ sections[i].data = NULL;
+ else
+ {
+ sections[i].data = ((const unsigned char *) debug_view.view.data
+ + (sections[i].offset - min_offset));
+ ++using_debug_view;
+ }
+
+ if (zsections[i].size == 0)
+ zsections[i].data = NULL;
+ else
+ zsections[i].data = ((const unsigned char *) debug_view.view.data
+ + (zsections[i].offset - min_offset));
+ }
+ }
+
+ /* Uncompress the old format (--compress-debug-sections=zlib-gnu). */
+
+ zdebug_table = NULL;
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ if (sections[i].size == 0 && zsections[i].size > 0)
+ {
+ unsigned char *uncompressed_data;
+ size_t uncompressed_size;
+
+ if (zdebug_table == NULL)
+ {
+ zdebug_table = ((uint16_t *)
+ backtrace_alloc (state, ZDEBUG_TABLE_SIZE,
+ error_callback, data));
+ if (zdebug_table == NULL)
+ goto fail;
+ }
+
+ uncompressed_data = NULL;
+ uncompressed_size = 0;
+ if (!elf_uncompress_zdebug (state, zsections[i].data,
+ zsections[i].size, zdebug_table,
+ error_callback, data,
+ &uncompressed_data, &uncompressed_size))
+ goto fail;
+ sections[i].data = uncompressed_data;
+ sections[i].size = uncompressed_size;
+ sections[i].compressed = 0;
+
+ if (split_debug_view_valid[i])
+ {
+ elf_release_view (state, &split_debug_view[i],
+ error_callback, data);
+ split_debug_view_valid[i] = 0;
+ }
+ }
+ }
+
+ /* Uncompress the official ELF format
+ (--compress-debug-sections=zlib-gabi). */
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ unsigned char *uncompressed_data;
+ size_t uncompressed_size;
+
+ if (sections[i].size == 0 || !sections[i].compressed)
+ continue;
+
+ if (zdebug_table == NULL)
+ {
+ zdebug_table = ((uint16_t *)
+ backtrace_alloc (state, ZDEBUG_TABLE_SIZE,
+ error_callback, data));
+ if (zdebug_table == NULL)
+ goto fail;
+ }
+
+ uncompressed_data = NULL;
+ uncompressed_size = 0;
+ if (!elf_uncompress_chdr (state, sections[i].data, sections[i].size,
+ zdebug_table, error_callback, data,
+ &uncompressed_data, &uncompressed_size))
+ goto fail;
+ sections[i].data = uncompressed_data;
+ sections[i].size = uncompressed_size;
+ sections[i].compressed = 0;
+
+ if (debug_view_valid)
+ --using_debug_view;
+ else if (split_debug_view_valid[i])
+ {
+ elf_release_view (state, &split_debug_view[i], error_callback, data);
+ split_debug_view_valid[i] = 0;
+ }
+ }
+
+ if (zdebug_table != NULL)
+ backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
+ error_callback, data);
+
+ if (debug_view_valid && using_debug_view == 0)
+ {
+ elf_release_view (state, &debug_view, error_callback, data);
+ debug_view_valid = 0;
+ }
+
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ dwarf_sections.data[i] = sections[i].data;
+ dwarf_sections.size[i] = sections[i].size;
+ }
+
+ if (!backtrace_dwarf_add (state, base_address, &dwarf_sections,
+ ehdr.e_ident[EI_DATA] == ELFDATA2MSB,
+ fileline_altlink,
+ error_callback, data, fileline_fn,
+ fileline_entry))
+ goto fail;
+
+ *found_dwarf = 1;
+
+ return 1;
+
+ fail:
+ if (shdrs_view_valid)
+ elf_release_view (state, &shdrs_view, error_callback, data);
+ if (names_view_valid)
+ elf_release_view (state, &names_view, error_callback, data);
+ if (symtab_view_valid)
+ elf_release_view (state, &symtab_view, error_callback, data);
+ if (strtab_view_valid)
+ elf_release_view (state, &strtab_view, error_callback, data);
+ if (debuglink_view_valid)
+ elf_release_view (state, &debuglink_view, error_callback, data);
+ if (debugaltlink_view_valid)
+ elf_release_view (state, &debugaltlink_view, error_callback, data);
+ if (gnu_debugdata_view_valid)
+ elf_release_view (state, &gnu_debugdata_view, error_callback, data);
+ if (buildid_view_valid)
+ elf_release_view (state, &buildid_view, error_callback, data);
+ if (debug_view_valid)
+ elf_release_view (state, &debug_view, error_callback, data);
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ if (split_debug_view_valid[i])
+ elf_release_view (state, &split_debug_view[i], error_callback, data);
+ }
+ if (opd)
+ elf_release_view (state, &opd->view, error_callback, data);
+ if (descriptor >= 0)
+ backtrace_close (descriptor, error_callback, data);
+ return 0;
+}
+
+/* Data passed to phdr_callback. */
+
+struct phdr_data
+{
+ struct backtrace_state *state;
+ backtrace_error_callback error_callback;
+ void *data;
+ fileline *fileline_fn;
+ int *found_sym;
+ int *found_dwarf;
+ const char *exe_filename;
+ int exe_descriptor;
+};
+
+/* Callback passed to dl_iterate_phdr. Load debug info from shared
+ libraries. */
+
+static int
+#ifdef __i386__
+__attribute__ ((__force_align_arg_pointer__))
+#endif
+phdr_callback (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED,
+ void *pdata)
+{
+ struct phdr_data *pd = (struct phdr_data *) pdata;
+ const char *filename;
+ int descriptor;
+ int does_not_exist;
+ fileline elf_fileline_fn;
+ int found_dwarf;
+
+ /* There is not much we can do if we don't have the module name,
+ unless executable is ET_DYN, where we expect the very first
+ phdr_callback to be for the PIE. */
+ if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0')
+ {
+ if (pd->exe_descriptor == -1)
+ return 0;
+ filename = pd->exe_filename;
+ descriptor = pd->exe_descriptor;
+ pd->exe_descriptor = -1;
+ }
+ else
+ {
+ if (pd->exe_descriptor != -1)
+ {
+ backtrace_close (pd->exe_descriptor, pd->error_callback, pd->data);
+ pd->exe_descriptor = -1;
+ }
+
+ filename = info->dlpi_name;
+ descriptor = backtrace_open (info->dlpi_name, pd->error_callback,
+ pd->data, &does_not_exist);
+ if (descriptor < 0)
+ return 0;
+ }
+
+ if (elf_add (pd->state, filename, descriptor, NULL, 0, info->dlpi_addr,
+ pd->error_callback, pd->data, &elf_fileline_fn, pd->found_sym,
+ &found_dwarf, NULL, 0, 0, NULL, 0))
+ {
+ if (found_dwarf)
+ {
+ *pd->found_dwarf = 1;
+ *pd->fileline_fn = elf_fileline_fn;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize the backtrace data we need from an ELF executable. At
+ the ELF level, all we need to do is find the debug info
+ sections. */
+
+int
+backtrace_initialize (struct backtrace_state *state, const char *filename,
+ int descriptor, backtrace_error_callback error_callback,
+ void *data, fileline *fileline_fn)
+{
+ int ret;
+ int found_sym;
+ int found_dwarf;
+ fileline elf_fileline_fn = elf_nodebug;
+ struct phdr_data pd;
+
+ ret = elf_add (state, filename, descriptor, NULL, 0, 0, error_callback, data,
+ &elf_fileline_fn, &found_sym, &found_dwarf, NULL, 1, 0, NULL,
+ 0);
+ if (!ret)
+ return 0;
+
+ pd.state = state;
+ pd.error_callback = error_callback;
+ pd.data = data;
+ pd.fileline_fn = &elf_fileline_fn;
+ pd.found_sym = &found_sym;
+ pd.found_dwarf = &found_dwarf;
+ pd.exe_filename = filename;
+ pd.exe_descriptor = ret < 0 ? descriptor : -1;
+
+ dl_iterate_phdr (phdr_callback, (void *) &pd);
+
+ if (!state->threaded)
+ {
+ if (found_sym)
+ state->syminfo_fn = elf_syminfo;
+ else if (state->syminfo_fn == NULL)
+ state->syminfo_fn = elf_nosyms;
+ }
+ else
+ {
+ if (found_sym)
+ backtrace_atomic_store_pointer (&state->syminfo_fn, &elf_syminfo);
+ else
+ (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL,
+ elf_nosyms);
+ }
+
+ if (!state->threaded)
+ *fileline_fn = state->fileline_fn;
+ else
+ *fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
+
+ if (*fileline_fn == NULL || *fileline_fn == elf_nodebug)
+ *fileline_fn = elf_fileline_fn;
+
+ return 1;
+}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/fileline.cpp b/3rdparty/tracy/tracy/libbacktrace/fileline.cpp
new file mode 100644
index 0000000..8645d75
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/fileline.cpp
@@ -0,0 +1,351 @@
+/* fileline.c -- Get file and line number information in a backtrace.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#if defined (HAVE_KERN_PROC_ARGS) || defined (HAVE_KERN_PROC)
+#include <sys/sysctl.h>
+#endif
+
+#ifdef HAVE_MACH_O_DYLD_H
+#include <mach-o/dyld.h>
+#endif
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+#ifndef HAVE_GETEXECNAME
+#define getexecname() NULL
+#endif
+
+namespace tracy
+{
+
+#if !defined (HAVE_KERN_PROC_ARGS) && !defined (HAVE_KERN_PROC)
+
+#define sysctl_exec_name1(state, error_callback, data) NULL
+#define sysctl_exec_name2(state, error_callback, data) NULL
+
+#else /* defined (HAVE_KERN_PROC_ARGS) || defined (HAVE_KERN_PROC) */
+
+static char *
+sysctl_exec_name (struct backtrace_state *state,
+ int mib0, int mib1, int mib2, int mib3,
+ backtrace_error_callback error_callback, void *data)
+{
+ int mib[4];
+ size_t len;
+ char *name;
+ size_t rlen;
+
+ mib[0] = mib0;
+ mib[1] = mib1;
+ mib[2] = mib2;
+ mib[3] = mib3;
+
+ if (sysctl (mib, 4, NULL, &len, NULL, 0) < 0)
+ return NULL;
+ name = (char *) backtrace_alloc (state, len, error_callback, data);
+ if (name == NULL)
+ return NULL;
+ rlen = len;
+ if (sysctl (mib, 4, name, &rlen, NULL, 0) < 0)
+ {
+ backtrace_free (state, name, len, error_callback, data);
+ return NULL;
+ }
+ return name;
+}
+
+#ifdef HAVE_KERN_PROC_ARGS
+
+static char *
+sysctl_exec_name1 (struct backtrace_state *state,
+ backtrace_error_callback error_callback, void *data)
+{
+ /* This variant is used on NetBSD. */
+ return sysctl_exec_name (state, CTL_KERN, KERN_PROC_ARGS, -1,
+ KERN_PROC_PATHNAME, error_callback, data);
+}
+
+#else
+
+#define sysctl_exec_name1(state, error_callback, data) NULL
+
+#endif
+
+#ifdef HAVE_KERN_PROC
+
+static char *
+sysctl_exec_name2 (struct backtrace_state *state,
+ backtrace_error_callback error_callback, void *data)
+{
+ /* This variant is used on FreeBSD. */
+ return sysctl_exec_name (state, CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1,
+ error_callback, data);
+}
+
+#else
+
+#define sysctl_exec_name2(state, error_callback, data) NULL
+
+#endif
+
+#endif /* defined (HAVE_KERN_PROC_ARGS) || defined (HAVE_KERN_PROC) */
+
+#ifdef HAVE_MACH_O_DYLD_H
+
+static char *
+macho_get_executable_path (struct backtrace_state *state,
+ backtrace_error_callback error_callback, void *data)
+{
+ uint32_t len;
+ char *name;
+
+ len = 0;
+ if (_NSGetExecutablePath (NULL, &len) == 0)
+ return NULL;
+ name = (char *) backtrace_alloc (state, len, error_callback, data);
+ if (name == NULL)
+ return NULL;
+ if (_NSGetExecutablePath (name, &len) != 0)
+ {
+ backtrace_free (state, name, len, error_callback, data);
+ return NULL;
+ }
+ return name;
+}
+
+#else /* !defined (HAVE_MACH_O_DYLD_H) */
+
+#define macho_get_executable_path(state, error_callback, data) NULL
+
+#endif /* !defined (HAVE_MACH_O_DYLD_H) */
+
+/* Initialize the fileline information from the executable. Returns 1
+ on success, 0 on failure. */
+
+static int
+fileline_initialize (struct backtrace_state *state,
+ backtrace_error_callback error_callback, void *data)
+{
+ int failed;
+ fileline fileline_fn;
+ int pass;
+ int called_error_callback;
+ int descriptor;
+ const char *filename;
+ char buf[64];
+
+ if (!state->threaded)
+ failed = state->fileline_initialization_failed;
+ else
+ failed = backtrace_atomic_load_int (&state->fileline_initialization_failed);
+
+ if (failed)
+ {
+ error_callback (data, "failed to read executable information", -1);
+ return 0;
+ }
+
+ if (!state->threaded)
+ fileline_fn = state->fileline_fn;
+ else
+ fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
+ if (fileline_fn != NULL)
+ return 1;
+
+ /* We have not initialized the information. Do it now. */
+
+ descriptor = -1;
+ called_error_callback = 0;
+ for (pass = 0; pass < 8; ++pass)
+ {
+ int does_not_exist;
+
+ switch (pass)
+ {
+ case 0:
+ filename = state->filename;
+ break;
+ case 1:
+ filename = getexecname ();
+ break;
+ case 2:
+ filename = "/proc/self/exe";
+ break;
+ case 3:
+ filename = "/proc/curproc/file";
+ break;
+ case 4:
+ snprintf (buf, sizeof (buf), "/proc/%ld/object/a.out",
+ (long) getpid ());
+ filename = buf;
+ break;
+ case 5:
+ filename = sysctl_exec_name1 (state, error_callback, data);
+ break;
+ case 6:
+ filename = sysctl_exec_name2 (state, error_callback, data);
+ break;
+ case 7:
+ filename = macho_get_executable_path (state, error_callback, data);
+ break;
+ default:
+ abort ();
+ }
+
+ if (filename == NULL)
+ continue;
+
+ descriptor = backtrace_open (filename, error_callback, data,
+ &does_not_exist);
+ if (descriptor < 0 && !does_not_exist)
+ {
+ called_error_callback = 1;
+ break;
+ }
+ if (descriptor >= 0)
+ break;
+ }
+
+ if (descriptor < 0)
+ {
+ if (!called_error_callback)
+ {
+ if (state->filename != NULL)
+ error_callback (data, state->filename, ENOENT);
+ else
+ error_callback (data,
+ "libbacktrace could not find executable to open",
+ 0);
+ }
+ failed = 1;
+ }
+
+ if (!failed)
+ {
+ if (!backtrace_initialize (state, filename, descriptor, error_callback,
+ data, &fileline_fn))
+ failed = 1;
+ }
+
+ if (failed)
+ {
+ if (!state->threaded)
+ state->fileline_initialization_failed = 1;
+ else
+ backtrace_atomic_store_int (&state->fileline_initialization_failed, 1);
+ return 0;
+ }
+
+ if (!state->threaded)
+ state->fileline_fn = fileline_fn;
+ else
+ {
+ backtrace_atomic_store_pointer (&state->fileline_fn, fileline_fn);
+
+ /* Note that if two threads initialize at once, one of the data
+ sets may be leaked. */
+ }
+
+ return 1;
+}
+
+/* Given a PC, find the file name, line number, and function name. */
+
+int
+backtrace_pcinfo (struct backtrace_state *state, uintptr_t pc,
+ backtrace_full_callback callback,
+ backtrace_error_callback error_callback, void *data)
+{
+ if (!fileline_initialize (state, error_callback, data))
+ return 0;
+
+ if (state->fileline_initialization_failed)
+ return 0;
+
+ return state->fileline_fn (state, pc, callback, error_callback, data);
+}
+
+/* Given a PC, find the symbol for it, and its value. */
+
+int
+backtrace_syminfo (struct backtrace_state *state, uintptr_t pc,
+ backtrace_syminfo_callback callback,
+ backtrace_error_callback error_callback, void *data)
+{
+ if (!fileline_initialize (state, error_callback, data))
+ return 0;
+
+ if (state->fileline_initialization_failed)
+ return 0;
+
+ state->syminfo_fn (state, pc, callback, error_callback, data);
+ return 1;
+}
+
+/* A backtrace_syminfo_callback that can call into a
+ backtrace_full_callback, used when we have a symbol table but no
+ debug info. */
+
+void
+backtrace_syminfo_to_full_callback (void *data, uintptr_t pc,
+ const char *symname,
+ uintptr_t symval ATTRIBUTE_UNUSED,
+ uintptr_t symsize ATTRIBUTE_UNUSED)
+{
+ struct backtrace_call_full *bdata = (struct backtrace_call_full *) data;
+
+ bdata->ret = bdata->full_callback (bdata->full_data, pc, 0, NULL, 0, symname);
+}
+
+/* An error callback that corresponds to
+ backtrace_syminfo_to_full_callback. */
+
+void
+backtrace_syminfo_to_full_error_callback (void *data, const char *msg,
+ int errnum)
+{
+ struct backtrace_call_full *bdata = (struct backtrace_call_full *) data;
+
+ bdata->full_error_callback (bdata->full_data, msg, errnum);
+}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/filenames.hpp b/3rdparty/tracy/tracy/libbacktrace/filenames.hpp
new file mode 100644
index 0000000..aa7bd7a
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/filenames.hpp
@@ -0,0 +1,52 @@
+/* btest.c -- Filename header for libbacktrace library
+ Copyright (C) 2012-2018 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#ifndef GCC_VERSION
+# define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+#ifndef ATTRIBUTE_UNUSED
+# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif
+
+#if defined(__MSDOS__) || defined(_WIN32) || defined(__OS2__) || defined (__CYGWIN__)
+# define IS_DIR_SEPARATOR(c) ((c) == '/' || (c) == '\\')
+# define HAS_DRIVE_SPEC(f) ((f)[0] != '\0' && (f)[1] == ':')
+# define IS_ABSOLUTE_PATH(f) (IS_DIR_SEPARATOR((f)[0]) || HAS_DRIVE_SPEC(f))
+#else
+# define IS_DIR_SEPARATOR(c) ((c) == '/')
+# define IS_ABSOLUTE_PATH(f) (IS_DIR_SEPARATOR((f)[0]))
+#endif
diff --git a/3rdparty/tracy/tracy/libbacktrace/internal.hpp b/3rdparty/tracy/tracy/libbacktrace/internal.hpp
new file mode 100644
index 0000000..96c097e
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/internal.hpp
@@ -0,0 +1,385 @@
+/* internal.h -- Internal header file for stack backtrace library.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#ifndef BACKTRACE_INTERNAL_H
+#define BACKTRACE_INTERNAL_H
+
+/* We assume that <sys/types.h> and "backtrace.h" have already been
+ included. */
+
+#ifndef GCC_VERSION
+# define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+#ifndef ATTRIBUTE_UNUSED
+# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif
+
+#ifndef ATTRIBUTE_MALLOC
+# if (GCC_VERSION >= 2096)
+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
+# else
+# define ATTRIBUTE_MALLOC
+# endif
+#endif
+
+#ifndef ATTRIBUTE_FALLTHROUGH
+# if (GCC_VERSION >= 7000)
+# define ATTRIBUTE_FALLTHROUGH __attribute__ ((__fallthrough__))
+# else
+# define ATTRIBUTE_FALLTHROUGH
+# endif
+#endif
+
+#ifndef HAVE_SYNC_FUNCTIONS
+
+/* Define out the sync functions. These should never be called if
+ they are not available. */
+
+#define __sync_bool_compare_and_swap(A, B, C) (abort(), 1)
+#define __sync_lock_test_and_set(A, B) (abort(), 0)
+#define __sync_lock_release(A) abort()
+
+#endif /* !defined (HAVE_SYNC_FUNCTIONS) */
+
+#ifdef HAVE_ATOMIC_FUNCTIONS
+
+/* We have the atomic builtin functions. */
+
+#define backtrace_atomic_load_pointer(p) \
+ __atomic_load_n ((p), __ATOMIC_ACQUIRE)
+#define backtrace_atomic_load_int(p) \
+ __atomic_load_n ((p), __ATOMIC_ACQUIRE)
+#define backtrace_atomic_store_pointer(p, v) \
+ __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
+#define backtrace_atomic_store_size_t(p, v) \
+ __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
+#define backtrace_atomic_store_int(p, v) \
+ __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
+
+#else /* !defined (HAVE_ATOMIC_FUNCTIONS) */
+#ifdef HAVE_SYNC_FUNCTIONS
+
+/* We have the sync functions but not the atomic functions. Define
+ the atomic ones in terms of the sync ones. */
+
+extern void *backtrace_atomic_load_pointer (void *);
+extern int backtrace_atomic_load_int (int *);
+extern void backtrace_atomic_store_pointer (void *, void *);
+extern void backtrace_atomic_store_size_t (size_t *, size_t);
+extern void backtrace_atomic_store_int (int *, int);
+
+#else /* !defined (HAVE_SYNC_FUNCTIONS) */
+
+/* We have neither the sync nor the atomic functions. These will
+ never be called. */
+
+#define backtrace_atomic_load_pointer(p) (abort(), (void *) NULL)
+#define backtrace_atomic_load_int(p) (abort(), 0)
+#define backtrace_atomic_store_pointer(p, v) abort()
+#define backtrace_atomic_store_size_t(p, v) abort()
+#define backtrace_atomic_store_int(p, v) abort()
+
+#endif /* !defined (HAVE_SYNC_FUNCTIONS) */
+#endif /* !defined (HAVE_ATOMIC_FUNCTIONS) */
+
+namespace tracy
+{
+
+/* The type of the function that collects file/line information. This
+ is like backtrace_pcinfo. */
+
+typedef int (*fileline) (struct backtrace_state *state, uintptr_t pc,
+ backtrace_full_callback callback,
+ backtrace_error_callback error_callback, void *data);
+
+/* The type of the function that collects symbol information. This is
+ like backtrace_syminfo. */
+
+typedef void (*syminfo) (struct backtrace_state *state, uintptr_t pc,
+ backtrace_syminfo_callback callback,
+ backtrace_error_callback error_callback, void *data);
+
+/* What the backtrace state pointer points to. */
+
+struct backtrace_state
+{
+ /* The name of the executable. */
+ const char *filename;
+ /* Non-zero if threaded. */
+ int threaded;
+ /* The master lock for fileline_fn, fileline_data, syminfo_fn,
+ syminfo_data, fileline_initialization_failed and everything the
+ data pointers point to. */
+ void *lock;
+ /* The function that returns file/line information. */
+ fileline fileline_fn;
+ /* The data to pass to FILELINE_FN. */
+ void *fileline_data;
+ /* The function that returns symbol information. */
+ syminfo syminfo_fn;
+ /* The data to pass to SYMINFO_FN. */
+ void *syminfo_data;
+ /* Whether initializing the file/line information failed. */
+ int fileline_initialization_failed;
+ /* The lock for the freelist. */
+ int lock_alloc;
+ /* The freelist when using mmap. */
+ struct backtrace_freelist_struct *freelist;
+};
+
+/* Open a file for reading. Returns -1 on error. If DOES_NOT_EXIST
+ is not NULL, *DOES_NOT_EXIST will be set to 0 normally and set to 1
+ if the file does not exist. If the file does not exist and
+ DOES_NOT_EXIST is not NULL, the function will return -1 and will
+ not call ERROR_CALLBACK. On other errors, or if DOES_NOT_EXIST is
+ NULL, the function will call ERROR_CALLBACK before returning. */
+extern int backtrace_open (const char *filename,
+ backtrace_error_callback error_callback,
+ void *data,
+ int *does_not_exist);
+
+/* A view of the contents of a file. This supports mmap when
+ available. A view will remain in memory even after backtrace_close
+ is called on the file descriptor from which the view was
+ obtained. */
+
+struct backtrace_view
+{
+ /* The data that the caller requested. */
+ const void *data;
+ /* The base of the view. */
+ void *base;
+ /* The total length of the view. */
+ size_t len;
+};
+
+/* Create a view of SIZE bytes from DESCRIPTOR at OFFSET. Store the
+ result in *VIEW. Returns 1 on success, 0 on error. */
+extern int backtrace_get_view (struct backtrace_state *state, int descriptor,
+ off_t offset, uint64_t size,
+ backtrace_error_callback error_callback,
+ void *data, struct backtrace_view *view);
+
+/* Release a view created by backtrace_get_view. */
+extern void backtrace_release_view (struct backtrace_state *state,
+ struct backtrace_view *view,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* Close a file opened by backtrace_open. Returns 1 on success, 0 on
+ error. */
+
+extern int backtrace_close (int descriptor,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* Sort without using memory. */
+
+extern void backtrace_qsort (void *base, size_t count, size_t size,
+ int (*compar) (const void *, const void *));
+
+/* Allocate memory. This is like malloc. If ERROR_CALLBACK is NULL,
+ this does not report an error, it just returns NULL. */
+
+extern void *backtrace_alloc (struct backtrace_state *state, size_t size,
+ backtrace_error_callback error_callback,
+ void *data) ATTRIBUTE_MALLOC;
+
+/* Free memory allocated by backtrace_alloc. If ERROR_CALLBACK is
+ NULL, this does not report an error. */
+
+extern void backtrace_free (struct backtrace_state *state, void *mem,
+ size_t size,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* A growable vector of some struct. This is used for more efficient
+ allocation when we don't know the final size of some group of data
+ that we want to represent as an array. */
+
+struct backtrace_vector
+{
+ /* The base of the vector. */
+ void *base;
+ /* The number of bytes in the vector. */
+ size_t size;
+ /* The number of bytes available at the current allocation. */
+ size_t alc;
+};
+
+/* Grow VEC by SIZE bytes. Return a pointer to the newly allocated
+ bytes. Note that this may move the entire vector to a new memory
+ location. Returns NULL on failure. */
+
+extern void *backtrace_vector_grow (struct backtrace_state *state, size_t size,
+ backtrace_error_callback error_callback,
+ void *data,
+ struct backtrace_vector *vec);
+
+/* Finish the current allocation on VEC. Prepare to start a new
+ allocation. The finished allocation will never be freed. Returns
+ a pointer to the base of the finished entries, or NULL on
+ failure. */
+
+extern void* backtrace_vector_finish (struct backtrace_state *state,
+ struct backtrace_vector *vec,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* Release any extra space allocated for VEC. This may change
+ VEC->base. Returns 1 on success, 0 on failure. */
+
+extern int backtrace_vector_release (struct backtrace_state *state,
+ struct backtrace_vector *vec,
+ backtrace_error_callback error_callback,
+ void *data);
+
+/* Free the space managed by VEC. This will reset VEC. */
+
+static inline void
+backtrace_vector_free (struct backtrace_state *state,
+ struct backtrace_vector *vec,
+ backtrace_error_callback error_callback, void *data)
+{
+ vec->alc += vec->size;
+ vec->size = 0;
+ backtrace_vector_release (state, vec, error_callback, data);
+}
+
+/* Read initial debug data from a descriptor, and set the
+ fileline_data, syminfo_fn, and syminfo_data fields of STATE.
+ Return the fileln_fn field in *FILELN_FN--this is done this way so
+ that the synchronization code is only implemented once. This is
+ called after the descriptor has first been opened. It will close
+ the descriptor if it is no longer needed. Returns 1 on success, 0
+ on error. There will be multiple implementations of this function,
+ for different file formats. Each system will compile the
+ appropriate one. */
+
+extern int backtrace_initialize (struct backtrace_state *state,
+ const char *filename,
+ int descriptor,
+ backtrace_error_callback error_callback,
+ void *data,
+ fileline *fileline_fn);
+
+/* An enum for the DWARF sections we care about. */
+
+enum dwarf_section
+{
+ DEBUG_INFO,
+ DEBUG_LINE,
+ DEBUG_ABBREV,
+ DEBUG_RANGES,
+ DEBUG_STR,
+ DEBUG_ADDR,
+ DEBUG_STR_OFFSETS,
+ DEBUG_LINE_STR,
+ DEBUG_RNGLISTS,
+
+ DEBUG_MAX
+};
+
+/* Data for the DWARF sections we care about. */
+
+struct dwarf_sections
+{
+ const unsigned char *data[DEBUG_MAX];
+ size_t size[DEBUG_MAX];
+};
+
+/* DWARF data read from a file, used for .gnu_debugaltlink. */
+
+struct dwarf_data;
+
+/* Add file/line information for a DWARF module. */
+
+extern int backtrace_dwarf_add (struct backtrace_state *state,
+ uintptr_t base_address,
+ const struct dwarf_sections *dwarf_sections,
+ int is_bigendian,
+ struct dwarf_data *fileline_altlink,
+ backtrace_error_callback error_callback,
+ void *data, fileline *fileline_fn,
+ struct dwarf_data **fileline_entry);
+
+/* A data structure to pass to backtrace_syminfo_to_full. */
+
+struct backtrace_call_full
+{
+ backtrace_full_callback full_callback;
+ backtrace_error_callback full_error_callback;
+ void *full_data;
+ int ret;
+};
+
+/* A backtrace_syminfo_callback that can call into a
+ backtrace_full_callback, used when we have a symbol table but no
+ debug info. */
+
+extern void backtrace_syminfo_to_full_callback (void *data, uintptr_t pc,
+ const char *symname,
+ uintptr_t symval,
+ uintptr_t symsize);
+
+/* An error callback that corresponds to
+ backtrace_syminfo_to_full_callback. */
+
+extern void backtrace_syminfo_to_full_error_callback (void *, const char *,
+ int);
+
+/* A test-only hook for elf_uncompress_zdebug. */
+
+extern int backtrace_uncompress_zdebug (struct backtrace_state *,
+ const unsigned char *compressed,
+ size_t compressed_size,
+ backtrace_error_callback, void *data,
+ unsigned char **uncompressed,
+ size_t *uncompressed_size);
+
+/* A test-only hook for elf_uncompress_lzma. */
+
+extern int backtrace_uncompress_lzma (struct backtrace_state *,
+ const unsigned char *compressed,
+ size_t compressed_size,
+ backtrace_error_callback, void *data,
+ unsigned char **uncompressed,
+ size_t *uncompressed_size);
+
+}
+
+#endif
diff --git a/3rdparty/tracy/tracy/libbacktrace/macho.cpp b/3rdparty/tracy/tracy/libbacktrace/macho.cpp
new file mode 100644
index 0000000..cb50dc5
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/macho.cpp
@@ -0,0 +1,1360 @@
+/* elf.c -- Get debug data from a Mach-O file for backtraces.
+ Copyright (C) 2020-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <sys/types.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef HAVE_MACH_O_DYLD_H
+#include <mach-o/dyld.h>
+#endif
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+namespace tracy
+{
+
+/* Mach-O file header for a 32-bit executable. */
+
+struct macho_header_32
+{
+ uint32_t magic; /* Magic number (MACH_O_MAGIC_32) */
+ uint32_t cputype; /* CPU type */
+ uint32_t cpusubtype; /* CPU subtype */
+ uint32_t filetype; /* Type of file (object, executable) */
+ uint32_t ncmds; /* Number of load commands */
+ uint32_t sizeofcmds; /* Total size of load commands */
+ uint32_t flags; /* Flags for special features */
+};
+
+/* Mach-O file header for a 64-bit executable. */
+
+struct macho_header_64
+{
+ uint32_t magic; /* Magic number (MACH_O_MAGIC_64) */
+ uint32_t cputype; /* CPU type */
+ uint32_t cpusubtype; /* CPU subtype */
+ uint32_t filetype; /* Type of file (object, executable) */
+ uint32_t ncmds; /* Number of load commands */
+ uint32_t sizeofcmds; /* Total size of load commands */
+ uint32_t flags; /* Flags for special features */
+ uint32_t reserved; /* Reserved */
+};
+
+/* Mach-O file header for a fat executable. */
+
+struct macho_header_fat
+{
+ uint32_t magic; /* Magic number (MACH_O_MH_(MAGIC|CIGAM)_FAT(_64)?) */
+ uint32_t nfat_arch; /* Number of components */
+};
+
+/* Values for the header magic field. */
+
+#define MACH_O_MH_MAGIC_32 0xfeedface
+#define MACH_O_MH_MAGIC_64 0xfeedfacf
+#define MACH_O_MH_MAGIC_FAT 0xcafebabe
+#define MACH_O_MH_CIGAM_FAT 0xbebafeca
+#define MACH_O_MH_MAGIC_FAT_64 0xcafebabf
+#define MACH_O_MH_CIGAM_FAT_64 0xbfbafeca
+
+/* Value for the header filetype field. */
+
+#define MACH_O_MH_EXECUTE 0x02
+#define MACH_O_MH_DYLIB 0x06
+#define MACH_O_MH_DSYM 0x0a
+
+/* A component of a fat file. A fat file starts with a
+ macho_header_fat followed by nfat_arch instances of this
+ struct. */
+
+struct macho_fat_arch
+{
+ uint32_t cputype; /* CPU type */
+ uint32_t cpusubtype; /* CPU subtype */
+ uint32_t offset; /* File offset of this entry */
+ uint32_t size; /* Size of this entry */
+ uint32_t align; /* Alignment of this entry */
+};
+
+/* A component of a 64-bit fat file. This is used if the magic field
+ is MAGIC_FAT_64. This is only used when some file size or file
+ offset is too large to represent in the 32-bit format. */
+
+struct macho_fat_arch_64
+{
+ uint32_t cputype; /* CPU type */
+ uint32_t cpusubtype; /* CPU subtype */
+ uint64_t offset; /* File offset of this entry */
+ uint64_t size; /* Size of this entry */
+ uint32_t align; /* Alignment of this entry */
+ uint32_t reserved; /* Reserved */
+};
+
+/* Values for the fat_arch cputype field (and the header cputype
+ field). */
+
+#define MACH_O_CPU_ARCH_ABI64 0x01000000
+
+#define MACH_O_CPU_TYPE_X86 7
+#define MACH_O_CPU_TYPE_ARM 12
+#define MACH_O_CPU_TYPE_PPC 18
+
+#define MACH_O_CPU_TYPE_X86_64 (MACH_O_CPU_TYPE_X86 | MACH_O_CPU_ARCH_ABI64)
+#define MACH_O_CPU_TYPE_ARM64 (MACH_O_CPU_TYPE_ARM | MACH_O_CPU_ARCH_ABI64)
+#define MACH_O_CPU_TYPE_PPC64 (MACH_O_CPU_TYPE_PPC | MACH_O_CPU_ARCH_ABI64)
+
+/* The header of a load command. */
+
+struct macho_load_command
+{
+ uint32_t cmd; /* The type of load command */
+ uint32_t cmdsize; /* Size in bytes of the entire command */
+};
+
+/* Values for the load_command cmd field. */
+
+#define MACH_O_LC_SEGMENT 0x01
+#define MACH_O_LC_SYMTAB 0x02
+#define MACH_O_LC_SEGMENT_64 0x19
+#define MACH_O_LC_UUID 0x1b
+
+/* The length of a section of segment name. */
+
+#define MACH_O_NAMELEN (16)
+
+/* LC_SEGMENT load command. */
+
+struct macho_segment_command
+{
+ uint32_t cmd; /* The type of load command (LC_SEGMENT) */
+ uint32_t cmdsize; /* Size in bytes of the entire command */
+ char segname[MACH_O_NAMELEN]; /* Segment name */
+ uint32_t vmaddr; /* Virtual memory address */
+ uint32_t vmsize; /* Virtual memory size */
+ uint32_t fileoff; /* Offset of data to be mapped */
+ uint32_t filesize; /* Size of data in file */
+ uint32_t maxprot; /* Maximum permitted virtual protection */
+ uint32_t initprot; /* Initial virtual memory protection */
+ uint32_t nsects; /* Number of sections in this segment */
+ uint32_t flags; /* Flags */
+};
+
+/* LC_SEGMENT_64 load command. */
+
+struct macho_segment_64_command
+{
+ uint32_t cmd; /* The type of load command (LC_SEGMENT) */
+ uint32_t cmdsize; /* Size in bytes of the entire command */
+ char segname[MACH_O_NAMELEN]; /* Segment name */
+ uint64_t vmaddr; /* Virtual memory address */
+ uint64_t vmsize; /* Virtual memory size */
+ uint64_t fileoff; /* Offset of data to be mapped */
+ uint64_t filesize; /* Size of data in file */
+ uint32_t maxprot; /* Maximum permitted virtual protection */
+ uint32_t initprot; /* Initial virtual memory protection */
+ uint32_t nsects; /* Number of sections in this segment */
+ uint32_t flags; /* Flags */
+};
+
+/* LC_SYMTAB load command. */
+
+struct macho_symtab_command
+{
+ uint32_t cmd; /* The type of load command (LC_SEGMENT) */
+ uint32_t cmdsize; /* Size in bytes of the entire command */
+ uint32_t symoff; /* File offset of symbol table */
+ uint32_t nsyms; /* Number of symbols */
+ uint32_t stroff; /* File offset of string table */
+ uint32_t strsize; /* String table size */
+};
+
+/* The length of a Mach-O uuid. */
+
+#define MACH_O_UUID_LEN (16)
+
+/* LC_UUID load command. */
+
+struct macho_uuid_command
+{
+ uint32_t cmd; /* Type of load command (LC_UUID) */
+ uint32_t cmdsize; /* Size in bytes of command */
+ unsigned char uuid[MACH_O_UUID_LEN]; /* UUID */
+};
+
+/* 32-bit section header within a LC_SEGMENT segment. */
+
+struct macho_section
+{
+ char sectname[MACH_O_NAMELEN]; /* Section name */
+ char segment[MACH_O_NAMELEN]; /* Segment of this section */
+ uint32_t addr; /* Address in memory */
+ uint32_t size; /* Section size */
+ uint32_t offset; /* File offset */
+ uint32_t align; /* Log2 of section alignment */
+ uint32_t reloff; /* File offset of relocations */
+ uint32_t nreloc; /* Number of relocs for this section */
+ uint32_t flags; /* Flags */
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+/* 64-bit section header within a LC_SEGMENT_64 segment. */
+
+struct macho_section_64
+{
+ char sectname[MACH_O_NAMELEN]; /* Section name */
+ char segment[MACH_O_NAMELEN]; /* Segment of this section */
+ uint64_t addr; /* Address in memory */
+ uint64_t size; /* Section size */
+ uint32_t offset; /* File offset */
+ uint32_t align; /* Log2 of section alignment */
+ uint32_t reloff; /* File offset of section relocations */
+ uint32_t nreloc; /* Number of relocs for this section */
+ uint32_t flags; /* Flags */
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+/* 32-bit symbol data. */
+
+struct macho_nlist
+{
+ uint32_t n_strx; /* Index of name in string table */
+ uint8_t n_type; /* Type flag */
+ uint8_t n_sect; /* Section number */
+ uint16_t n_desc; /* Stabs description field */
+ uint32_t n_value; /* Value */
+};
+
+/* 64-bit symbol data. */
+
+struct macho_nlist_64
+{
+ uint32_t n_strx; /* Index of name in string table */
+ uint8_t n_type; /* Type flag */
+ uint8_t n_sect; /* Section number */
+ uint16_t n_desc; /* Stabs description field */
+ uint64_t n_value; /* Value */
+};
+
+/* Value found in nlist n_type field. */
+
+#define MACH_O_N_EXT 0x01 /* Extern symbol */
+#define MACH_O_N_ABS 0x02 /* Absolute symbol */
+#define MACH_O_N_SECT 0x0e /* Defined in section */
+
+#define MACH_O_N_TYPE 0x0e /* Mask for type bits */
+#define MACH_O_N_STAB 0xe0 /* Stabs debugging symbol */
+
+/* Information we keep for a Mach-O symbol. */
+
+struct macho_symbol
+{
+ const char *name; /* Symbol name */
+ uintptr_t address; /* Symbol address */
+};
+
+/* Information to pass to macho_syminfo. */
+
+struct macho_syminfo_data
+{
+ struct macho_syminfo_data *next; /* Next module */
+ struct macho_symbol *symbols; /* Symbols sorted by address */
+ size_t count; /* Number of symbols */
+};
+
+/* Names of sections, indexed by enum dwarf_section in internal.h. */
+
+static const char * const dwarf_section_names[DEBUG_MAX] =
+{
+ "__debug_info",
+ "__debug_line",
+ "__debug_abbrev",
+ "__debug_ranges",
+ "__debug_str",
+ "", /* DEBUG_ADDR */
+ "__debug_str_offs",
+ "", /* DEBUG_LINE_STR */
+ "__debug_rnglists"
+};
+
+/* Forward declaration. */
+
+static int macho_add (struct backtrace_state *, const char *, int, off_t,
+ const unsigned char *, uintptr_t, int,
+ backtrace_error_callback, void *, fileline *, int *);
+
+/* A dummy callback function used when we can't find any debug info. */
+
+static int
+macho_nodebug (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ uintptr_t pc ATTRIBUTE_UNUSED,
+ backtrace_full_callback callback ATTRIBUTE_UNUSED,
+ backtrace_error_callback error_callback, void *data)
+{
+ error_callback (data, "no debug info in Mach-O executable", -1);
+ return 0;
+}
+
+/* A dummy callback function used when we can't find a symbol
+ table. */
+
+static void
+macho_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ uintptr_t addr ATTRIBUTE_UNUSED,
+ backtrace_syminfo_callback callback ATTRIBUTE_UNUSED,
+ backtrace_error_callback error_callback, void *data)
+{
+ error_callback (data, "no symbol table in Mach-O executable", -1);
+}
+
+/* Add a single DWARF section to DWARF_SECTIONS, if we need the
+ section. Returns 1 on success, 0 on failure. */
+
+static int
+macho_add_dwarf_section (struct backtrace_state *state, int descriptor,
+ const char *sectname, uint32_t offset, uint64_t size,
+ backtrace_error_callback error_callback, void *data,
+ struct dwarf_sections *dwarf_sections)
+{
+ int i;
+
+ for (i = 0; i < (int) DEBUG_MAX; ++i)
+ {
+ if (dwarf_section_names[i][0] != '\0'
+ && strncmp (sectname, dwarf_section_names[i], MACH_O_NAMELEN) == 0)
+ {
+ struct backtrace_view section_view;
+
+ /* FIXME: Perhaps it would be better to try to use a single
+ view to read all the DWARF data, as we try to do for
+ ELF. */
+
+ if (!backtrace_get_view (state, descriptor, offset, size,
+ error_callback, data, &section_view))
+ return 0;
+ dwarf_sections->data[i] = (const unsigned char *) section_view.data;
+ dwarf_sections->size[i] = size;
+ break;
+ }
+ }
+ return 1;
+}
+
+/* Collect DWARF sections from a DWARF segment. Returns 1 on success,
+ 0 on failure. */
+
+static int
+macho_add_dwarf_segment (struct backtrace_state *state, int descriptor,
+ off_t offset, unsigned int cmd, const char *psecs,
+ size_t sizesecs, unsigned int nsects,
+ backtrace_error_callback error_callback, void *data,
+ struct dwarf_sections *dwarf_sections)
+{
+ size_t sec_header_size;
+ size_t secoffset;
+ unsigned int i;
+
+ switch (cmd)
+ {
+ case MACH_O_LC_SEGMENT:
+ sec_header_size = sizeof (struct macho_section);
+ break;
+ case MACH_O_LC_SEGMENT_64:
+ sec_header_size = sizeof (struct macho_section_64);
+ break;
+ default:
+ abort ();
+ }
+
+ secoffset = 0;
+ for (i = 0; i < nsects; ++i)
+ {
+ if (secoffset + sec_header_size > sizesecs)
+ {
+ error_callback (data, "section overflow withing segment", 0);
+ return 0;
+ }
+
+ switch (cmd)
+ {
+ case MACH_O_LC_SEGMENT:
+ {
+ struct macho_section section;
+
+ memcpy (&section, psecs + secoffset, sizeof section);
+ macho_add_dwarf_section (state, descriptor, section.sectname,
+ offset + section.offset, section.size,
+ error_callback, data, dwarf_sections);
+ }
+ break;
+
+ case MACH_O_LC_SEGMENT_64:
+ {
+ struct macho_section_64 section;
+
+ memcpy (&section, psecs + secoffset, sizeof section);
+ macho_add_dwarf_section (state, descriptor, section.sectname,
+ offset + section.offset, section.size,
+ error_callback, data, dwarf_sections);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ secoffset += sec_header_size;
+ }
+
+ return 1;
+}
+
+/* Compare struct macho_symbol for qsort. */
+
+static int
+macho_symbol_compare (const void *v1, const void *v2)
+{
+ const struct macho_symbol *m1 = (const struct macho_symbol *) v1;
+ const struct macho_symbol *m2 = (const struct macho_symbol *) v2;
+
+ if (m1->address < m2->address)
+ return -1;
+ else if (m1->address > m2->address)
+ return 1;
+ else
+ return 0;
+}
+
+/* Compare an address against a macho_symbol for bsearch. We allocate
+ one extra entry in the array so that this can safely look at the
+ next entry. */
+
+static int
+macho_symbol_search (const void *vkey, const void *ventry)
+{
+ const uintptr_t *key = (const uintptr_t *) vkey;
+ const struct macho_symbol *entry = (const struct macho_symbol *) ventry;
+ uintptr_t addr;
+
+ addr = *key;
+ if (addr < entry->address)
+ return -1;
+ else if (entry->name[0] == '\0'
+ && entry->address == ~(uintptr_t) 0)
+ return -1;
+ else if ((entry + 1)->name[0] == '\0'
+ && (entry + 1)->address == ~(uintptr_t) 0)
+ return -1;
+ else if (addr >= (entry + 1)->address)
+ return 1;
+ else
+ return 0;
+}
+
+/* Return whether the symbol type field indicates a symbol table entry
+ that we care about: a function or data symbol. */
+
+static int
+macho_defined_symbol (uint8_t type)
+{
+ if ((type & MACH_O_N_STAB) != 0)
+ return 0;
+ if ((type & MACH_O_N_EXT) != 0)
+ return 0;
+ switch (type & MACH_O_N_TYPE)
+ {
+ case MACH_O_N_ABS:
+ return 1;
+ case MACH_O_N_SECT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Add symbol table information for a Mach-O file. */
+
+static int
+macho_add_symtab (struct backtrace_state *state, int descriptor,
+ uintptr_t base_address, int is_64,
+ off_t symoff, unsigned int nsyms, off_t stroff,
+ unsigned int strsize,
+ backtrace_error_callback error_callback, void *data)
+{
+ size_t symsize;
+ struct backtrace_view sym_view;
+ int sym_view_valid;
+ struct backtrace_view str_view;
+ int str_view_valid;
+ size_t ndefs;
+ size_t symtaboff;
+ unsigned int i;
+ size_t macho_symbol_size;
+ struct macho_symbol *macho_symbols;
+ unsigned int j;
+ struct macho_syminfo_data *sdata;
+
+ sym_view_valid = 0;
+ str_view_valid = 0;
+ macho_symbol_size = 0;
+ macho_symbols = NULL;
+
+ if (is_64)
+ symsize = sizeof (struct macho_nlist_64);
+ else
+ symsize = sizeof (struct macho_nlist);
+
+ if (!backtrace_get_view (state, descriptor, symoff, nsyms * symsize,
+ error_callback, data, &sym_view))
+ goto fail;
+ sym_view_valid = 1;
+
+ if (!backtrace_get_view (state, descriptor, stroff, strsize,
+ error_callback, data, &str_view))
+ return 0;
+ str_view_valid = 1;
+
+ ndefs = 0;
+ symtaboff = 0;
+ for (i = 0; i < nsyms; ++i, symtaboff += symsize)
+ {
+ if (is_64)
+ {
+ struct macho_nlist_64 nlist;
+
+ memcpy (&nlist, (const char *) sym_view.data + symtaboff,
+ sizeof nlist);
+ if (macho_defined_symbol (nlist.n_type))
+ ++ndefs;
+ }
+ else
+ {
+ struct macho_nlist nlist;
+
+ memcpy (&nlist, (const char *) sym_view.data + symtaboff,
+ sizeof nlist);
+ if (macho_defined_symbol (nlist.n_type))
+ ++ndefs;
+ }
+ }
+
+ /* Add 1 to ndefs to make room for a sentinel. */
+ macho_symbol_size = (ndefs + 1) * sizeof (struct macho_symbol);
+ macho_symbols = ((struct macho_symbol *)
+ backtrace_alloc (state, macho_symbol_size, error_callback,
+ data));
+ if (macho_symbols == NULL)
+ goto fail;
+
+ j = 0;
+ symtaboff = 0;
+ for (i = 0; i < nsyms; ++i, symtaboff += symsize)
+ {
+ uint32_t strx;
+ uint64_t value;
+ const char *name;
+
+ strx = 0;
+ value = 0;
+ if (is_64)
+ {
+ struct macho_nlist_64 nlist;
+
+ memcpy (&nlist, (const char *) sym_view.data + symtaboff,
+ sizeof nlist);
+ if (!macho_defined_symbol (nlist.n_type))
+ continue;
+
+ strx = nlist.n_strx;
+ value = nlist.n_value;
+ }
+ else
+ {
+ struct macho_nlist nlist;
+
+ memcpy (&nlist, (const char *) sym_view.data + symtaboff,
+ sizeof nlist);
+ if (!macho_defined_symbol (nlist.n_type))
+ continue;
+
+ strx = nlist.n_strx;
+ value = nlist.n_value;
+ }
+
+ if (strx >= strsize)
+ {
+ error_callback (data, "symbol string index out of range", 0);
+ goto fail;
+ }
+
+ name = (const char *) str_view.data + strx;
+ if (name[0] == '_')
+ ++name;
+ macho_symbols[j].name = name;
+ macho_symbols[j].address = value + base_address;
+ ++j;
+ }
+
+ sdata = ((struct macho_syminfo_data *)
+ backtrace_alloc (state, sizeof *sdata, error_callback, data));
+ if (sdata == NULL)
+ goto fail;
+
+ /* We need to keep the string table since it holds the names, but we
+ can release the symbol table. */
+
+ backtrace_release_view (state, &sym_view, error_callback, data);
+ sym_view_valid = 0;
+ str_view_valid = 0;
+
+ /* Add a trailing sentinel symbol. */
+ macho_symbols[j].name = "";
+ macho_symbols[j].address = ~(uintptr_t) 0;
+
+ backtrace_qsort (macho_symbols, ndefs + 1, sizeof (struct macho_symbol),
+ macho_symbol_compare);
+
+ sdata->next = NULL;
+ sdata->symbols = macho_symbols;
+ sdata->count = ndefs;
+
+ if (!state->threaded)
+ {
+ struct macho_syminfo_data **pp;
+
+ for (pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data;
+ *pp != NULL;
+ pp = &(*pp)->next)
+ ;
+ *pp = sdata;
+ }
+ else
+ {
+ while (1)
+ {
+ struct macho_syminfo_data **pp;
+
+ pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data;
+
+ while (1)
+ {
+ struct macho_syminfo_data *p;
+
+ p = backtrace_atomic_load_pointer (pp);
+
+ if (p == NULL)
+ break;
+
+ pp = &p->next;
+ }
+
+ if (__sync_bool_compare_and_swap (pp, NULL, sdata))
+ break;
+ }
+ }
+
+ return 1;
+
+ fail:
+ if (macho_symbols != NULL)
+ backtrace_free (state, macho_symbols, macho_symbol_size,
+ error_callback, data);
+ if (sym_view_valid)
+ backtrace_release_view (state, &sym_view, error_callback, data);
+ if (str_view_valid)
+ backtrace_release_view (state, &str_view, error_callback, data);
+ return 0;
+}
+
+/* Return the symbol name and value for an ADDR. */
+
+static void
+macho_syminfo (struct backtrace_state *state, uintptr_t addr,
+ backtrace_syminfo_callback callback,
+ backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
+ void *data)
+{
+ struct macho_syminfo_data *sdata;
+ struct macho_symbol *sym;
+
+ sym = NULL;
+ if (!state->threaded)
+ {
+ /* Single-threaded: walk the plain linked list of symbol tables,
+ binary-searching each sorted table for ADDR.  */
+ for (sdata = (struct macho_syminfo_data *) state->syminfo_data;
+ sdata != NULL;
+ sdata = sdata->next)
+ {
+ sym = ((struct macho_symbol *)
+ bsearch (&addr, sdata->symbols, sdata->count,
+ sizeof (struct macho_symbol), macho_symbol_search));
+ if (sym != NULL)
+ break;
+ }
+ }
+ else
+ {
+ /* Threaded: other threads may append entries concurrently, so
+ each next pointer is loaded atomically before following it.  */
+ struct macho_syminfo_data **pp;
+
+ pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data;
+ while (1)
+ {
+ sdata = backtrace_atomic_load_pointer (pp);
+ if (sdata == NULL)
+ break;
+
+ sym = ((struct macho_symbol *)
+ bsearch (&addr, sdata->symbols, sdata->count,
+ sizeof (struct macho_symbol), macho_symbol_search));
+ if (sym != NULL)
+ break;
+
+ pp = &sdata->next;
+ }
+ }
+
+ /* Invoke the callback; a NULL name reports "no symbol found".  */
+ if (sym == NULL)
+ callback (data, addr, NULL, 0, 0);
+ else
+ callback (data, addr, sym->name, sym->address, 0);
+}
+
+/* Look through a fat file to find the relevant executable. Returns 1
+ on success, 0 on failure (in both cases descriptor is closed). */
+
+static int
+macho_add_fat (struct backtrace_state *state, const char *filename,
+ int descriptor, int swapped, off_t offset,
+ const unsigned char *match_uuid, uintptr_t base_address,
+ int skip_symtab, uint32_t nfat_arch, int is_64,
+ backtrace_error_callback error_callback, void *data,
+ fileline *fileline_fn, int *found_sym)
+{
+ int arch_view_valid;
+ unsigned int cputype;
+ size_t arch_size;
+ struct backtrace_view arch_view;
+ unsigned int i;
+
+ arch_view_valid = 0;
+
+ /* Pick the CPU type to search for from the architecture this code
+ was compiled for.  */
+#if defined (__x86_64__)
+ cputype = MACH_O_CPU_TYPE_X86_64;
+#elif defined (__i386__)
+ cputype = MACH_O_CPU_TYPE_X86;
+#elif defined (__aarch64__)
+ cputype = MACH_O_CPU_TYPE_ARM64;
+#elif defined (__arm__)
+ cputype = MACH_O_CPU_TYPE_ARM;
+#elif defined (__ppc__)
+ cputype = MACH_O_CPU_TYPE_PPC;
+#elif defined (__ppc64__)
+ cputype = MACH_O_CPU_TYPE_PPC64;
+#else
+ error_callback (data, "unknown Mach-O architecture", 0);
+ goto fail;
+#endif
+
+ if (is_64)
+ arch_size = sizeof (struct macho_fat_arch_64);
+ else
+ arch_size = sizeof (struct macho_fat_arch);
+
+ if (!backtrace_get_view (state, descriptor, offset,
+ nfat_arch * arch_size,
+ error_callback, data, &arch_view))
+ goto fail;
+ /* Mark the view as held so the failure path releases it.  The
+ original code never set this flag, leaking the mapped view when
+ no matching architecture was found in the fat file.  */
+ arch_view_valid = 1;
+
+ for (i = 0; i < nfat_arch; ++i)
+ {
+ uint32_t fcputype;
+ uint64_t foffset;
+
+ if (is_64)
+ {
+ struct macho_fat_arch_64 fat_arch_64;
+
+ memcpy (&fat_arch_64,
+ (const char *) arch_view.data + i * arch_size,
+ arch_size);
+ fcputype = fat_arch_64.cputype;
+ foffset = fat_arch_64.offset;
+ if (swapped)
+ {
+ fcputype = __builtin_bswap32 (fcputype);
+ foffset = __builtin_bswap64 (foffset);
+ }
+ }
+ else
+ {
+ struct macho_fat_arch fat_arch_32;
+
+ memcpy (&fat_arch_32,
+ (const char *) arch_view.data + i * arch_size,
+ arch_size);
+ fcputype = fat_arch_32.cputype;
+ foffset = (uint64_t) fat_arch_32.offset;
+ if (swapped)
+ {
+ fcputype = __builtin_bswap32 (fcputype);
+ foffset = (uint64_t) __builtin_bswap32 ((uint32_t) foffset);
+ }
+ }
+
+ if (fcputype == cputype)
+ {
+ /* FIXME: What about cpusubtype? */
+ backtrace_release_view (state, &arch_view, error_callback, data);
+ arch_view_valid = 0;
+ /* macho_add closes DESCRIPTOR in both success and failure
+ cases.  */
+ return macho_add (state, filename, descriptor, foffset, match_uuid,
+ base_address, skip_symtab, error_callback, data,
+ fileline_fn, found_sym);
+ }
+ }
+
+ error_callback (data, "could not find executable in fat file", 0);
+
+ fail:
+ if (arch_view_valid)
+ backtrace_release_view (state, &arch_view, error_callback, data);
+ if (descriptor != -1)
+ backtrace_close (descriptor, error_callback, data);
+ return 0;
+}
+
+/* Look for the dsym file for FILENAME. This is called if FILENAME
+ does not have debug info or a symbol table. Returns 1 on success,
+ 0 on failure. */
+
+static int
+macho_add_dsym (struct backtrace_state *state, const char *filename,
+ uintptr_t base_address, const unsigned char *uuid,
+ backtrace_error_callback error_callback, void *data,
+ fileline* fileline_fn)
+{
+ const char *p;
+ const char *dirname;
+ char *diralc; /* Heap copy of the directory name, or NULL.  */
+ size_t dirnamelen;
+ const char *basename;
+ size_t basenamelen;
+ const char *dsymsuffixdir;
+ size_t dsymsuffixdirlen;
+ size_t dsymlen;
+ char *dsym;
+ char *ps;
+ int d;
+ int does_not_exist;
+ int dummy_found_sym;
+
+ diralc = NULL;
+ dirnamelen = 0;
+ dsym = NULL;
+ dsymlen = 0;
+
+ /* Split FILENAME into directory and base name.  */
+ p = strrchr (filename, '/');
+ if (p == NULL)
+ {
+ dirname = ".";
+ dirnamelen = 1;
+ basename = filename;
+ basenamelen = strlen (basename);
+ diralc = NULL;
+ }
+ else
+ {
+ dirnamelen = p - filename;
+ diralc = (char*)backtrace_alloc (state, dirnamelen + 1, error_callback, data);
+ if (diralc == NULL)
+ goto fail;
+ memcpy (diralc, filename, dirnamelen);
+ diralc[dirnamelen] = '\0';
+ dirname = diralc;
+ basename = p + 1;
+ basenamelen = strlen (basename);
+ }
+
+ /* Build DIR/BASE.dSYM/Contents/Resources/DWARF/BASE.  */
+ dsymsuffixdir = ".dSYM/Contents/Resources/DWARF/";
+ dsymsuffixdirlen = strlen (dsymsuffixdir);
+
+ dsymlen = (dirnamelen
+ + 1
+ + basenamelen
+ + dsymsuffixdirlen
+ + basenamelen
+ + 1);
+ dsym = (char*)backtrace_alloc (state, dsymlen, error_callback, data);
+ if (dsym == NULL)
+ goto fail;
+
+ ps = dsym;
+ memcpy (ps, dirname, dirnamelen);
+ ps += dirnamelen;
+ *ps++ = '/';
+ memcpy (ps, basename, basenamelen);
+ ps += basenamelen;
+ memcpy (ps, dsymsuffixdir, dsymsuffixdirlen);
+ ps += dsymsuffixdirlen;
+ memcpy (ps, basename, basenamelen);
+ ps += basenamelen;
+ *ps = '\0';
+
+ if (diralc != NULL)
+ {
+ backtrace_free (state, diralc, dirnamelen + 1, error_callback, data);
+ diralc = NULL;
+ }
+
+ d = backtrace_open (dsym, error_callback, data, &does_not_exist);
+ if (d < 0)
+ {
+ /* The file does not exist, so we can't read the debug info.
+ Just return success. */
+ backtrace_free (state, dsym, dsymlen, error_callback, data);
+ return 1;
+ }
+
+ /* macho_add closes D in both success and failure cases.  */
+ if (!macho_add (state, dsym, d, 0, uuid, base_address, 1,
+ error_callback, data, fileline_fn, &dummy_found_sym))
+ goto fail;
+
+ backtrace_free (state, dsym, dsymlen, error_callback, data);
+
+ return 1;
+
+ fail:
+ if (dsym != NULL)
+ backtrace_free (state, dsym, dsymlen, error_callback, data);
+ if (diralc != NULL)
+ /* DIRALC was allocated with DIRNAMELEN + 1 bytes; free the same
+ amount.  (The original freed only DIRNAMELEN, mismatching the
+ allocation size recorded by backtrace_alloc.)  */
+ backtrace_free (state, diralc, dirnamelen + 1, error_callback, data);
+ return 0;
+}
+
+/* Add the backtrace data for a Macho-O file. Returns 1 on success, 0
+ on failure (in both cases descriptor is closed).
+
+ FILENAME: the name of the executable.
+ DESCRIPTOR: an open descriptor for the executable, closed here.
+ OFFSET: the offset within the file of this executable, for fat files.
+ MATCH_UUID: if not NULL, UUID that must match.
+ BASE_ADDRESS: the load address of the executable.
+ SKIP_SYMTAB: if non-zero, ignore the symbol table; used for dSYM files.
+ FILELINE_FN: set to the fileline function, by backtrace_dwarf_add.
+ FOUND_SYM: set to non-zero if we found the symbol table.
+*/
+
+static int
+macho_add (struct backtrace_state *state, const char *filename, int descriptor,
+ off_t offset, const unsigned char *match_uuid,
+ uintptr_t base_address, int skip_symtab,
+ backtrace_error_callback error_callback, void *data,
+ fileline *fileline_fn, int *found_sym)
+{
+ struct backtrace_view header_view;
+ struct macho_header_32 header;
+ off_t hdroffset;
+ int is_64;
+ struct backtrace_view cmds_view;
+ int cmds_view_valid;
+ struct dwarf_sections dwarf_sections;
+ int have_dwarf;
+ unsigned char uuid[MACH_O_UUID_LEN];
+ int have_uuid;
+ size_t cmdoffset;
+ unsigned int i;
+
+ *found_sym = 0;
+
+ cmds_view_valid = 0;
+
+ /* The 32-bit and 64-bit file headers start out the same, so we can
+ just always read the 32-bit version. A fat header is shorter but
+ it will always be followed by data, so it's OK to read extra. */
+
+ if (!backtrace_get_view (state, descriptor, offset,
+ sizeof (struct macho_header_32),
+ error_callback, data, &header_view))
+ goto fail;
+
+ memcpy (&header, header_view.data, sizeof header);
+
+ backtrace_release_view (state, &header_view, error_callback, data);
+
+ /* Dispatch on the magic number: thin 32/64-bit images are handled
+ below; fat images (host-endian or byte-swapped) are delegated to
+ macho_add_fat, which recurses back here for the right slice.  */
+ switch (header.magic)
+ {
+ case MACH_O_MH_MAGIC_32:
+ is_64 = 0;
+ hdroffset = offset + sizeof (struct macho_header_32);
+ break;
+ case MACH_O_MH_MAGIC_64:
+ is_64 = 1;
+ hdroffset = offset + sizeof (struct macho_header_64);
+ break;
+ case MACH_O_MH_MAGIC_FAT:
+ case MACH_O_MH_MAGIC_FAT_64:
+ {
+ struct macho_header_fat fat_header;
+
+ hdroffset = offset + sizeof (struct macho_header_fat);
+ memcpy (&fat_header, &header, sizeof fat_header);
+ return macho_add_fat (state, filename, descriptor, 0, hdroffset,
+ match_uuid, base_address, skip_symtab,
+ fat_header.nfat_arch,
+ header.magic == MACH_O_MH_MAGIC_FAT_64,
+ error_callback, data, fileline_fn, found_sym);
+ }
+ case MACH_O_MH_CIGAM_FAT:
+ case MACH_O_MH_CIGAM_FAT_64:
+ {
+ struct macho_header_fat fat_header;
+ uint32_t nfat_arch;
+
+ /* Byte-swapped fat header: only nfat_arch needs swapping here;
+ the per-arch entries are swapped inside macho_add_fat.  */
+ hdroffset = offset + sizeof (struct macho_header_fat);
+ memcpy (&fat_header, &header, sizeof fat_header);
+ nfat_arch = __builtin_bswap32 (fat_header.nfat_arch);
+ return macho_add_fat (state, filename, descriptor, 1, hdroffset,
+ match_uuid, base_address, skip_symtab,
+ nfat_arch,
+ header.magic == MACH_O_MH_CIGAM_FAT_64,
+ error_callback, data, fileline_fn, found_sym);
+ }
+ default:
+ error_callback (data, "executable file is not in Mach-O format", 0);
+ goto fail;
+ }
+
+ /* Only executables, dylibs and dSYM companion files can carry the
+ information we need.  */
+ switch (header.filetype)
+ {
+ case MACH_O_MH_EXECUTE:
+ case MACH_O_MH_DYLIB:
+ case MACH_O_MH_DSYM:
+ break;
+ default:
+ error_callback (data, "executable file is not an executable", 0);
+ goto fail;
+ }
+
+ /* Map the load command area, which immediately follows the header.  */
+ if (!backtrace_get_view (state, descriptor, hdroffset, header.sizeofcmds,
+ error_callback, data, &cmds_view))
+ goto fail;
+ cmds_view_valid = 1;
+
+ memset (&dwarf_sections, 0, sizeof dwarf_sections);
+ have_dwarf = 0;
+ memset (&uuid, 0, sizeof uuid);
+ have_uuid = 0;
+
+ /* Walk the load commands looking for __DWARF segments, the symbol
+ table, and the image UUID.  */
+ cmdoffset = 0;
+ for (i = 0; i < header.ncmds; ++i)
+ {
+ const char *pcmd;
+ struct macho_load_command load_command;
+
+ /* Stop rather than read past the mapped command area.
+ NOTE(review): a corrupt cmdsize of 0 would revisit the same
+ offset, but the loop is still bounded by ncmds.  */
+ if (cmdoffset + sizeof load_command > header.sizeofcmds)
+ break;
+
+ pcmd = (const char *) cmds_view.data + cmdoffset;
+ memcpy (&load_command, pcmd, sizeof load_command);
+
+ switch (load_command.cmd)
+ {
+ case MACH_O_LC_SEGMENT:
+ {
+ struct macho_segment_command segcmd;
+
+ memcpy (&segcmd, pcmd, sizeof segcmd);
+ /* Segment names are fixed-width, NUL-padded fields, so
+ compare the full MACH_O_NAMELEN bytes.  */
+ if (memcmp (segcmd.segname,
+ "__DWARF\0\0\0\0\0\0\0\0\0",
+ MACH_O_NAMELEN) == 0)
+ {
+ if (!macho_add_dwarf_segment (state, descriptor, offset,
+ load_command.cmd,
+ pcmd + sizeof segcmd,
+ (load_command.cmdsize
+ - sizeof segcmd),
+ segcmd.nsects, error_callback,
+ data, &dwarf_sections))
+ goto fail;
+ have_dwarf = 1;
+ }
+ }
+ break;
+
+ case MACH_O_LC_SEGMENT_64:
+ {
+ struct macho_segment_64_command segcmd;
+
+ memcpy (&segcmd, pcmd, sizeof segcmd);
+ if (memcmp (segcmd.segname,
+ "__DWARF\0\0\0\0\0\0\0\0\0",
+ MACH_O_NAMELEN) == 0)
+ {
+ if (!macho_add_dwarf_segment (state, descriptor, offset,
+ load_command.cmd,
+ pcmd + sizeof segcmd,
+ (load_command.cmdsize
+ - sizeof segcmd),
+ segcmd.nsects, error_callback,
+ data, &dwarf_sections))
+ goto fail;
+ have_dwarf = 1;
+ }
+ }
+ break;
+
+ case MACH_O_LC_SYMTAB:
+ /* SKIP_SYMTAB is set for dSYM files, whose symbol table
+ offsets refer to the original executable, not the dSYM.  */
+ if (!skip_symtab)
+ {
+ struct macho_symtab_command symcmd;
+
+ memcpy (&symcmd, pcmd, sizeof symcmd);
+ if (!macho_add_symtab (state, descriptor, base_address, is_64,
+ offset + symcmd.symoff, symcmd.nsyms,
+ offset + symcmd.stroff, symcmd.strsize,
+ error_callback, data))
+ goto fail;
+
+ *found_sym = 1;
+ }
+ break;
+
+ case MACH_O_LC_UUID:
+ {
+ struct macho_uuid_command uuidcmd;
+
+ memcpy (&uuidcmd, pcmd, sizeof uuidcmd);
+ memcpy (&uuid[0], &uuidcmd.uuid[0], MACH_O_UUID_LEN);
+ have_uuid = 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ cmdoffset += load_command.cmdsize;
+ }
+
+ /* Done reading the file; close it before possibly opening the dSYM.  */
+ if (!backtrace_close (descriptor, error_callback, data))
+ goto fail;
+ descriptor = -1;
+
+ backtrace_release_view (state, &cmds_view, error_callback, data);
+ cmds_view_valid = 0;
+
+ if (match_uuid != NULL)
+ {
+ /* If we don't have a UUID, or it doesn't match, just ignore
+ this file. */
+ if (!have_uuid
+ || memcmp (match_uuid, &uuid[0], MACH_O_UUID_LEN) != 0)
+ return 1;
+ }
+
+ if (have_dwarf)
+ {
+ int is_big_endian;
+
+ is_big_endian = 0;
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ is_big_endian = 1;
+#endif
+#endif
+
+ if (!backtrace_dwarf_add (state, base_address, &dwarf_sections,
+ is_big_endian, NULL, error_callback, data,
+ fileline_fn, NULL))
+ goto fail;
+ }
+
+ /* No DWARF in the image itself: try the companion dSYM bundle,
+ matched by UUID.  */
+ if (!have_dwarf && have_uuid)
+ {
+ if (!macho_add_dsym (state, filename, base_address, &uuid[0],
+ error_callback, data, fileline_fn))
+ goto fail;
+ }
+
+ return 1;
+
+ fail:
+ if (cmds_view_valid)
+ backtrace_release_view (state, &cmds_view, error_callback, data);
+ if (descriptor != -1)
+ backtrace_close (descriptor, error_callback, data);
+ return 0;
+}
+
+#ifdef HAVE_MACH_O_DYLD_H
+
+/* Initialize the backtrace data we need from a Mach-O executable
+ using the dyld support functions. This closes descriptor. */
+
+int
+backtrace_initialize (struct backtrace_state *state, const char *filename,
+ int descriptor, backtrace_error_callback error_callback,
+ void *data, fileline *fileline_fn)
+{
+ uint32_t c;
+ uint32_t i;
+ int closed_descriptor;
+ int found_sym;
+ fileline macho_fileline_fn;
+
+ closed_descriptor = 0;
+ found_sym = 0;
+ macho_fileline_fn = macho_nodebug;
+
+ /* Iterate over every image dyld has loaded into this process and
+ register each one's debug info at its slid base address.  */
+ c = _dyld_image_count ();
+ for (i = 0; i < c; ++i)
+ {
+ uintptr_t base_address;
+ const char *name;
+ int d;
+ fileline mff;
+ int mfs;
+
+ name = _dyld_get_image_name (i);
+ if (name == NULL)
+ continue;
+
+ /* Reuse the already-open DESCRIPTOR for the main executable;
+ macho_add will close whichever descriptor it is given.  */
+ if (strcmp (name, filename) == 0 && !closed_descriptor)
+ {
+ d = descriptor;
+ closed_descriptor = 1;
+ }
+ else
+ {
+ int does_not_exist;
+
+ d = backtrace_open (name, error_callback, data, &does_not_exist);
+ if (d < 0)
+ continue;
+ }
+
+ base_address = _dyld_get_image_vmaddr_slide (i);
+
+ mff = macho_nodebug;
+ if (!macho_add (state, name, d, 0, NULL, base_address, 0,
+ error_callback, data, &mff, &mfs))
+ return 0;
+
+ /* Remember the last image that produced real fileline data.  */
+ if (mff != macho_nodebug)
+ macho_fileline_fn = mff;
+ if (mfs)
+ found_sym = 1;
+ }
+
+ if (!closed_descriptor)
+ backtrace_close (descriptor, error_callback, data);
+
+ /* Publish the syminfo function; use atomics when another thread may
+ be initializing concurrently.  */
+ if (!state->threaded)
+ {
+ if (found_sym)
+ state->syminfo_fn = macho_syminfo;
+ else if (state->syminfo_fn == NULL)
+ state->syminfo_fn = macho_nosyms;
+ }
+ else
+ {
+ if (found_sym)
+ backtrace_atomic_store_pointer (&state->syminfo_fn, macho_syminfo);
+ else
+ (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL,
+ macho_nosyms);
+ }
+
+ if (!state->threaded)
+ *fileline_fn = state->fileline_fn;
+ else
+ *fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
+
+ if (*fileline_fn == NULL || *fileline_fn == macho_nodebug)
+ *fileline_fn = macho_fileline_fn;
+
+ return 1;
+}
+
+#else /* !defined (HAVE_MACH_O_DYLD_H) */
+
+/* Initialize the backtrace data we need from a Mach-O executable
+ without using the dyld support functions. This closes
+ descriptor. */
+
+int
+backtrace_initialize (struct backtrace_state *state, const char *filename,
+ int descriptor, backtrace_error_callback error_callback,
+ void *data, fileline *fileline_fn)
+{
+ fileline macho_fileline_fn;
+ int found_sym;
+
+ /* Without dyld we can only register the main executable itself, at
+ base address 0.  macho_add closes DESCRIPTOR in all cases.  */
+ macho_fileline_fn = macho_nodebug;
+ if (!macho_add (state, filename, descriptor, 0, NULL, 0, 0,
+ error_callback, data, &macho_fileline_fn, &found_sym))
+ return 0;
+
+ /* Publish the syminfo function; use atomics when another thread may
+ be initializing concurrently.  */
+ if (!state->threaded)
+ {
+ if (found_sym)
+ state->syminfo_fn = macho_syminfo;
+ else if (state->syminfo_fn == NULL)
+ state->syminfo_fn = macho_nosyms;
+ }
+ else
+ {
+ if (found_sym)
+ backtrace_atomic_store_pointer (&state->syminfo_fn, macho_syminfo);
+ else
+ (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL,
+ macho_nosyms);
+ }
+
+ if (!state->threaded)
+ *fileline_fn = state->fileline_fn;
+ else
+ *fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
+
+ if (*fileline_fn == NULL || *fileline_fn == macho_nodebug)
+ *fileline_fn = macho_fileline_fn;
+
+ return 1;
+}
+
+#endif /* !defined (HAVE_MACH_O_DYLD_H) */
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/mmapio.cpp b/3rdparty/tracy/tracy/libbacktrace/mmapio.cpp
new file mode 100644
index 0000000..0e8f599
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/mmapio.cpp
@@ -0,0 +1,115 @@
+/* mmapio.c -- File views using mmap.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+#ifndef HAVE_DECL_GETPAGESIZE
+extern int getpagesize (void);
+#endif
+
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
+namespace tracy
+{
+
+/* This file implements file views and memory allocation when mmap is
+ available. */
+
+/* Create a view of SIZE bytes from DESCRIPTOR at OFFSET. */
+
+int
+backtrace_get_view (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ int descriptor, off_t offset, uint64_t size,
+ backtrace_error_callback error_callback,
+ void *data, struct backtrace_view *view)
+{
+ size_t pagesize;
+ unsigned int inpage;
+ off_t pageoff;
+ void *map;
+
+ /* Reject sizes that do not fit in size_t (32-bit hosts).  */
+ if ((uint64_t) (size_t) size != size)
+ {
+ error_callback (data, "file size too large", 0);
+ return 0;
+ }
+
+ /* mmap requires a page-aligned file offset: round OFFSET down to a
+ page boundary and remember how far into that page the data is.  */
+ pagesize = getpagesize ();
+ inpage = offset % pagesize;
+ pageoff = offset - inpage;
+
+ /* Grow the mapping to cover the in-page slack, rounded up to whole
+ pages.  */
+ size += inpage;
+ size = (size + (pagesize - 1)) & ~ (pagesize - 1);
+
+ map = mmap (NULL, size, PROT_READ, MAP_PRIVATE, descriptor, pageoff);
+ if (map == MAP_FAILED)
+ {
+ error_callback (data, "mmap", errno);
+ return 0;
+ }
+
+ /* DATA points at the requested offset; BASE/LEN describe the whole
+ mapping so backtrace_release_view can munmap it.  */
+ view->data = (char *) map + inpage;
+ view->base = map;
+ view->len = size;
+
+ return 1;
+}
+
+/* Release a view read by backtrace_get_view. */
+
+void
+backtrace_release_view (struct backtrace_state *state ATTRIBUTE_UNUSED,
+ struct backtrace_view *view,
+ backtrace_error_callback error_callback,
+ void *data)
+{
+ /* Union used to strip const from view->base without a cast that
+ would trip const-correctness warnings; munmap takes void *.  */
+ union {
+ const void *cv;
+ void *v;
+ } cc;
+
+ cc.cv = view->base;
+ if (munmap (cc.v, view->len) < 0)
+ error_callback (data, "munmap", errno);
+}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/posix.cpp b/3rdparty/tracy/tracy/libbacktrace/posix.cpp
new file mode 100644
index 0000000..8233a8e
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/posix.cpp
@@ -0,0 +1,109 @@
+/* posix.c -- POSIX file I/O routines for the backtrace library.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+
+#ifndef O_CLOEXEC
+#define O_CLOEXEC 0
+#endif
+
+#ifndef FD_CLOEXEC
+#define FD_CLOEXEC 1
+#endif
+
+namespace tracy
+{
+
+/* Open a file for reading. */
+
+int
+backtrace_open (const char *filename, backtrace_error_callback error_callback,
+ void *data, int *does_not_exist)
+{
+ int descriptor;
+
+ if (does_not_exist != NULL)
+ *does_not_exist = 0;
+
+ /* O_BINARY and O_CLOEXEC are defined to 0 above when the platform
+ lacks them, so this flag combination is always valid.  */
+ descriptor = open (filename, (int) (O_RDONLY | O_BINARY | O_CLOEXEC));
+ if (descriptor < 0)
+ {
+ /* If DOES_NOT_EXIST is not NULL, then don't call ERROR_CALLBACK
+ if the file does not exist. We treat lacking permission to
+ open the file as the file not existing; this case arises when
+ running the libgo syscall package tests as root. */
+ if (does_not_exist != NULL && (errno == ENOENT || errno == EACCES))
+ *does_not_exist = 1;
+ else
+ error_callback (data, filename, errno);
+ return -1;
+ }
+
+#ifdef HAVE_FCNTL
+ /* Set FD_CLOEXEC just in case the kernel does not support
+ O_CLOEXEC. It doesn't matter if this fails for some reason.
+ FIXME: At some point it should be safe to only do this if
+ O_CLOEXEC == 0. */
+ fcntl (descriptor, F_SETFD, FD_CLOEXEC);
+#endif
+
+ return descriptor;
+}
+
+/* Close DESCRIPTOR. */
+
+int
+backtrace_close (int descriptor, backtrace_error_callback error_callback,
+ void *data)
+{
+ /* Report the error through the callback rather than errno alone;
+ returns 0 on failure, 1 on success.  */
+ if (close (descriptor) < 0)
+ {
+ error_callback (data, "close", errno);
+ return 0;
+ }
+ return 1;
+}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/sort.cpp b/3rdparty/tracy/tracy/libbacktrace/sort.cpp
new file mode 100644
index 0000000..6daee0a
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/sort.cpp
@@ -0,0 +1,113 @@
+/* sort.c -- Sort without allocating memory
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+namespace tracy
+{
+
+/* The GNU glibc version of qsort allocates memory, which we must not
+ do if we are invoked by a signal handler. So provide our own
+ sort. */
+
+/* Swap SIZE bytes between A and B, one byte at a time, without
+ allocating any memory (this sort must be async-signal-safe).  */
+static void
+swap (char *a, char *b, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++, a++, b++)
+ {
+ char t;
+
+ t = *a;
+ *a = *b;
+ *b = t;
+ }
+}
+
+/* Sort COUNT elements of SIZE bytes at BASEARG with comparator COMPAR,
+ using quicksort with no heap allocation and O(log count) stack.  */
+void
+backtrace_qsort (void *basearg, size_t count, size_t size,
+ int (*compar) (const void *, const void *))
+{
+ char *base = (char *) basearg;
+ size_t i;
+ size_t mid;
+
+ tail_recurse:
+ if (count < 2)
+ return;
+
+ /* The symbol table and DWARF tables, which is all we use this
+ routine for, tend to be roughly sorted. Pick the middle element
+ in the array as our pivot point, so that we are more likely to
+ cut the array in half for each recursion step. */
+ swap (base, base + (count / 2) * size, size);
+
+ /* Partition: after the loop, elements [1, mid] are < pivot (which
+ sits at index 0) and (mid, count) are >= pivot.  */
+ mid = 0;
+ for (i = 1; i < count; i++)
+ {
+ if ((*compar) (base, base + i * size) > 0)
+ {
+ ++mid;
+ if (i != mid)
+ swap (base + mid * size, base + i * size, size);
+ }
+ }
+
+ /* Move the pivot into its final position at index MID.  */
+ if (mid > 0)
+ swap (base, base + mid * size, size);
+
+ /* Recurse with the smaller array, loop with the larger one. That
+ ensures that our maximum stack depth is log count. */
+ if (2 * mid < count)
+ {
+ backtrace_qsort (base, mid, size, compar);
+ base += (mid + 1) * size;
+ count -= mid + 1;
+ goto tail_recurse;
+ }
+ else
+ {
+ backtrace_qsort (base + (mid + 1) * size, count - (mid + 1),
+ size, compar);
+ count = mid;
+ goto tail_recurse;
+ }
+}
+
+}
diff --git a/3rdparty/tracy/tracy/libbacktrace/state.cpp b/3rdparty/tracy/tracy/libbacktrace/state.cpp
new file mode 100644
index 0000000..ea3c137
--- /dev/null
+++ b/3rdparty/tracy/tracy/libbacktrace/state.cpp
@@ -0,0 +1,76 @@
+/* state.c -- Create the backtrace state.
+ Copyright (C) 2012-2021 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <string.h>
+#include <sys/types.h>
+
+#include "backtrace.hpp"
+#include "internal.hpp"
+
+namespace tracy
+{
+
+/* Create the backtrace state. This will then be passed to all the
+ other routines. */
+
+struct backtrace_state *
+backtrace_create_state (const char *filename, int threaded,
+ backtrace_error_callback error_callback,
+ void *data)
+{
+ struct backtrace_state init_state;
+ struct backtrace_state *state;
+
+ /* Refuse THREADED mode if the compiler's __sync builtins are not
+ available, since the rest of the library relies on them.  */
+#ifndef HAVE_SYNC_FUNCTIONS
+ if (threaded)
+ {
+ error_callback (data, "backtrace library does not support threads", 0);
+ return NULL;
+ }
+#endif
+
+ memset (&init_state, 0, sizeof init_state);
+ init_state.filename = filename;
+ init_state.threaded = threaded;
+
+ /* Bootstrap: use a stack-local zeroed state as the allocator context
+ to allocate the real state, then copy the initial values in.  */
+ state = ((struct backtrace_state *)
+ backtrace_alloc (&init_state, sizeof *state, error_callback, data));
+ if (state == NULL)
+ return NULL;
+ *state = init_state;
+
+ return state;
+}
+
+}