author    | rtk0c <[email protected]> | 2022-06-03 23:26:44 -0700
committer | rtk0c <[email protected]> | 2022-06-03 23:26:44 -0700
commit    | 60ccc62f4934e44ad5b905fdbcf458302b8d8a09 (patch)
tree      | 02ec83cc8387abfd08bd5ee7ea4e8115f1bfb8d0 /source/Common
parent    | c2ef7737536bf1f8c81fcfae95c0183b21c9753f (diff)
Changeset: 63 [WIP] Rename directories
Diffstat (limited to 'source/Common')
-rw-r--r-- | source/Common/Color.hpp | 148
-rw-r--r-- | source/Common/Enum.hpp | 103
-rw-r--r-- | source/Common/LookupTable.hpp | 64
-rw-r--r-- | source/Common/Macros.hpp | 31
-rw-r--r-- | source/Common/PodVector.hpp | 297
-rw-r--r-- | source/Common/RTTI.hpp | 44
-rw-r--r-- | source/Common/RapidJsonHelper.hpp | 110
-rw-r--r-- | source/Common/RcPtr.hpp | 120
-rw-r--r-- | source/Common/Rect.hpp | 164
-rw-r--r-- | source/Common/ScopeGuard.hpp | 60
-rw-r--r-- | source/Common/SmallVector.cpp | 145
-rw-r--r-- | source/Common/SmallVector.hpp | 1332
-rw-r--r-- | source/Common/StbImplementations.c | 14
-rw-r--r-- | source/Common/Type2ObjectMap.hpp | 38
-rw-r--r-- | source/Common/TypeTraits.hpp | 27
-rw-r--r-- | source/Common/Uid.cpp | 58
-rw-r--r-- | source/Common/Uid.hpp | 42
-rw-r--r-- | source/Common/Utils.cpp | 107
-rw-r--r-- | source/Common/Utils.hpp | 65
-rw-r--r-- | source/Common/YCombinator.hpp | 14
-rw-r--r-- | source/Common/buildfile | 1
21 files changed, 2984 insertions, 0 deletions
diff --git a/source/Common/Color.hpp b/source/Common/Color.hpp
new file mode 100644
index 0000000..ef0c5a9
--- /dev/null
+++ b/source/Common/Color.hpp
@@ -0,0 +1,148 @@
+#pragma once
+
+#include "Utils.hpp"
+
+#include <algorithm>
+#include <cstdint>
+#include <glm/glm.hpp>
+#include <limits>
+
+class HsvColor;
+class RgbaColor {
+public:
+    uint8_t r;
+    uint8_t g;
+    uint8_t b;
+    uint8_t a;
+
+public:
+    constexpr RgbaColor() noexcept
+        : r{ 255 }
+        , g{ 255 }
+        , b{ 255 }
+        , a{ 255 } {
+    }
+
+    constexpr RgbaColor(float r, float g, float b, float a = 1.0f) noexcept
+        : r{ static_cast<uint8_t>(r * 255.0f) }
+        , g{ static_cast<uint8_t>(g * 255.0f) }
+        , b{ static_cast<uint8_t>(b * 255.0f) }
+        , a{ static_cast<uint8_t>(a * 255.0f) } {
+    }
+
+    constexpr RgbaColor(int r, int g, int b, int a = 255) noexcept
+        : r{ static_cast<uint8_t>(r & 0xFF) }
+        , g{ static_cast<uint8_t>(g & 0xFF) }
+        , b{ static_cast<uint8_t>(b & 0xFF) }
+        , a{ static_cast<uint8_t>(a & 0xFF) } {
+    }
+
+    constexpr RgbaColor(uint32_t rgba) noexcept
+        : r{ static_cast<uint8_t>((rgba >> 0) & 0xFF) }
+        , g{ static_cast<uint8_t>((rgba >> 8) & 0xFF) }
+        , b{ static_cast<uint8_t>((rgba >> 16) & 0xFF) }
+        , a{ static_cast<uint8_t>((rgba >> 24) & 0xFF) } {
+    }
+
+    constexpr uint32_t GetScalar() const noexcept {
+        uint32_t res = 0;
+        res |= r << 0;
+        res |= g << 8;
+        res |= b << 16;
+        res |= a << 24;
+        return res;
+    }
+
+    constexpr void SetScalar(uint32_t scalar) noexcept {
+        r = (scalar >> 0) & 0xFF;
+        g = (scalar >> 8) & 0xFF;
+        b = (scalar >> 16) & 0xFF;
+        a = (scalar >> 24) & 0xFF;
+    }
+
+    constexpr float GetNormalizedRed() const noexcept {
+        return r / 255.0f;
+    }
+
+    constexpr float GetNormalizedGreen() const noexcept {
+        return g / 255.0f;
+    }
+
+    constexpr float GetNormalizedBlue() const noexcept {
+        return b / 255.0f;
+    }
+
+    constexpr float GetNormalizedAlpha() const noexcept {
+        return a / 255.0f;
+    }
+
+    constexpr glm::ivec4 ToIVec() const noexcept {
+        return { r, g, b, a };
+    }
+
+    constexpr glm::vec4 ToVec() const noexcept {
+        return { GetNormalizedRed(), GetNormalizedGreen(), GetNormalizedBlue(), GetNormalizedAlpha() };
+    }
+
+    // Forward declaring because of the cyclic reference between RgbaColor and HsvColor
+    constexpr HsvColor ToHsv() const noexcept;
+
+    friend constexpr bool operator==(const RgbaColor&, const RgbaColor&) noexcept = default;
+};
+
+constexpr RgbaColor kXAxisColor(0xFF, 0x80, 0x80, 0xFF);
+constexpr RgbaColor kYAxisColor(0x80, 0xFF, 0x80, 0xFF);
+constexpr RgbaColor kZAxisColor(0x80, 0x80, 0xFF, 0xFF);
+
+class HsvColor {
+public:
+    float h;
+    float s;
+    float v;
+    float a;
+
+public:
+    constexpr HsvColor() noexcept
+        : h{ 0.0f }
+        , s{ 0.0f }
+        , v{ 1.0f }
+        , a{ 1.0f } {
+    }
+
+    constexpr HsvColor(float h, float s, float v, float a) noexcept
+        : h{ h }
+        , s{ s }
+        , v{ v }
+        , a{ a } {
+    }
+
+    // Forward declaring because of the cyclic reference between RgbaColor and HsvColor
+    constexpr RgbaColor ToRgba() const noexcept;
+};
+
+constexpr HsvColor RgbaColor::ToHsv() const noexcept {
+    float r = GetNormalizedRed();
+    float g = GetNormalizedGreen();
+    float b = GetNormalizedBlue();
+    float a = GetNormalizedAlpha();
+
+    auto p = g < b ? glm::vec4(b, g, -1, 2.0f / 3.0f) : glm::vec4(g, b, 0, -1.0f / 3.0f);
+    auto q = r < p.x ? glm::vec4(p.x, p.y, p.w, r) : glm::vec4(r, p.y, p.z, p.x);
+    float c = q.x - std::min(q.w, q.y);
+    float h = Utils::Abs((q.w - q.y) / (6 * c + std::numeric_limits<float>::epsilon()) + q.z);
+
+    glm::vec3 hcv{ h, c, q.x };
+    float s = hcv.y / (hcv.z + std::numeric_limits<float>::epsilon());
+    return HsvColor(hcv.x, s, hcv.z, a);
+}
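
For reference, a minimal usage sketch of the two conversion paths (editorial, not part of the commit; it assumes Utils::Abs from Utils.hpp is a plain float absolute-value helper):

#include "Color.hpp"
#include <cstdio>

int main() {
    RgbaColor red(0xFF, 0x00, 0x00, 0xFF);
    HsvColor hsv = red.ToHsv();    // roughly h = 0.0, s = 1.0, v = 1.0
    RgbaColor back = hsv.ToRgba(); // rounds back to (255, 0, 0, 255)
    std::printf("h=%.2f s=%.2f v=%.2f scalar=%08X\n", hsv.h, hsv.s, hsv.v, back.GetScalar());
}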
+constexpr RgbaColor HsvColor::ToRgba() const noexcept {
+    float r = Utils::Abs(h * 6 - 3) - 1;
+    float g = 2 - Utils::Abs(h * 6 - 2);
+    float b = 2 - Utils::Abs(h * 6 - 4);
+
+    auto rgb = glm::vec3{ std::clamp(r, 0.0f, 1.0f), std::clamp(g, 0.0f, 1.0f), std::clamp(b, 0.0f, 1.0f) };
+    auto vc = ((rgb - glm::vec3{ 1, 1, 1 }) * s + glm::vec3{ 1, 1, 1 }) * v;
+
+    return RgbaColor(vc.x, vc.y, vc.z, a);
+}
diff --git a/source/Common/Enum.hpp b/source/Common/Enum.hpp
new file mode 100644
index 0000000..8ad75ba
--- /dev/null
+++ b/source/Common/Enum.hpp
@@ -0,0 +1,103 @@
+#pragma once
+
+#include <initializer_list>
+#include <type_traits>
+
+template <class TEnum>
+class EnumFlags {
+public:
+    using Enum = TEnum;
+    using Underlying = std::underlying_type_t<TEnum>;
+
+private:
+    Underlying mValue;
+
+    // Raw bit-pattern constructor, used internally by the bitwise operators below.
+    // Note that the public TEnum constructor treats the enumerator as a bit index instead.
+    explicit constexpr EnumFlags(Underlying rawValue)
+        : mValue{ rawValue } {
+    }
+
+public:
+    EnumFlags()
+        : mValue{ 0 } {
+    }
+
+    EnumFlags(TEnum e)
+        : mValue{ static_cast<Underlying>(1) << static_cast<Underlying>(e) } {
+    }
+
+    bool IsSet(EnumFlags mask) const {
+        return (mValue & mask.mValue) == mask.mValue;
+    }
+
+    bool IsSet(std::initializer_list<TEnum> enums) const {
+        EnumFlags flags;
+        for (auto& e : enums) {
+            flags |= e;
+        }
+        return IsSet(flags);
+    }
+
+    bool IsSetExclusive(EnumFlags mask) const {
+        return mValue == mask.mValue;
+    }
+
+    bool IsSetExclusive(std::initializer_list<TEnum> enums) const {
+        EnumFlags flags;
+        for (auto& e : enums) {
+            flags |= e;
+        }
+        return IsSetExclusive(flags);
+    }
+
+    void SetOn(EnumFlags mask) {
+        mValue |= mask.mValue;
+    }
+
+    void SetOff(EnumFlags mask) {
+        mValue &= ~mask.mValue;
+    }
+
+    void Set(EnumFlags mask, bool enabled) {
+        if (enabled) {
+            SetOn(mask);
+        } else {
+            SetOff(mask);
+        }
+    }
+
+    EnumFlags& operator|=(EnumFlags that) {
+        mValue |= that.mValue;
+        return *this;
+    }
+
+    EnumFlags& operator&=(EnumFlags that) {
+        mValue &= that.mValue;
+        return *this;
+    }
+
+    EnumFlags& operator^=(EnumFlags that) {
+        mValue ^= that.mValue;
+        return *this;
+    }
+
+    EnumFlags& operator|=(TEnum e) {
+        mValue |= static_cast<Underlying>(1) << static_cast<Underlying>(e);
+        return *this;
+    }
+
+    EnumFlags& operator&=(TEnum e) {
+        mValue &= static_cast<Underlying>(1) << static_cast<Underlying>(e);
+        return *this;
+    }
+
+    EnumFlags& operator^=(TEnum e) {
+        mValue ^= static_cast<Underlying>(1) << static_cast<Underlying>(e);
+        return *this;
+    }
+
+    EnumFlags operator|(EnumFlags that) const { return EnumFlags(static_cast<Underlying>(mValue | that.mValue)); }
+    EnumFlags operator&(EnumFlags that) const { return EnumFlags(static_cast<Underlying>(mValue & that.mValue)); }
+    EnumFlags operator^(EnumFlags that) const { return EnumFlags(static_cast<Underlying>(mValue ^ that.mValue)); }
+
+    EnumFlags operator|(TEnum e) const { return *this | EnumFlags(e); }
+    EnumFlags operator&(TEnum e) const { return *this & EnumFlags(e); }
+    EnumFlags operator^(TEnum e) const { return *this ^ EnumFlags(e); }
+
+    EnumFlags operator~() const { return EnumFlags(static_cast<Underlying>(~mValue)); }
+};
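
A usage sketch (editorial, with a made-up enum) showing that each enumerator names a bit index rather than a mask value:

enum class WindowFlag { Resizable, Borderless, AlwaysOnTop };

void Demo() {
    EnumFlags<WindowFlag> flags;
    flags |= WindowFlag::Resizable;       // sets bit 0
    flags.SetOn(WindowFlag::AlwaysOnTop); // sets bit 2
    bool both = flags.IsSet({ WindowFlag::Resizable, WindowFlag::AlwaysOnTop }); // true
    bool only = flags.IsSetExclusive(WindowFlag::Resizable); // false, another bit is also on
    flags.Set(WindowFlag::AlwaysOnTop, both && !only);       // mask on/off by condition
}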
diff --git a/source/Common/LookupTable.hpp b/source/Common/LookupTable.hpp
new file mode 100644
index 0000000..54548f2
--- /dev/null
+++ b/source/Common/LookupTable.hpp
@@ -0,0 +1,64 @@
+#pragma once
+
+#include <robin_hood.h>
+#include <string_view>
+
+// BIDI stands for bi-directional
+#define BIDI_LUT_DECL(name, aType, aCount, bType, bCount) \
+    int gLutBidi_##name##_A2B[aCount];                    \
+    int gLutBidi_##name##_B2A[bCount];                    \
+    using name##AType = aType;                            \
+    using name##BType = bType;                            \
+    void InitializeLutBidi##name()
+#define BIDI_LUT_MAP_FOR(name)                  \
+    int* lutMappingA2B = gLutBidi_##name##_A2B; \
+    int* lutMappingB2A = gLutBidi_##name##_B2A
+#define BIDI_LUT_MAP(from, to) \
+    lutMappingA2B[from] = to;  \
+    lutMappingB2A[to] = from
+#define BIDI_LUT_INIT(name) InitializeLutBidi##name()
+#define BIDI_LUT_A2B_LOOKUP(name, from) (name##BType)(gLutBidi_##name##_A2B[from])
+#define BIDI_LUT_B2A_LOOKUP(name, to) (name##AType)(gLutBidi_##name##_B2A[to])
+
+// FSTR stands for forward-string lookup
+#define FSTR_LUT_DECL(name, enumMinValue, enumMaxValue)               \
+    constexpr int kLutFwMinVal_##name = enumMinValue;                 \
+    const char* gLutFw_##name[(int)enumMaxValue - (int)enumMinValue]; \
+    void InitializeLutFw##name()
+#define FSTR_LUT_MAP_FOR(name)               \
+    const char** lutMapping = gLutFw_##name; \
+    int lutMappingMinValue = kLutFwMinVal_##name
+#define FSTR_LUT_MAP(value, text) lutMapping[value - lutMappingMinValue] = text
+#define FSTR_LUT_MAP_ENUM(enumValue) FSTR_LUT_MAP(enumValue, #enumValue)
+#define FSTR_LUT_LOOKUP(name, enumValue) gLutFw_##name[enumValue - kLutFwMinVal_##name]
+#define FSTR_LUT_INIT(name) InitializeLutFw##name()
+
+// RSTR stands for reverse-string lookup
+#define RSTR_LUT_DECL(name, enumMinValue, enumMaxValue)                                     \
+    robin_hood::unordered_flat_map<std::string_view, decltype(enumMaxValue)> gLutRv_##name; \
+    void InitializeLutRv##name()
+#define RSTR_LUT_MAP_FOR(name) auto& lutMapping = gLutRv_##name;
+#define RSTR_LUT_MAP(value, text) lutMapping.insert_or_assign(std::string_view(text), value);
+#define RSTR_LUT(name) gLutRv_##name
+#define RSTR_LUT_LOOKUP(name, string) gLutRv_##name.find(std::string_view(string))->second
+#define RSTR_LUT_INIT(name) InitializeLutRv##name()
+
+// BSTR stands for bi-directional string lookup
+#define BSTR_LUT_DECL(name, enumMinValue, enumMaxValue)                                             \
+    constexpr int kLutBstrMinVal_##name = enumMinValue;                                             \
+    const char* gLutBstr_##name##_V2S[(int)enumMaxValue - (int)enumMinValue];                       \
+    robin_hood::unordered_flat_map<std::string_view, decltype(enumMaxValue)> gLutBstr_##name##_S2V; \
+    void InitializeLutBstr##name()
+#define BSTR_LUT_MAP_FOR(name)                          \
+    const char** lutMappingV2S = gLutBstr_##name##_V2S; \
+    auto& lutMappingS2V = gLutBstr_##name##_S2V;        \
+    int lutMappingMinValue = kLutBstrMinVal_##name
+#define BSTR_LUT_MAP(value, text)                     \
+    lutMappingV2S[value - lutMappingMinValue] = text; \
+    lutMappingS2V.insert_or_assign(std::string_view(text), value);
+#define BSTR_LUT_MAP_ENUM(enumValue) BSTR_LUT_MAP(enumValue, #enumValue)
+#define BSTR_LUT_V2S(name) gLutBstr_##name##_V2S
+#define BSTR_LUT_S2V(name) gLutBstr_##name##_S2V
+#define BSTR_LUT_V2S_LOOKUP(name, enumValue) gLutBstr_##name##_V2S[enumValue - kLutBstrMinVal_##name]
+#define BSTR_LUT_S2V_LOOKUP(name, string) gLutBstr_##name##_S2V.find(std::string_view(string))->second
+#define BSTR_LUT_INIT(name) InitializeLutBstr##name()
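
An editorial sketch of how the BSTR macros are meant to compose, using a hypothetical unscoped enum (the arithmetic in the macros relies on implicit enum-to-int conversion, and BSTR_LUT_DECL ends on the init function's signature, so its body follows the invocation):

enum Fruit { FRUIT_APPLE, FRUIT_BANANA, FRUIT_COUNT };

BSTR_LUT_DECL(Fruit, FRUIT_APPLE, FRUIT_COUNT) {
    BSTR_LUT_MAP_FOR(Fruit);
    BSTR_LUT_MAP_ENUM(FRUIT_APPLE);  // maps FRUIT_APPLE <-> "FRUIT_APPLE"
    BSTR_LUT_MAP_ENUM(FRUIT_BANANA);
}

void Startup() {
    BSTR_LUT_INIT(Fruit);
    const char* name = BSTR_LUT_V2S_LOOKUP(Fruit, FRUIT_BANANA); // "FRUIT_BANANA"
    Fruit f = BSTR_LUT_S2V_LOOKUP(Fruit, "FRUIT_APPLE");         // FRUIT_APPLE
}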
diff --git a/source/Common/Macros.hpp b/source/Common/Macros.hpp
new file mode 100644
index 0000000..a255ada
--- /dev/null
+++ b/source/Common/Macros.hpp
@@ -0,0 +1,31 @@
+#pragma once
+
+#define STRINGIFY_IMPL(text) #text
+#define STRINGIFY(text) STRINGIFY_IMPL(text)
+
+#define CONCAT_IMPL(a, b) a##b
+#define CONCAT(a, b) CONCAT_IMPL(a, b)
+#define CONCAT_3(a, b, c) CONCAT(a, CONCAT(b, c))
+#define CONCAT_4(a, b, c, d) CONCAT(CONCAT(a, b), CONCAT(c, d))
+
+#define UNIQUE_NAME(prefix) CONCAT(prefix, __COUNTER__)
+#define UNIQUE_NAME_LINE(prefix) CONCAT(prefix, __LINE__)
+#define DISCARD UNIQUE_NAME(_discard)
+
+#define UNUSED(x) (void)x;
+
+#define PRINTF_STRING_VIEW(s) (int)s.size(), s.data()
+
+#if defined(_MSC_VER)
+# define UNREACHABLE __assume(0)
+#elif defined(__GNUC__) || defined(__clang__)
+# define UNREACHABLE __builtin_unreachable()
+#else
+# define UNREACHABLE
+#endif
+
+#if _WIN32
+# define PLATFORM_PATH_STR "%ls"
+#else
+# define PLATFORM_PATH_STR "%s"
+#endif
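
A short editorial illustration of the stringize/paste helpers (PRINTF_STRING_VIEW exists because printf has no std::string_view conversion; %.*s takes a length and a pointer):

#include "Macros.hpp"
#include <cstdio>
#include <string_view>

void PrintTagged(std::string_view msg) {
    // STRINGIFY expands its argument first, so this embeds the actual line number
    std::printf("line " STRINGIFY(__LINE__) ": %.*s\n", PRINTF_STRING_VIEW(msg));
    // UNIQUE_NAME pastes __COUNTER__ to mint a fresh identifier, e.g. 'tmp_0'
    int UNIQUE_NAME(tmp_) = 0;
}

void OnEvent(int eventId) {
    UNUSED(eventId) // expands to '(void)eventId;'
}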
diff --git a/source/Common/PodVector.hpp b/source/Common/PodVector.hpp
new file mode 100644
index 0000000..74e99d6
--- /dev/null
+++ b/source/Common/PodVector.hpp
@@ -0,0 +1,297 @@
+// File adapted from dear-imgui's ImVector, implemented in https://github.com/ocornut/imgui/blob/master/imgui.h
+#pragma once
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <span>
+
+template <class T>
+class PodVector {
+public:
+    using value_type = T;
+    using iterator = value_type*;
+    using const_iterator = const value_type*;
+
+private:
+    int mSize;
+    int mCapacity;
+    T* mData;
+
+public:
+    PodVector() {
+        mSize = mCapacity = 0;
+        mData = nullptr;
+    }
+
+    PodVector(const PodVector<T>& src) {
+        mSize = mCapacity = 0;
+        mData = nullptr;
+        operator=(src);
+    }
+
+    PodVector<T>& operator=(const PodVector<T>& src) {
+        if (this == &src) return *this; // Guard self-assignment, which would otherwise wipe the contents
+        clear();
+        resize(src.mSize);
+        std::memcpy(mData, src.mData, (size_t)mSize * sizeof(T));
+        return *this;
+    }
+
+    PodVector(PodVector&& src) {
+        mSize = src.mSize;
+        mCapacity = src.mCapacity;
+        mData = src.mData;
+
+        src.mSize = src.mCapacity = 0;
+        src.mData = nullptr;
+    }
+
+    PodVector& operator=(PodVector&& src) {
+        if (this != &src) {
+            std::free(mData);
+
+            mSize = src.mSize;
+            mCapacity = src.mCapacity;
+            mData = src.mData;
+
+            src.mSize = src.mCapacity = 0;
+            src.mData = nullptr;
+        }
+        return *this;
+    }
+
+    ~PodVector() {
+        std::free(mData);
+    }
+
+    bool empty() const { return mSize == 0; }
+    int size() const { return mSize; }
+    int size_in_bytes() const { return mSize * (int)sizeof(T); }
+    int max_size() const { return 0x7FFFFFFF / (int)sizeof(T); }
+    int capacity() const { return mCapacity; }
+
+    T& operator[](int i) {
+        assert(i >= 0 && i < mSize);
+        return mData[i];
+    }
+
+    const T& operator[](int i) const {
+        assert(i >= 0 && i < mSize);
+        return mData[i];
+    }
+
+    void clear() {
+        if (mData) {
+            mSize = mCapacity = 0;
+            std::free(mData);
+            mData = nullptr;
+        }
+    }
+
+    T* begin() { return mData; }
+    const T* begin() const { return mData; }
+    T* end() { return mData + mSize; }
+    const T* end() const { return mData + mSize; }
+
+    T* data() { return mData; }
+
+    T& front() {
+        assert(mSize > 0);
+        return mData[0];
+    }
+
+    const T& front() const {
+        assert(mSize > 0);
+        return mData[0];
+    }
+
+    T& back() {
+        assert(mSize > 0);
+        return mData[mSize - 1];
+    }
+
+    const T& back() const {
+        assert(mSize > 0);
+        return mData[mSize - 1];
+    }
+
+    void swap(PodVector<T>& rhs) {
+        int rhs_size = rhs.mSize;
+        rhs.mSize = mSize;
+        mSize = rhs_size;
+        int rhs_cap = rhs.mCapacity;
+        rhs.mCapacity = mCapacity;
+        mCapacity = rhs_cap;
+        T* rhs_mDataTmp = rhs.mData;
+        rhs.mData = mData;
+        mData = rhs_mDataTmp;
+    }
+
+    int grow_capacity(int sz) const {
+        int newCapacity = mCapacity ? (mCapacity + mCapacity / 2) : 8;
+        return newCapacity > sz ? newCapacity : sz;
+    }
+
+    void resize(int new_size) {
+        if (new_size > mCapacity) reserve(grow_capacity(new_size));
+        mSize = new_size;
+    }
+
+    void resize_more(int size) {
+        resize(mSize + size);
+    }
+
+    void resize(int new_size, const T& v) {
+        if (new_size > mCapacity) reserve(grow_capacity(new_size));
+        if (new_size > mSize) {
+            for (int n = mSize; n < new_size; n++) {
+                std::memcpy(&mData[n], &v, sizeof(v));
+            }
+        }
+        mSize = new_size;
+    }
+
+    void resize_more(int size, const T& v) {
+        resize(mSize + size, v);
+    }
+
+    /// Resize the vector to a smaller size; guaranteed not to cause a reallocation
+    void shrink(int new_size) {
+        assert(new_size <= mSize);
+        mSize = new_size;
+    }
+
+    /// Grow the capacity to at least newCapacity; never shrinks the allocation
+    void reserve(int newCapacity) {
+        if (newCapacity <= mCapacity) return;
+        auto tmp = (T*)std::malloc((size_t)newCapacity * sizeof(T));
+        if (mData) {
+            std::memcpy(tmp, mData, (size_t)mSize * sizeof(T));
+            std::free(mData);
+        }
+        mData = tmp;
+        mCapacity = newCapacity;
+    }
+
+    void reserve_more(int size) {
+        reserve(mSize + size);
+    }
+
+    /// NB: It is illegal to call push_back/push_front/insert with a reference pointing inside the PodVector data itself! e.g. v.push_back(v[10]) is forbidden.
+    void push_back(const T& v) {
+        if (mSize == mCapacity) reserve(grow_capacity(mSize + 1));
+        std::memcpy(&mData[mSize], &v, sizeof(v));
+        mSize++;
+    }
+
+    void pop_back() {
+        assert(mSize > 0);
+        mSize--;
+    }
+
+    void push_front(const T& v) {
+        if (mSize == 0) {
+            push_back(v);
+        } else {
+            insert(mData, v);
+        }
+    }
+
+    T* erase(const T* it) {
+        assert(it >= mData && it < mData + mSize);
+        const ptrdiff_t off = it - mData;
+        std::memmove(mData + off, mData + off + 1, ((size_t)mSize - (size_t)off - 1) * sizeof(T));
+        mSize--;
+        return mData + off;
+    }
+
+    T* erase(const T* it, const T* it_last) {
+        assert(it >= mData && it < mData + mSize && it_last > it && it_last <= mData + mSize);
+        const ptrdiff_t count = it_last - it;
+        const ptrdiff_t off = it - mData;
+        std::memmove(mData + off, mData + off + count, ((size_t)mSize - (size_t)off - count) * sizeof(T));
+        mSize -= (int)count;
+        return mData + off;
+    }
+
+    T* erase_unsorted(const T* it) {
+        assert(it >= mData && it < mData + mSize);
+        const ptrdiff_t off = it - mData;
+        if (it < mData + mSize - 1) std::memcpy(mData + off, mData + mSize - 1, sizeof(T));
+        mSize--;
+        return mData + off;
+    }
+
+    T* insert(const T* it, const T& v) {
+        assert(it >= mData && it <= mData + mSize);
+        const ptrdiff_t off = it - mData;
+        if (mSize == mCapacity) reserve(grow_capacity(mSize + 1));
+        if (off < (int)mSize) std::memmove(mData + off + 1, mData + off, ((size_t)mSize - (size_t)off) * sizeof(T));
+        std::memcpy(&mData[off], &v, sizeof(v));
+        mSize++;
+        return mData + off;
+    }
+
+    bool contains(const T& v) const {
+        const T* data = mData;
+        const T* dataEnd = mData + mSize;
+        while (data < dataEnd) {
+            if (*data++ == v) return true;
+        }
+        return false;
+    }
+
+    T* find(const T& v) {
+        T* data = mData;
+        const T* dataEnd = mData + mSize;
+        while (data < dataEnd)
+            if (*data == v)
+                break;
+            else
+                ++data;
+        return data;
+    }
+
+    const T* find(const T& v) const {
+        const T* data = mData;
+        const T* dataEnd = mData + mSize;
+        while (data < dataEnd)
+            if (*data == v)
+                break;
+            else
+                ++data;
+        return data;
+    }
+
+    bool find_erase(const T& v) {
+        const T* it = find(v);
+        if (it < mData + mSize) {
+            erase(it);
+            return true;
+        }
+        return false;
+    }
+
+    bool find_erase_unsorted(const T& v) {
+        const T* it = find(v);
+        if (it < mData + mSize) {
+            erase_unsorted(it);
+            return true;
+        }
+        return false;
+    }
+
+    int index_from_ptr(const T* it) const {
+        assert(it >= mData && it < mData + mSize);
+        const ptrdiff_t off = it - mData;
+        return (int)off;
+    }
+
+    // Custom utility functions
+
+    std::span<T> as_span() { return { mData, (size_t)mSize }; }
+    std::span<uint8_t> as_data_span() { return { (uint8_t*)mData, (size_t)mSize * sizeof(T) }; }
+    std::span<const T> as_span() const { return { mData, (size_t)mSize }; }
+    std::span<const uint8_t> as_data_span() const { return { (uint8_t*)mData, (size_t)mSize * sizeof(T) }; }
+};
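
An editorial usage sketch; note the container memcpy's elements around, so it is only suitable for trivially copyable types:

#include "PodVector.hpp"

void Demo() {
    PodVector<int> v;
    v.reserve(16);
    for (int i = 0; i < 10; i++) v.push_back(i);
    v.find_erase(3);               // ordered removal: shifts the tail down
    v.find_erase_unsorted(5);      // O(1) removal: swaps the last element in
    auto bytes = v.as_data_span(); // view the payload as raw bytes, e.g. for serialization
}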
false; + } + + int index_from_ptr(const T* it) const { + assert(it >= mData && it < mData + mSize); + const ptrdiff_t off = it - mData; + return (int)off; + } + + // Custom utility functions + + std::span<T> as_span() { return { mData, (size_t)mSize }; } + std::span<uint8_t> as_data_span() { return { (uint8_t*)mData, (size_t)mSize * sizeof(T) }; } + std::span<const T> as_span() const { return { mData, (size_t)mSize }; } + std::span<const uint8_t> as_data_span() const { return { (uint8_t*)mData, (size_t)mSize * sizeof(T) }; } +}; diff --git a/source/Common/RTTI.hpp b/source/Common/RTTI.hpp new file mode 100644 index 0000000..bc0d289 --- /dev/null +++ b/source/Common/RTTI.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include <cassert> + +template <class T, class TBase> +bool is_a(TBase* t) { + assert(t != nullptr); + return T::IsInstance(t); +} + +template <class T, class TBase> +bool is_a_nullable(TBase* t) { + if (t) { + return is_a<T, TBase>(t); + } else { + return false; + } +} + +template <class T, class TBase> +T* dyn_cast(TBase* t) { + assert(t != nullptr); + if (T::IsInstance(t)) { + return static_cast<T*>(t); + } else { + return nullptr; + } +} + +template <class T, class TBase> +const T* dyn_cast(const TBase* t) { + assert(t != nullptr); + if (T::IsInstance(t)) { + return static_cast<const T*>(t); + } else { + return nullptr; + } +} + +template <class T, class TBase> +T* dyn_cast_nullable(TBase* t) { + if (!t) return nullptr; + return dyn_cast<T, TBase>(t); +} diff --git a/source/Common/RapidJsonHelper.hpp b/source/Common/RapidJsonHelper.hpp new file mode 100644 index 0000000..75cd93a --- /dev/null +++ b/source/Common/RapidJsonHelper.hpp @@ -0,0 +1,110 @@ +#pragma once + +#include <rapidjson/document.h> +#include <cstring> +#include <string> +#include <string_view> + +#define BRUSSEL_JSON_GET(object, name, type, out, failAction) \ + { \ + auto it = (object).FindMember(name); \ + if (it == (object).MemberEnd()) failAction; \ + auto& value = it->value; \ + if (!value.Is<type>()) failAction; \ + (out) = value.Get<type>(); \ + } + +#define BRUSSEL_JSON_GET_DEFAULT(object, name, type, out, theDefault) \ + do { \ + auto it = (object).FindMember(name); \ + if (it == (object).MemberEnd()) { \ + (out) = theDefault; \ + break; \ + } \ + auto& value = it->value; \ + if (!value.Is<type>()) { \ + (out) = theDefault; \ + break; \ + } \ + (out) = value.Get<type>(); \ + } while (0); + +namespace rapidjson { + +inline const Value* GetProperty(const Value& value, std::string_view name) { + for (auto it = value.MemberBegin(); it != value.MemberEnd(); ++it) { + if (it->name.GetStringLength() != name.size()) continue; + if (std::memcmp(it->name.GetString(), name.data(), name.size())) continue; + + return &it->value; + } + return nullptr; +} + +inline const Value* GetProperty(const Value& value, Type type, std::string_view name) { + for (auto it = value.MemberBegin(); it != value.MemberEnd(); ++it) { + if (it->name.GetStringLength() != name.size()) continue; + if (it->value.GetType() != type) continue; + if (std::memcmp(it->name.GetString(), name.data(), name.size())) continue; + + return &it->value; + } + return nullptr; +} + +inline std::string_view AsStringView(const Value& value) { + return std::string_view(value.GetString(), value.GetStringLength()); +} + +inline std::string_view AsStringView(const GenericStringRef<char>& strRef) { + return std::string_view(strRef.s, strRef.length); +} + +inline std::string AsString(const Value& value) { + return std::string(value.GetString(), value.GetStringLength()); +} 
diff --git a/source/Common/RapidJsonHelper.hpp b/source/Common/RapidJsonHelper.hpp
new file mode 100644
index 0000000..75cd93a
--- /dev/null
+++ b/source/Common/RapidJsonHelper.hpp
@@ -0,0 +1,110 @@
+#pragma once
+
+#include <rapidjson/document.h>
+#include <cstring>
+#include <string>
+#include <string_view>
+
+#define BRUSSEL_JSON_GET(object, name, type, out, failAction) \
+    {                                                         \
+        auto it = (object).FindMember(name);                  \
+        if (it == (object).MemberEnd()) failAction;           \
+        auto& value = it->value;                              \
+        if (!value.Is<type>()) failAction;                    \
+        (out) = value.Get<type>();                            \
+    }
+
+#define BRUSSEL_JSON_GET_DEFAULT(object, name, type, out, theDefault) \
+    do {                                                              \
+        auto it = (object).FindMember(name);                          \
+        if (it == (object).MemberEnd()) {                             \
+            (out) = theDefault;                                       \
+            break;                                                    \
+        }                                                             \
+        auto& value = it->value;                                      \
+        if (!value.Is<type>()) {                                      \
+            (out) = theDefault;                                       \
+            break;                                                    \
+        }                                                             \
+        (out) = value.Get<type>();                                    \
+    } while (0)
+
+namespace rapidjson {
+
+inline const Value* GetProperty(const Value& value, std::string_view name) {
+    for (auto it = value.MemberBegin(); it != value.MemberEnd(); ++it) {
+        if (it->name.GetStringLength() != name.size()) continue;
+        if (std::memcmp(it->name.GetString(), name.data(), name.size())) continue;
+
+        return &it->value;
+    }
+    return nullptr;
+}
+
+inline const Value* GetProperty(const Value& value, Type type, std::string_view name) {
+    for (auto it = value.MemberBegin(); it != value.MemberEnd(); ++it) {
+        if (it->name.GetStringLength() != name.size()) continue;
+        if (it->value.GetType() != type) continue;
+        if (std::memcmp(it->name.GetString(), name.data(), name.size())) continue;
+
+        return &it->value;
+    }
+    return nullptr;
+}
+
+inline std::string_view AsStringView(const Value& value) {
+    return std::string_view(value.GetString(), value.GetStringLength());
+}
+
+inline std::string_view AsStringView(const GenericStringRef<char>& strRef) {
+    return std::string_view(strRef.s, strRef.length);
+}
+
+inline std::string AsString(const Value& value) {
+    return std::string(value.GetString(), value.GetStringLength());
+}
+
+inline std::string AsString(const GenericStringRef<char>& strRef) {
+    return std::string(strRef.s, strRef.length);
+}
+
+// RapidJson itself already provides std::string and const char* overloads
+inline GenericStringRef<char> StringRef(std::string_view str) {
+    return GenericStringRef<char>(
+        str.data() ? str.data() : "",
+        str.size());
+}
+
+template <class TIter, class TSentinel>
+rapidjson::Value WriteVectorPrimitives(rapidjson::Document& root, TIter begin, TSentinel end) {
+    using TElement = typename TIter::value_type;
+
+    rapidjson::Value list(rapidjson::kArrayType);
+    while (begin != end) {
+        if constexpr (std::is_same_v<TElement, std::string>) {
+            auto& elm = *begin;
+            // Copy the string into the document's allocator so it outlives `elm`
+            list.PushBack(rapidjson::Value(elm.c_str(), elm.size(), root.GetAllocator()), root.GetAllocator());
+        } else {
+            list.PushBack(*begin, root.GetAllocator());
+        }
+        ++begin;
+    }
+    return list;
+}
+
+template <class TContainer>
+bool ReadVectorPrimitives(const rapidjson::Value& value, TContainer& list) {
+    using TElement = typename TContainer::value_type;
+
+    if (!value.IsArray()) return false;
+
+    list.reserve(value.Size());
+    for (auto& elm : value.GetArray()) {
+        if (!elm.Is<TElement>()) return false;
+        list.push_back(elm.Get<TElement>());
+    }
+
+    return true;
+}
+
+} // namespace rapidjson
diff --git a/source/Common/RcPtr.hpp b/source/Common/RcPtr.hpp
new file mode 100644
index 0000000..130b2b2
--- /dev/null
+++ b/source/Common/RcPtr.hpp
@@ -0,0 +1,120 @@
+#pragma once
+
+#include "Macros.hpp"
+#include "TypeTraits.hpp"
+
+#include <cstddef>
+#include <cstdint>
+#include <optional>
+#include <type_traits>
+
+class RefCounted {
+public:
+    // DO NOT MODIFY this field, unless the use is explicitly documented
+    uint32_t refCount = 0;
+    uint32_t weakCount = 0; // TODO implement
+};
+
+template <class T, class TDeleter = DefaultDeleter<T>>
+class RcPtr : TDeleter {
+private:
+    static_assert(std::is_base_of_v<RefCounted, T>);
+    T* mPtr;
+
+public:
+    RcPtr()
+        : mPtr{ nullptr } {
+    }
+
+    explicit RcPtr(T* ptr)
+        : mPtr{ ptr } {
+        if (ptr) {
+            ++ptr->RefCounted::refCount;
+        }
+    }
+
+    ~RcPtr() {
+        CleanUp();
+    }
+
+    void Attach(T* ptr) {
+        CleanUp();
+        mPtr = ptr;
+        if (ptr) {
+            ++ptr->RefCounted::refCount;
+        }
+    }
+
+    void Detach() {
+        CleanUp();
+        mPtr = nullptr;
+    }
+
+    RcPtr(const RcPtr& that)
+        : mPtr{ that.mPtr } {
+        if (mPtr) {
+            ++mPtr->RefCounted::refCount;
+        }
+    }
+
+    RcPtr& operator=(const RcPtr& that) {
+        // Increment first so that self-assignment cannot drop the last reference
+        T* newPtr = that.mPtr;
+        if (newPtr) {
+            ++newPtr->RefCounted::refCount;
+        }
+        CleanUp();
+        mPtr = newPtr;
+        return *this;
+    }
+
+    RcPtr(RcPtr&& that)
+        : mPtr{ that.mPtr } {
+        that.mPtr = nullptr;
+    }
+
+    RcPtr& operator=(RcPtr&& that) {
+        if (this != &that) {
+            CleanUp();
+            mPtr = that.mPtr;
+            that.mPtr = nullptr;
+        }
+        return *this;
+    }
+
+    template <class TBase>
+    requires std::is_base_of_v<TBase, T>
+    operator RcPtr<TBase>() const {
+        return RcPtr<TBase>(mPtr);
+    }
+
+    bool operator==(std::nullptr_t) const {
+        return mPtr == nullptr;
+    }
+
+    bool operator==(const T* ptr) const {
+        return mPtr == ptr;
+    }
+
+    bool operator==(T* ptr) const {
+        return mPtr == ptr;
+    }
+
+    template <class TThat>
+    bool operator==(const RcPtr<TThat>& ptr) const {
+        return mPtr == ptr.Get();
+    }
+
+    T* Get() const {
+        return mPtr;
+    }
+
+    T& operator*() const { return *mPtr; }
+    T* operator->() const { return mPtr; }
+
+private:
+    void CleanUp() {
+        if (mPtr) {
+            --mPtr->RefCounted::refCount;
+            if (mPtr->RefCounted::refCount == 0) {
+                TDeleter::operator()(mPtr);
+            }
+        }
+    }
+};
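
A hypothetical usage sketch of the intrusive pointer; DefaultDeleter is assumed to come from TypeTraits.hpp and to delete via operator():

class Texture : public RefCounted {
public:
    int width = 0, height = 0;
};

void Demo() {
    RcPtr<Texture> a(new Texture()); // refCount == 1
    {
        RcPtr<Texture> b = a;        // refCount == 2
        b->width = 128;
    }                                // b destroyed, refCount back to 1
}                                    // a destroyed, refCount hits 0, Texture deleted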
diff --git a/source/Common/Rect.hpp b/source/Common/Rect.hpp
new file mode 100644
index 0000000..89d9b01
--- /dev/null
+++ b/source/Common/Rect.hpp
@@ -0,0 +1,164 @@
+#pragma once
+
+#include <glm/glm.hpp>
+
+/// Rect is a rectangle representation based on a point and dimensions, in television coordinate space
+/// (x increases from left to right, y increases from top to bottom).
+template <class T>
+class Rect {
+public:
+    using ScalarType = T;
+    using VectorType = glm::vec<2, T>;
+
+public:
+    T x;
+    T y;
+    T width;
+    T height;
+
+public:
+    Rect()
+        : x{ 0 }, y{ 0 }, width{ 0 }, height{ 0 } {
+    }
+
+    Rect(T x, T y, T width, T height)
+        : x{ x }, y{ y }, width{ width }, height{ height } {
+    }
+
+    Rect(VectorType pos, VectorType size)
+        : x{ pos.x }
+        , y{ pos.y }
+        , width{ size.x }
+        , height{ size.y } {
+    }
+
+    T x0() const { return x; }
+    T y0() const { return y; }
+    T x1() const { return x + width; }
+    T y1() const { return y + height; }
+
+    VectorType TopLeft() const {
+        return VectorType{ x, y };
+    }
+
+    VectorType TopRight() const {
+        return VectorType{ x + width, y };
+    }
+
+    VectorType BottomLeft() const {
+        return VectorType{ x, y + height };
+    }
+
+    VectorType BottomRight() const {
+        return VectorType{ x + width, y + height };
+    }
+
+    VectorType Center() const {
+        return TopLeft() + VectorType{ width / 2, height / 2 };
+    }
+
+    VectorType Dimensions() const {
+        return VectorType{ width, height };
+    }
+
+    VectorType Extents() const {
+        return VectorType{ width / 2, height / 2 };
+    }
+
+    /// Assumes `bySize * 2` is smaller than both `width` and `height` (does not produce a negative-dimension rectangle).
+    Rect Shrink(T bySize) const {
+        T two = bySize * 2;
+        return Rect{ x + bySize, y + bySize, width - two, height - two };
+    }
+
+    Rect Shrink(T left, T top, T right, T bottom) const {
+        return Rect{
+            x + left,
+            y + top,
+            width - left - right,
+            height - top - bottom,
+        };
+    }
+
+    Rect Expand(T bySize) const {
+        T two = bySize * 2;
+        return Rect{ x - bySize, y - bySize, width + two, height + two };
+    }
+
+    Rect Expand(T left, T top, T right, T bottom) const {
+        return Rect{
+            x - left,
+            y - top,
+            width + left + right,
+            height + top + bottom,
+        };
+    }
+
+    bool Contains(VectorType point) const {
+        return point.x >= x &&
+               point.y >= y &&
+               point.x < x + width &&
+               point.y < y + height;
+    }
+
+    bool Intersects(const Rect& that) const {
+        // Require overlap on both axes; testing a single corner would miss
+        // e.g. the case where `that` fully contains `this`
+        bool xOverlap = x0() < that.x1() && that.x0() < x1();
+        bool yOverlap = y0() < that.y1() && that.y0() < y1();
+        return xOverlap && yOverlap;
+    }
+
+    // Write the min()/max() ternary by hand so that we don't have to include <algorithm>
+    // This file is practically going to be included in every file in this project
+
+    static Rect Intersection(const Rect& a, const Rect& b) {
+        auto x0 = a.x0() > b.x0() ? a.x0() : b.x0(); // Max
+        auto y0 = a.y0() > b.y0() ? a.y0() : b.y0(); // Max
+        auto x1 = a.x1() < b.x1() ? a.x1() : b.x1(); // Min
+        auto y1 = a.y1() < b.y1() ? a.y1() : b.y1(); // Min
+        auto width = x1 - x0;
+        auto height = y1 - y0;
+        return Rect{ x0, y0, width, height };
+    }
+
+    static Rect Union(const Rect& a, const Rect& b) {
+        auto x0 = a.x0() < b.x0() ? a.x0() : b.x0(); // Min
+        auto y0 = a.y0() < b.y0() ? a.y0() : b.y0(); // Min
+        auto x1 = a.x1() > b.x1() ? a.x1() : b.x1(); // Max
+        auto y1 = a.y1() > b.y1() ? a.y1() : b.y1(); // Max
+        auto width = x1 - x0;
+        auto height = y1 - y0;
+        return Rect{ x0, y0, width, height };
+    }
+
+    friend bool operator==(const Rect<T>&, const Rect<T>&) = default;
+
+    Rect operator+(glm::vec<2, T> offset) const {
+        return { x + offset.x, y + offset.y, width, height };
+    }
+
+    Rect operator-(glm::vec<2, T> offset) const {
+        return { x - offset.x, y - offset.y, width, height };
+    }
+
+    Rect& operator+=(glm::vec<2, T> offset) {
+        x += offset.x;
+        y += offset.y;
+        return *this;
+    }
+
+    Rect& operator-=(glm::vec<2, T> offset) {
+        x -= offset.x;
+        y -= offset.y;
+        return *this;
+    }
+
+    template <class TTarget>
+    Rect<TTarget> Cast() const {
+        return {
+            static_cast<TTarget>(x),
+            static_cast<TTarget>(y),
+            static_cast<TTarget>(width),
+            static_cast<TTarget>(height),
+        };
+    }
+};
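
An editorial sketch exercising the set operations (y grows downward, so BottomRight is the numerically largest corner):

void Demo() {
    Rect<int> a{ 0, 0, 100, 100 };
    Rect<int> b{ 50, 50, 100, 100 };
    bool hit = a.Intersects(b);                  // true
    Rect<int> i = Rect<int>::Intersection(a, b); // { 50, 50, 50, 50 }
    Rect<int> u = Rect<int>::Union(a, b);        // { 0, 0, 150, 150 }
    bool in = u.Contains({ 120, 120 });          // true
}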
diff --git a/source/Common/ScopeGuard.hpp b/source/Common/ScopeGuard.hpp
new file mode 100644
index 0000000..28f3385
--- /dev/null
+++ b/source/Common/ScopeGuard.hpp
@@ -0,0 +1,60 @@
+#pragma once
+
+#include "Macros.hpp"
+
+#include <utility>
+
+template <class TCleanupFunc>
+class ScopeGuard {
+private:
+    TCleanupFunc mFunc;
+    bool mDismissed = false;
+
+public:
+    /// Specifically left implicit so that constructs like
+    /// \code
+    /// ScopeGuard sg = [&]() { res.Cleanup(); };
+    /// \endcode
+    /// work. It is highly discouraged and unlikely that one would want to use ScopeGuard as a
+    /// function parameter, so the usual argument that implicit conversions are harmful doesn't
+    /// really apply here.
+    ScopeGuard(TCleanupFunc&& function) noexcept
+        : mFunc{ std::move(function) } {
+    }
+
+    ~ScopeGuard() noexcept {
+        if (!mDismissed) {
+            mFunc();
+        }
+    }
+
+    ScopeGuard(const ScopeGuard&) = delete;
+    ScopeGuard& operator=(const ScopeGuard&) = delete;
+
+    ScopeGuard(ScopeGuard&& that) noexcept
+        : mFunc{ std::move(that.mFunc) } {
+        that.Dismiss();
+    }
+
+    ScopeGuard& operator=(ScopeGuard&& that) noexcept {
+        if (!mDismissed) {
+            mFunc();
+        }
+        this->mFunc = std::move(that.mFunc);
+        this->mDismissed = std::exchange(that.mDismissed, true);
+        return *this;
+    }
+
+    void Dismiss() noexcept {
+        mDismissed = true;
+    }
+};
+
+template <class T>
+auto GuardDeletion(T* ptr) {
+    return ScopeGuard([ptr]() {
+        delete ptr;
+    });
+}
+
+#define SCOPE_GUARD(name) ScopeGuard name = [&]()
+#define DEFER ScopeGuard UNIQUE_NAME(scopeGuard) = [&]()
diff --git a/source/Common/SmallVector.cpp b/source/Common/SmallVector.cpp
new file mode 100644
index 0000000..c38e8a7
--- /dev/null
+++ b/source/Common/SmallVector.cpp
@@ -0,0 +1,145 @@
+// Obtained from https://github.com/llvm/llvm-project/blob/main/llvm/lib/Support/SmallVector.cpp
+// commit 4b82bb6d82f65f98f23d0e4c2cd5297dc162864c
+// adapted in code style and utilities to fit this project
+
+//===- llvm/ADT/SmallVector.cpp - 'Normally small' vectors ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SmallVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SmallVector.hpp"
+
+#include <cstdlib>
+#include <stdexcept>
+#include <string>
+
+// Check that no bytes are wasted and everything is well-aligned.
+namespace { +// These structures may cause binary compat warnings on AIX. Suppress the +// warning since we are only using these types for the static assertions below. +#if defined(_AIX) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Waix-compat" +#endif +struct Struct16B { + alignas(16) void* X; +}; +struct Struct32B { + alignas(32) void* X; +}; +#if defined(_AIX) +# pragma GCC diagnostic pop +#endif +} // namespace +static_assert(sizeof(SmallVector<void*, 0>) == + sizeof(unsigned) * 2 + sizeof(void*), + "wasted space in SmallVector size 0"); +static_assert(alignof(SmallVector<Struct16B, 0>) >= alignof(Struct16B), + "wrong alignment for 16-byte aligned T"); +static_assert(alignof(SmallVector<Struct32B, 0>) >= alignof(Struct32B), + "wrong alignment for 32-byte aligned T"); +static_assert(sizeof(SmallVector<Struct16B, 0>) >= alignof(Struct16B), + "missing padding for 16-byte aligned T"); +static_assert(sizeof(SmallVector<Struct32B, 0>) >= alignof(Struct32B), + "missing padding for 32-byte aligned T"); +static_assert(sizeof(SmallVector<void*, 1>) == + sizeof(unsigned) * 2 + sizeof(void*) * 2, + "wasted space in SmallVector size 1"); + +static_assert(sizeof(SmallVector<char, 0>) == + sizeof(void*) * 2 + sizeof(void*), + "1 byte elements have word-sized type for size and capacity"); + +/// Report that MinSize doesn't fit into this vector's size type. Throws +/// std::length_error or calls report_fatal_error. +[[noreturn]] static void report_size_overflow(size_t MinSize, size_t MaxSize); +static void report_size_overflow(size_t MinSize, size_t MaxSize) { + std::string Reason = "SmallVector unable to grow. Requested capacity (" + + std::to_string(MinSize) + + ") is larger than maximum value for size type (" + + std::to_string(MaxSize) + ")"; + throw std::length_error(Reason); +} + +/// Report that this vector is already at maximum capacity. Throws +/// std::length_error or calls report_fatal_error. +[[noreturn]] static void report_at_maximum_capacity(size_t MaxSize); +static void report_at_maximum_capacity(size_t MaxSize) { + std::string Reason = + "SmallVector capacity unable to grow. Already at maximum size " + + std::to_string(MaxSize); + throw std::length_error(Reason); +} + +// Note: Moving this function into the header may cause performance regression. +template <class Size_T> +static size_t getNewCapacity(size_t MinSize, size_t TSize, size_t OldCapacity) { + constexpr size_t MaxSize = std::numeric_limits<Size_T>::max(); + + // Ensure we can fit the new capacity. + // This is only going to be applicable when the capacity is 32 bit. + if (MinSize > MaxSize) + report_size_overflow(MinSize, MaxSize); + + // Ensure we can meet the guarantee of space for at least one more element. + // The above check alone will not catch the case where grow is called with a + // default MinSize of 0, but the current capacity cannot be increased. + // This is only going to be applicable when the capacity is 32 bit. + if (OldCapacity == MaxSize) + report_at_maximum_capacity(MaxSize); + + // In theory 2*capacity can overflow if the capacity is 64 bit, but the + // original capacity would never be large enough for this to be a problem. + size_t NewCapacity = 2 * OldCapacity + 1; // Always grow. + return std::min(std::max(NewCapacity, MinSize), MaxSize); +} + +// Note: Moving this function into the header may cause performance regression. 
+template <class Size_T>
+void* SmallVectorBase<Size_T>::mallocForGrow(size_t MinSize, size_t TSize, size_t& NewCapacity) {
+    NewCapacity = getNewCapacity<Size_T>(MinSize, TSize, this->capacity());
+    return malloc(NewCapacity * TSize);
+}
+
+// Note: Moving this function into the header may cause performance regression.
+template <class Size_T>
+void SmallVectorBase<Size_T>::grow_pod(void* FirstEl, size_t MinSize, size_t TSize) {
+    size_t NewCapacity = getNewCapacity<Size_T>(MinSize, TSize, this->capacity());
+    void* NewElts;
+    if (BeginX == FirstEl) {
+        NewElts = malloc(NewCapacity * TSize);
+
+        // Copy the elements over. No need to run dtors on PODs.
+        memcpy(NewElts, this->BeginX, size() * TSize);
+    } else {
+        // If this wasn't grown from the inline copy, grow the allocated space.
+        NewElts = realloc(this->BeginX, NewCapacity * TSize);
+    }
+
+    this->BeginX = NewElts;
+    this->Capacity = NewCapacity;
+}
+
+template class SmallVectorBase<uint32_t>;
+
+// Disable the uint64_t instantiation for 32-bit builds.
+// Both uint32_t and uint64_t instantiations are needed for 64-bit builds.
+// This instantiation will never be used in 32-bit builds, and will cause
+// warnings when sizeof(Size_T) > sizeof(size_t).
+#if SIZE_MAX > UINT32_MAX
+template class SmallVectorBase<uint64_t>;
+
+// Assertions to ensure this #if stays in sync with SmallVectorSizeType.
+static_assert(sizeof(SmallVectorSizeType<char>) == sizeof(uint64_t),
+              "Expected SmallVectorBase<uint64_t> variant to be in use.");
+#else
+static_assert(sizeof(SmallVectorSizeType<char>) == sizeof(uint32_t),
+              "Expected SmallVectorBase<uint32_t> variant to be in use.");
+#endif
diff --git a/source/Common/SmallVector.hpp b/source/Common/SmallVector.hpp
new file mode 100644
index 0000000..e33a25d
--- /dev/null
+++ b/source/Common/SmallVector.hpp
@@ -0,0 +1,1332 @@
+// Obtained from https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/ADT/SmallVector.h
+// commit 4b82bb6d82f65f98f23d0e4c2cd5297dc162864c
+// adapted in code style and utilities to fit this project
+
+//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SmallVector class.
+///
+//===----------------------------------------------------------------------===//
+
+#pragma once
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4267) // The compiler detected a conversion from size_t to a smaller type.
+#endif
+
+#if __has_builtin(__builtin_expect) || defined(__GNUC__)
+# define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+# define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
+#else
+# define LLVM_LIKELY(EXPR) (EXPR)
+# define LLVM_UNLIKELY(EXPR) (EXPR)
+#endif
+
+template <typename IteratorT>
+class iterator_range;
+
+/// This is all the stuff common to all SmallVectors.
+/// +/// The template parameter specifies the type which should be used to hold the +/// Size and Capacity of the SmallVector, so it can be adjusted. +/// Using 32 bit size is desirable to shrink the size of the SmallVector. +/// Using 64 bit size is desirable for cases like SmallVector<char>, where a +/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for +/// buffering bitcode output - which can exceed 4GB. +template <class Size_T> +class SmallVectorBase { +protected: + void* BeginX; + Size_T Size = 0, Capacity; + + /// The maximum value of the Size_T used. + static constexpr size_t SizeTypeMax() { + return std::numeric_limits<Size_T>::max(); + } + + SmallVectorBase() = delete; + SmallVectorBase(void* FirstEl, size_t TotalCapacity) + : BeginX(FirstEl), Capacity(TotalCapacity) {} + + /// This is a helper for \a grow() that's out of line to reduce code + /// duplication. This function will report a fatal error if it can't grow at + /// least to \p MinSize. + void* mallocForGrow(size_t MinSize, size_t TSize, size_t& NewCapacity); + + /// This is an implementation of the grow() method which only works + /// on POD-like data types and is out of line to reduce code duplication. + /// This function will report a fatal error if it cannot increase capacity. + void grow_pod(void* FirstEl, size_t MinSize, size_t TSize); + +public: + size_t size() const { return Size; } + size_t capacity() const { return Capacity; } + + [[nodiscard]] bool empty() const { return !Size; } + +protected: + /// Set the array size to \p N, which the current array must have enough + /// capacity for. + /// + /// This does not construct or destroy any elements in the vector. + void set_size(size_t N) { + assert(N <= capacity()); + Size = N; + } +}; + +template <class T> +using SmallVectorSizeType = + typename std::conditional<sizeof(T) < 4 && sizeof(void*) >= 8, uint64_t, uint32_t>::type; + +/// Figure out the offset of the first element. +template <class T, typename = void> +struct SmallVectorAlignmentAndSize { + alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof( + SmallVectorBase<SmallVectorSizeType<T>>)]; + alignas(T) char FirstEl[sizeof(T)]; +}; + +/// This is the part of SmallVectorTemplateBase which does not depend on whether +/// the type T is a POD. The extra dummy template argument is used by ArrayRef +/// to avoid unnecessarily requiring T to be complete. +template <typename T, typename = void> +class SmallVectorTemplateCommon + : public SmallVectorBase<SmallVectorSizeType<T>> { + using Base = SmallVectorBase<SmallVectorSizeType<T>>; + + /// Find the address of the first element. For this pointer math to be valid + /// with small-size of 0 for T with lots of alignment, it's important that + /// SmallVectorStorage is properly-aligned even for small-size of 0. + void* getFirstEl() const { + return const_cast<void*>(reinterpret_cast<const void*>( + reinterpret_cast<const char*>(this) + + offsetof(SmallVectorAlignmentAndSize<T>, FirstEl))); + } + // Space after 'FirstEl' is clobbered, do not add any instance vars after it. + +protected: + SmallVectorTemplateCommon(size_t Size) + : Base(getFirstEl(), Size) {} + + void grow_pod(size_t MinSize, size_t TSize) { + Base::grow_pod(getFirstEl(), MinSize, TSize); + } + + /// Return true if this is a smallvector which has not had dynamic + /// memory allocated for it. + bool isSmall() const { return this->BeginX == getFirstEl(); } + + /// Put this vector in a state of being small. 
+ void resetToSmall() { + this->BeginX = getFirstEl(); + this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. + } + + /// Return true if V is an internal reference to the given range. + bool isReferenceToRange(const void* V, const void* First, const void* Last) const { + // Use std::less to avoid UB. + std::less<> LessThan; + return !LessThan(V, First) && LessThan(V, Last); + } + + /// Return true if V is an internal reference to this vector. + bool isReferenceToStorage(const void* V) const { + return isReferenceToRange(V, this->begin(), this->end()); + } + + /// Return true if First and Last form a valid (possibly empty) range in this + /// vector's storage. + bool isRangeInStorage(const void* First, const void* Last) const { + // Use std::less to avoid UB. + std::less<> LessThan; + return !LessThan(First, this->begin()) && !LessThan(Last, First) && + !LessThan(this->end(), Last); + } + + /// Return true unless Elt will be invalidated by resizing the vector to + /// NewSize. + bool isSafeToReferenceAfterResize(const void* Elt, size_t NewSize) { + // Past the end. + if (LLVM_LIKELY(!isReferenceToStorage(Elt))) + return true; + + // Return false if Elt will be destroyed by shrinking. + if (NewSize <= this->size()) + return Elt < this->begin() + NewSize; + + // Return false if we need to grow. + return NewSize <= this->capacity(); + } + + /// Check whether Elt will be invalidated by resizing the vector to NewSize. + void assertSafeToReferenceAfterResize(const void* Elt, size_t NewSize) { + assert(isSafeToReferenceAfterResize(Elt, NewSize) && + "Attempting to reference an element of the vector in an operation " + "that invalidates it"); + } + + /// Check whether Elt will be invalidated by increasing the size of the + /// vector by N. + void assertSafeToAdd(const void* Elt, size_t N = 1) { + this->assertSafeToReferenceAfterResize(Elt, this->size() + N); + } + + /// Check whether any part of the range will be invalidated by clearing. + void assertSafeToReferenceAfterClear(const T* From, const T* To) { + if (From == To) + return; + this->assertSafeToReferenceAfterResize(From, 0); + this->assertSafeToReferenceAfterResize(To - 1, 0); + } + template < + class ItTy, + std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T*>::value, + bool> = false> + void assertSafeToReferenceAfterClear(ItTy, ItTy) {} + + /// Check whether any part of the range will be invalidated by growing. + void assertSafeToAddRange(const T* From, const T* To) { + if (From == To) + return; + this->assertSafeToAdd(From, To - From); + this->assertSafeToAdd(To - 1, To - From); + } + template < + class ItTy, + std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T*>::value, + bool> = false> + void assertSafeToAddRange(ItTy, ItTy) {} + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + template <class U> + static const T* reserveForParamAndGetAddressImpl(U* This, const T& Elt, size_t N) { + size_t NewSize = This->size() + N; + if (LLVM_LIKELY(NewSize <= This->capacity())) + return &Elt; + + bool ReferencesStorage = false; + int64_t Index = -1; + if (!U::TakesParamByValue) { + if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) { + ReferencesStorage = true; + Index = &Elt - This->begin(); + } + } + This->grow(NewSize); + return ReferencesStorage ? 
This->begin() + Index : &Elt; + } + +public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using iterator = T*; + using const_iterator = const T*; + + using const_reverse_iterator = std::reverse_iterator<const_iterator>; + using reverse_iterator = std::reverse_iterator<iterator>; + + using reference = T&; + using const_reference = const T&; + using pointer = T*; + using const_pointer = const T*; + + using Base::capacity; + using Base::empty; + using Base::size; + + // forward iterator creation methods. + iterator begin() { return (iterator)this->BeginX; } + const_iterator begin() const { return (const_iterator)this->BeginX; } + iterator end() { return begin() + size(); } + const_iterator end() const { return begin() + size(); } + + // reverse iterator creation methods. + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + + size_type size_in_bytes() const { return size() * sizeof(T); } + size_type max_size() const { + return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); + } + + size_t capacity_in_bytes() const { return capacity() * sizeof(T); } + + /// Return a pointer to the vector's buffer, even if empty(). + pointer data() { return pointer(begin()); } + /// Return a pointer to the vector's buffer, even if empty(). + const_pointer data() const { return const_pointer(begin()); } + + reference operator[](size_type idx) { + assert(idx < size()); + return begin()[idx]; + } + const_reference operator[](size_type idx) const { + assert(idx < size()); + return begin()[idx]; + } + + reference front() { + assert(!empty()); + return begin()[0]; + } + const_reference front() const { + assert(!empty()); + return begin()[0]; + } + + reference back() { + assert(!empty()); + return end()[-1]; + } + const_reference back() const { + assert(!empty()); + return end()[-1]; + } +}; + +/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put +/// method implementations that are designed to work with non-trivial T's. +/// +/// We approximate is_trivially_copyable with trivial move/copy construction and +/// trivial destruction. While the standard doesn't specify that you're allowed +/// copy these types with memcpy, there is no way for the type to observe this. +/// This catches the important case of std::pair<POD, POD>, which is not +/// trivially assignable. +template <typename T, bool = (std::is_trivially_copy_constructible<T>::value) && (std::is_trivially_move_constructible<T>::value) && std::is_trivially_destructible<T>::value> +class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { + friend class SmallVectorTemplateCommon<T>; + +protected: + static constexpr bool TakesParamByValue = false; + using ValueParamT = const T&; + + SmallVectorTemplateBase(size_t Size) + : SmallVectorTemplateCommon<T>(Size) {} + + static void destroy_range(T* S, T* E) { + while (S != E) { + --E; + E->~T(); + } + } + + /// Move the range [I, E) into the uninitialized memory starting with "Dest", + /// constructing elements as needed. 
+ template <typename It1, typename It2> + static void uninitialized_move(It1 I, It1 E, It2 Dest) { + std::uninitialized_copy(std::make_move_iterator(I), + std::make_move_iterator(E), + Dest); + } + + /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", + /// constructing elements as needed. + template <typename It1, typename It2> + static void uninitialized_copy(It1 I, It1 E, It2 Dest) { + std::uninitialized_copy(I, E, Dest); + } + + /// Grow the allocated memory (without initializing new elements), doubling + /// the size of the allocated memory. Guarantees space for at least one more + /// element, or MinSize more elements if specified. + void grow(size_t MinSize = 0); + + /// Create a new allocation big enough for \p MinSize and pass back its size + /// in \p NewCapacity. This is the first section of \a grow(). + T* mallocForGrow(size_t MinSize, size_t& NewCapacity) { + return static_cast<T*>( + SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow( + MinSize, sizeof(T), NewCapacity)); + } + + /// Move existing elements over to the new allocation \p NewElts, the middle + /// section of \a grow(). + void moveElementsForGrow(T* NewElts); + + /// Transfer ownership of the allocation, finishing up \a grow(). + void takeAllocationForGrow(T* NewElts, size_t NewCapacity); + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + const T* reserveForParamAndGetAddress(const T& Elt, size_t N = 1) { + return this->reserveForParamAndGetAddressImpl(this, Elt, N); + } + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + T* reserveForParamAndGetAddress(T& Elt, size_t N = 1) { + return const_cast<T*>( + this->reserveForParamAndGetAddressImpl(this, Elt, N)); + } + + static T&& forward_value_param(T&& V) { return std::move(V); } + static const T& forward_value_param(const T& V) { return V; } + + void growAndAssign(size_t NumElts, const T& Elt) { + // Grow manually in case Elt is an internal reference. + size_t NewCapacity; + T* NewElts = mallocForGrow(NumElts, NewCapacity); + std::uninitialized_fill_n(NewElts, NumElts, Elt); + this->destroy_range(this->begin(), this->end()); + takeAllocationForGrow(NewElts, NewCapacity); + this->set_size(NumElts); + } + + template <typename... ArgTypes> + T& growAndEmplaceBack(ArgTypes&&... Args) { + // Grow manually in case one of Args is an internal reference. + size_t NewCapacity; + T* NewElts = mallocForGrow(0, NewCapacity); + ::new ((void*)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...); + moveElementsForGrow(NewElts); + takeAllocationForGrow(NewElts, NewCapacity); + this->set_size(this->size() + 1); + return this->back(); + } + +public: + void push_back(const T& Elt) { + const T* EltPtr = reserveForParamAndGetAddress(Elt); + ::new ((void*)this->end()) T(*EltPtr); + this->set_size(this->size() + 1); + } + + void push_back(T&& Elt) { + T* EltPtr = reserveForParamAndGetAddress(Elt); + ::new ((void*)this->end()) T(::std::move(*EltPtr)); + this->set_size(this->size() + 1); + } + + void pop_back() { + this->set_size(this->size() - 1); + this->end()->~T(); + } +}; + +// Define this out-of-line to dissuade the C++ compiler from inlining it. 
+template <typename T, bool TriviallyCopyable> +void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { + size_t NewCapacity; + T* NewElts = mallocForGrow(MinSize, NewCapacity); + moveElementsForGrow(NewElts); + takeAllocationForGrow(NewElts, NewCapacity); +} + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template <typename T, bool TriviallyCopyable> +void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow( + T* NewElts) { + // Move the elements over. + this->uninitialized_move(this->begin(), this->end(), NewElts); + + // Destroy the original elements. + destroy_range(this->begin(), this->end()); +} + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template <typename T, bool TriviallyCopyable> +void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow( + T* NewElts, size_t NewCapacity) { + // If this wasn't grown from the inline copy, deallocate the old space. + if (!this->isSmall()) + free(this->begin()); + + this->BeginX = NewElts; + this->Capacity = NewCapacity; +} + +/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put +/// method implementations that are designed to work with trivially copyable +/// T's. This allows using memcpy in place of copy/move construction and +/// skipping destruction. +template <typename T> +class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { + friend class SmallVectorTemplateCommon<T>; + +protected: + /// True if it's cheap enough to take parameters by value. Doing so avoids + /// overhead related to mitigations for reference invalidation. + static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void*); + + /// Either const T& or T, depending on whether it's cheap enough to take + /// parameters by value. + using ValueParamT = + typename std::conditional<TakesParamByValue, T, const T&>::type; + + SmallVectorTemplateBase(size_t Size) + : SmallVectorTemplateCommon<T>(Size) {} + + // No need to do a destroy loop for POD's. + static void destroy_range(T*, T*) {} + + /// Move the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template <typename It1, typename It2> + static void uninitialized_move(It1 I, It1 E, It2 Dest) { + // Just do a copy. + uninitialized_copy(I, E, Dest); + } + + /// Copy the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template <typename It1, typename It2> + static void uninitialized_copy(It1 I, It1 E, It2 Dest) { + // Arbitrary iterator types; just use the basic implementation. + std::uninitialized_copy(I, E, Dest); + } + + /// Copy the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template <typename T1, typename T2> + static void uninitialized_copy( + T1* I, T1* E, T2* Dest, std::enable_if_t<std::is_same<typename std::remove_const<T1>::type, T2>::value>* = nullptr) { + // Use memcpy for PODs iterated by pointers (which includes SmallVector + // iterators): std::uninitialized_copy optimizes to memmove, but we can + // use memcpy here. Note that I and E are iterators and thus might be + // invalid for memcpy if they are equal. + if (I != E) + memcpy(reinterpret_cast<void*>(Dest), I, (E - I) * sizeof(T)); + } + + /// Double the size of the allocated memory, guaranteeing space for at + /// least one more element or MinSize if specified. 
+ void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); } + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + const T* reserveForParamAndGetAddress(const T& Elt, size_t N = 1) { + return this->reserveForParamAndGetAddressImpl(this, Elt, N); + } + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + T* reserveForParamAndGetAddress(T& Elt, size_t N = 1) { + return const_cast<T*>( + this->reserveForParamAndGetAddressImpl(this, Elt, N)); + } + + /// Copy \p V or return a reference, depending on \a ValueParamT. + static ValueParamT forward_value_param(ValueParamT V) { return V; } + + void growAndAssign(size_t NumElts, T Elt) { + // Elt has been copied in case it's an internal reference, side-stepping + // reference invalidation problems without losing the realloc optimization. + this->set_size(0); + this->grow(NumElts); + std::uninitialized_fill_n(this->begin(), NumElts, Elt); + this->set_size(NumElts); + } + + template <typename... ArgTypes> + T& growAndEmplaceBack(ArgTypes&&... Args) { + // Use push_back with a copy in case Args has an internal reference, + // side-stepping reference invalidation problems without losing the realloc + // optimization. + push_back(T(std::forward<ArgTypes>(Args)...)); + return this->back(); + } + +public: + void push_back(ValueParamT Elt) { + const T* EltPtr = reserveForParamAndGetAddress(Elt); + memcpy(reinterpret_cast<void*>(this->end()), EltPtr, sizeof(T)); + this->set_size(this->size() + 1); + } + + void pop_back() { this->set_size(this->size() - 1); } +}; + +/// This class consists of common code factored out of the SmallVector class to +/// reduce code duplication based on the SmallVector 'N' template parameter. +template <typename T> +class SmallVectorImpl : public SmallVectorTemplateBase<T> { + using SuperClass = SmallVectorTemplateBase<T>; + +public: + using iterator = typename SuperClass::iterator; + using const_iterator = typename SuperClass::const_iterator; + using reference = typename SuperClass::reference; + using size_type = typename SuperClass::size_type; + +protected: + using SmallVectorTemplateBase<T>::TakesParamByValue; + using ValueParamT = typename SuperClass::ValueParamT; + + // Default ctor - Initialize to empty. + explicit SmallVectorImpl(unsigned N) + : SmallVectorTemplateBase<T>(N) {} + + void assignRemote(SmallVectorImpl&& RHS) { + this->destroy_range(this->begin(), this->end()); + if (!this->isSmall()) + free(this->begin()); + this->BeginX = RHS.BeginX; + this->Size = RHS.Size; + this->Capacity = RHS.Capacity; + RHS.resetToSmall(); + } + +public: + SmallVectorImpl(const SmallVectorImpl&) = delete; + + ~SmallVectorImpl() { + // Subclass has already destructed this vector's elements. + // If this wasn't grown from the inline copy, deallocate the old space. + if (!this->isSmall()) + free(this->begin()); + } + + void clear() { + this->destroy_range(this->begin(), this->end()); + this->Size = 0; + } + +private: + // Make set_size() private to avoid misuse in subclasses. 
+ using SuperClass::set_size; + + template <bool ForOverwrite> + void resizeImpl(size_type N) { + if (N == this->size()) + return; + + if (N < this->size()) { + this->truncate(N); + return; + } + + this->reserve(N); + for (auto I = this->end(), E = this->begin() + N; I != E; ++I) + if (ForOverwrite) + new (&*I) T; + else + new (&*I) T(); + this->set_size(N); + } + +public: + void resize(size_type N) { resizeImpl<false>(N); } + + /// Like resize, but \ref T is POD, the new values won't be initialized. + void resize_for_overwrite(size_type N) { resizeImpl<true>(N); } + + /// Like resize, but requires that \p N is less than \a size(). + void truncate(size_type N) { + assert(this->size() >= N && "Cannot increase size with truncate"); + this->destroy_range(this->begin() + N, this->end()); + this->set_size(N); + } + + void resize(size_type N, ValueParamT NV) { + if (N == this->size()) + return; + + if (N < this->size()) { + this->truncate(N); + return; + } + + // N > this->size(). Defer to append. + this->append(N - this->size(), NV); + } + + void reserve(size_type N) { + if (this->capacity() < N) + this->grow(N); + } + + void pop_back_n(size_type NumItems) { + assert(this->size() >= NumItems); + truncate(this->size() - NumItems); + } + + [[nodiscard]] T pop_back_val() { + T Result = ::std::move(this->back()); + this->pop_back(); + return Result; + } + + void swap(SmallVectorImpl& RHS); + + /// Add the specified range to the end of the SmallVector. + template <typename in_iter, + typename = std::enable_if_t<std::is_convertible< + typename std::iterator_traits<in_iter>::iterator_category, + std::input_iterator_tag>::value>> + void append(in_iter in_start, in_iter in_end) { + this->assertSafeToAddRange(in_start, in_end); + size_type NumInputs = std::distance(in_start, in_end); + this->reserve(this->size() + NumInputs); + this->uninitialized_copy(in_start, in_end, this->end()); + this->set_size(this->size() + NumInputs); + } + + /// Append \p NumInputs copies of \p Elt to the end. + void append(size_type NumInputs, ValueParamT Elt) { + const T* EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs); + std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr); + this->set_size(this->size() + NumInputs); + } + + void append(std::initializer_list<T> IL) { + append(IL.begin(), IL.end()); + } + + void append(const SmallVectorImpl& RHS) { append(RHS.begin(), RHS.end()); } + + void assign(size_type NumElts, ValueParamT Elt) { + // Note that Elt could be an internal reference. + if (NumElts > this->capacity()) { + this->growAndAssign(NumElts, Elt); + return; + } + + // Assign over existing elements. + std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt); + if (NumElts > this->size()) + std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt); + else if (NumElts < this->size()) + this->destroy_range(this->begin() + NumElts, this->end()); + this->set_size(NumElts); + } + + // FIXME: Consider assigning over existing elements, rather than clearing & + // re-initializing them - for all assign(...) variants. 
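+
+ // NOTE: a sketch of the aliasing hazard that ValueParamT and
+ // reserveForParamAndGetAddress guard against (hypothetical usage, not code
+ // in this header). When `v` is at capacity:
+ //
+ //   SmallVector<std::string, 1> v{"only element"};
+ //   v.push_back(v[0]); // growth would dangle a plain `const T&` parameter
+ //
+ // Taking small trivially-copyable T by value, or re-deriving the element
+ // address after reserving, keeps this well-defined.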
+ + template <typename in_iter, + typename = std::enable_if_t<std::is_convertible< + typename std::iterator_traits<in_iter>::iterator_category, + std::input_iterator_tag>::value>> + void assign(in_iter in_start, in_iter in_end) { + this->assertSafeToReferenceAfterClear(in_start, in_end); + clear(); + append(in_start, in_end); + } + + void assign(std::initializer_list<T> IL) { + clear(); + append(IL); + } + + void assign(const SmallVectorImpl& RHS) { assign(RHS.begin(), RHS.end()); } + + iterator erase(const_iterator CI) { + // Just cast away constness because this is a non-const member function. + iterator I = const_cast<iterator>(CI); + + assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds."); + + iterator N = I; + // Shift all elts down one. + std::move(I + 1, this->end(), I); + // Drop the last elt. + this->pop_back(); + return (N); + } + + iterator erase(const_iterator CS, const_iterator CE) { + // Just cast away constness because this is a non-const member function. + iterator S = const_cast<iterator>(CS); + iterator E = const_cast<iterator>(CE); + + assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds."); + + iterator N = S; + // Shift all elts down. + iterator I = std::move(E, this->end(), S); + // Drop the last elts. + this->destroy_range(I, this->end()); + this->set_size(I - this->begin()); + return (N); + } + +private: + template <class ArgType> + iterator insert_one_impl(iterator I, ArgType&& Elt) { + // Callers ensure that ArgType is derived from T. + static_assert( + std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>, + T>::value, + "ArgType must be derived from T!"); + + if (I == this->end()) { // Important special case for empty vector. + this->push_back(::std::forward<ArgType>(Elt)); + return this->end() - 1; + } + + assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); + + // Grow if necessary. + size_t Index = I - this->begin(); + std::remove_reference_t<ArgType>* EltPtr = + this->reserveForParamAndGetAddress(Elt); + I = this->begin() + Index; + + ::new ((void*)this->end()) T(::std::move(this->back())); + // Push everything else over. + std::move_backward(I, this->end() - 1, this->end()); + this->set_size(this->size() + 1); + + // If we just moved the element we're inserting, be sure to update + // the reference (never happens if TakesParamByValue). + static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value, + "ArgType must be 'T' when taking by value!"); + if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end())) + ++EltPtr; + + *I = ::std::forward<ArgType>(*EltPtr); + return I; + } + +public: + iterator insert(iterator I, T&& Elt) { + return insert_one_impl(I, this->forward_value_param(std::move(Elt))); + } + + iterator insert(iterator I, const T& Elt) { + return insert_one_impl(I, this->forward_value_param(Elt)); + } + + iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) { + // Convert iterator to elt# to avoid invalidating iterator when we reserve() + size_t InsertElt = I - this->begin(); + + if (I == this->end()) { // Important special case for empty vector. + append(NumToInsert, Elt); + return this->begin() + InsertElt; + } + + assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); + + // Ensure there is enough space, and get the (maybe updated) address of + // Elt. + const T* EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert); + + // Uninvalidate the iterator. 
+ I = this->begin() + InsertElt; + + // If there are more elements between the insertion point and the end of the + // range than there are being inserted, we can use a simple approach to + // insertion. Since we already reserved space, we know that this won't + // reallocate the vector. + if (size_t(this->end() - I) >= NumToInsert) { + T* OldEnd = this->end(); + append(std::move_iterator<iterator>(this->end() - NumToInsert), + std::move_iterator<iterator>(this->end())); + + // Copy the existing elements that get replaced. + std::move_backward(I, OldEnd - NumToInsert, OldEnd); + + // If we just moved the element we're inserting, be sure to update + // the reference (never happens if TakesParamByValue). + if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) + EltPtr += NumToInsert; + + std::fill_n(I, NumToInsert, *EltPtr); + return I; + } + + // Otherwise, we're inserting more elements than exist already, and we're + // not inserting at the end. + + // Move over the elements that we're about to overwrite. + T* OldEnd = this->end(); + this->set_size(this->size() + NumToInsert); + size_t NumOverwritten = OldEnd - I; + this->uninitialized_move(I, OldEnd, this->end() - NumOverwritten); + + // If we just moved the element we're inserting, be sure to update + // the reference (never happens if TakesParamByValue). + if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) + EltPtr += NumToInsert; + + // Replace the overwritten part. + std::fill_n(I, NumOverwritten, *EltPtr); + + // Insert the non-overwritten middle part. + std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr); + return I; + } + + template <typename ItTy, + typename = std::enable_if_t<std::is_convertible< + typename std::iterator_traits<ItTy>::iterator_category, + std::input_iterator_tag>::value>> + iterator insert(iterator I, ItTy From, ItTy To) { + // Convert iterator to elt# to avoid invalidating iterator when we reserve() + size_t InsertElt = I - this->begin(); + + if (I == this->end()) { // Important special case for empty vector. + append(From, To); + return this->begin() + InsertElt; + } + + assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); + + // Check that the reserve that follows doesn't invalidate the iterators. + this->assertSafeToAddRange(From, To); + + size_t NumToInsert = std::distance(From, To); + + // Ensure there is enough space. + reserve(this->size() + NumToInsert); + + // Uninvalidate the iterator. + I = this->begin() + InsertElt; + + // If there are more elements between the insertion point and the end of the + // range than there are being inserted, we can use a simple approach to + // insertion. Since we already reserved space, we know that this won't + // reallocate the vector. + if (size_t(this->end() - I) >= NumToInsert) { + T* OldEnd = this->end(); + append(std::move_iterator<iterator>(this->end() - NumToInsert), + std::move_iterator<iterator>(this->end())); + + // Copy the existing elements that get replaced. + std::move_backward(I, OldEnd - NumToInsert, OldEnd); + + std::copy(From, To, I); + return I; + } + + // Otherwise, we're inserting more elements than exist already, and we're + // not inserting at the end. + + // Move over the elements that we're about to overwrite. + T* OldEnd = this->end(); + this->set_size(this->size() + NumToInsert); + size_t NumOverwritten = OldEnd - I; + this->uninitialized_move(I, OldEnd, this->end() - NumOverwritten); + + // Replace the overwritten part. 
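+ // The hand-written loop below (rather than std::copy) leaves From pointing
+ // at the first element that was not used for overwriting, so the
+ // uninitialized_copy afterwards can resume from it; ItTy may be a
+ // single-pass input iterator, so From cannot simply be re-advanced.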
+ for (T* J = I; NumOverwritten > 0; --NumOverwritten) { + *J = *From; + ++J; + ++From; + } + + // Insert the non-overwritten middle part. + this->uninitialized_copy(From, To, OldEnd); + return I; + } + + void insert(iterator I, std::initializer_list<T> IL) { + insert(I, IL.begin(), IL.end()); + } + + template <typename... ArgTypes> + reference emplace_back(ArgTypes&&... Args) { + if (LLVM_UNLIKELY(this->size() >= this->capacity())) + return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...); + + ::new ((void*)this->end()) T(std::forward<ArgTypes>(Args)...); + this->set_size(this->size() + 1); + return this->back(); + } + + SmallVectorImpl& operator=(const SmallVectorImpl& RHS); + + SmallVectorImpl& operator=(SmallVectorImpl&& RHS); + + bool operator==(const SmallVectorImpl& RHS) const { + if (this->size() != RHS.size()) return false; + return std::equal(this->begin(), this->end(), RHS.begin()); + } + bool operator!=(const SmallVectorImpl& RHS) const { + return !(*this == RHS); + } + + bool operator<(const SmallVectorImpl& RHS) const { + return std::lexicographical_compare(this->begin(), this->end(), RHS.begin(), RHS.end()); + } +}; + +template <typename T> +void SmallVectorImpl<T>::swap(SmallVectorImpl<T>& RHS) { + if (this == &RHS) return; + + // We can only avoid copying elements if neither vector is small. + if (!this->isSmall() && !RHS.isSmall()) { + std::swap(this->BeginX, RHS.BeginX); + std::swap(this->Size, RHS.Size); + std::swap(this->Capacity, RHS.Capacity); + return; + } + this->reserve(RHS.size()); + RHS.reserve(this->size()); + + // Swap the shared elements. + size_t NumShared = this->size(); + if (NumShared > RHS.size()) NumShared = RHS.size(); + for (size_type i = 0; i != NumShared; ++i) + std::swap((*this)[i], RHS[i]); + + // Copy over the extra elts. + if (this->size() > RHS.size()) { + size_t EltDiff = this->size() - RHS.size(); + this->uninitialized_copy(this->begin() + NumShared, this->end(), RHS.end()); + RHS.set_size(RHS.size() + EltDiff); + this->destroy_range(this->begin() + NumShared, this->end()); + this->set_size(NumShared); + } else if (RHS.size() > this->size()) { + size_t EltDiff = RHS.size() - this->size(); + this->uninitialized_copy(RHS.begin() + NumShared, RHS.end(), this->end()); + this->set_size(this->size() + EltDiff); + this->destroy_range(RHS.begin() + NumShared, RHS.end()); + RHS.set_size(NumShared); + } +} + +template <typename T> +SmallVectorImpl<T>& SmallVectorImpl<T>:: +operator=(const SmallVectorImpl<T>& RHS) { + // Avoid self-assignment. + if (this == &RHS) return *this; + + // If we already have sufficient space, assign the common elements, then + // destroy any excess. + size_t RHSSize = RHS.size(); + size_t CurSize = this->size(); + if (CurSize >= RHSSize) { + // Assign common elements. + iterator NewEnd; + if (RHSSize) + NewEnd = std::copy(RHS.begin(), RHS.begin() + RHSSize, this->begin()); + else + NewEnd = this->begin(); + + // Destroy excess elements. + this->destroy_range(NewEnd, this->end()); + + // Trim. + this->set_size(RHSSize); + return *this; + } + + // If we have to grow to have enough elements, destroy the current elements. + // This allows us to avoid copying them during the grow. + // FIXME: don't do this if they're efficiently moveable. + if (this->capacity() < RHSSize) { + // Destroy current elements. + this->clear(); + CurSize = 0; + this->grow(RHSSize); + } else if (CurSize) { + // Otherwise, use assignment for the already-constructed elements. 
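+ // Only the first CurSize slots hold live objects that can be assigned to;
+ // the tail past CurSize is raw memory and is copy-constructed into by the
+ // uninitialized_copy below.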
+ std::copy(RHS.begin(), RHS.begin() + CurSize, this->begin()); + } + + // Copy construct the new elements in place. + this->uninitialized_copy(RHS.begin() + CurSize, RHS.end(), this->begin() + CurSize); + + // Set end. + this->set_size(RHSSize); + return *this; +} + +template <typename T> +SmallVectorImpl<T>& SmallVectorImpl<T>::operator=(SmallVectorImpl<T>&& RHS) { + // Avoid self-assignment. + if (this == &RHS) return *this; + + // If the RHS isn't small, clear this vector and then steal its buffer. + if (!RHS.isSmall()) { + this->assignRemote(std::move(RHS)); + return *this; + } + + // If we already have sufficient space, assign the common elements, then + // destroy any excess. + size_t RHSSize = RHS.size(); + size_t CurSize = this->size(); + if (CurSize >= RHSSize) { + // Assign common elements. + iterator NewEnd = this->begin(); + if (RHSSize) + NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); + + // Destroy excess elements and trim the bounds. + this->destroy_range(NewEnd, this->end()); + this->set_size(RHSSize); + + // Clear the RHS. + RHS.clear(); + + return *this; + } + + // If we have to grow to have enough elements, destroy the current elements. + // This allows us to avoid copying them during the grow. + // FIXME: this may not actually make any sense if we can efficiently move + // elements. + if (this->capacity() < RHSSize) { + // Destroy current elements. + this->clear(); + CurSize = 0; + this->grow(RHSSize); + } else if (CurSize) { + // Otherwise, use assignment for the already-constructed elements. + std::move(RHS.begin(), RHS.begin() + CurSize, this->begin()); + } + + // Move-construct the new elements in place. + this->uninitialized_move(RHS.begin() + CurSize, RHS.end(), this->begin() + CurSize); + + // Set end. + this->set_size(RHSSize); + + RHS.clear(); + return *this; +} + +/// Storage for the SmallVector elements. This is specialized for the N=0 case +/// to avoid allocating unnecessary storage. +template <typename T, unsigned N> +struct SmallVectorStorage { + alignas(T) char InlineElts[N * sizeof(T)]; +}; + +/// We need the storage to be properly aligned even for small-size of 0 so that +/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is +/// well-defined. +template <typename T> +struct alignas(T) SmallVectorStorage<T, 0> {}; + +/// Forward declaration of SmallVector so that +/// calculateSmallVectorDefaultInlinedElements can reference +/// `sizeof(SmallVector<T, 0>)`. +template <typename T, unsigned N> +class SmallVector; + +/// Helper class for calculating the default number of inline elements for +/// `SmallVector<T>`. +/// +/// This should be migrated to a constexpr function when our minimum +/// compiler support is enough for multi-statement constexpr functions. +template <typename T> +struct CalculateSmallVectorDefaultInlinedElements { + // Parameter controlling the default number of inlined elements + // for `SmallVector<T>`. + // + // The default number of inlined elements ensures that + // 1. There is at least one inlined element. + // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless + // it contradicts 1. + static constexpr size_t kPreferredSmallVectorSizeof = 64; + + // static_assert that sizeof(T) is not "too big". + // + // Because our policy guarantees at least one inlined element, it is possible + // for an arbitrarily large inlined element to allocate an arbitrarily large + // amount of inline storage. 
We generally consider it an antipattern for a + // SmallVector to allocate an excessive amount of inline storage, so we want + // to call attention to these cases and make sure that users are making an + // intentional decision if they request a lot of inline storage. + // + // We want this assertion to trigger in pathological cases, but otherwise + // not be too easy to hit. To accomplish that, the cutoff is actually somewhat + // larger than kPreferredSmallVectorSizeof (otherwise, + // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that + // pattern seems useful in practice). + // + // One wrinkle is that this assertion is in theory non-portable, since + // sizeof(T) is in general platform-dependent. However, we don't expect this + // to be much of an issue, because most LLVM development happens on 64-bit + // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for + // 32-bit hosts, dodging the issue. The reverse situation, where development + // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a + // 64-bit host, is expected to be very rare. + static_assert( + sizeof(T) <= 256, + "You are trying to use a default number of inlined elements for " + "`SmallVector<T>` but `sizeof(T)` is really big! Please use an " + "explicit number of inlined elements with `SmallVector<T, N>` to make " + "sure you really want that much inline storage."); + + // Discount the size of the header itself when calculating the maximum inline + // bytes. + static constexpr size_t PreferredInlineBytes = + kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>); + static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T); + static constexpr size_t value = + NumElementsThatFit == 0 ? 1 : NumElementsThatFit; +}; + +/// This is a 'vector' (really, a variable-sized array), optimized +/// for the case when the array is small. It contains some number of elements +/// in-place, which allows it to avoid heap allocation when the actual number of +/// elements is below that threshold. This allows normal "small" cases to be +/// fast without losing generality for large inputs. +/// +/// \note +/// In the absence of a well-motivated choice for the number of inlined +/// elements \p N, it is recommended to use \c SmallVector<T> (that is, +/// omitting the \p N). This will choose a default number of inlined elements +/// reasonable for allocation on the stack (for example, trying to keep \c +/// sizeof(SmallVector<T>) around 64 bytes). +/// +/// \warning This does not attempt to be exception safe. +/// +/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h +template <typename T, + unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value> +class SmallVector : public SmallVectorImpl<T>, + SmallVectorStorage<T, N> { +public: + SmallVector() + : SmallVectorImpl<T>(N) {} + + ~SmallVector() { + // Destroy the constructed elements in the vector. 
+ this->destroy_range(this->begin(), this->end()); + } + + explicit SmallVector(size_t Size, const T& Value = T()) + : SmallVectorImpl<T>(N) { + this->assign(Size, Value); + } + + template <typename ItTy, + typename = std::enable_if_t<std::is_convertible< + typename std::iterator_traits<ItTy>::iterator_category, + std::input_iterator_tag>::value>> + SmallVector(ItTy S, ItTy E) + : SmallVectorImpl<T>(N) { + this->append(S, E); + } + + template <typename RangeTy> + explicit SmallVector(const iterator_range<RangeTy>& R) + : SmallVectorImpl<T>(N) { + this->append(R.begin(), R.end()); + } + + SmallVector(std::initializer_list<T> IL) + : SmallVectorImpl<T>(N) { + this->assign(IL); + } + + SmallVector(const SmallVector& RHS) + : SmallVectorImpl<T>(N) { + if (!RHS.empty()) + SmallVectorImpl<T>::operator=(RHS); + } + + SmallVector& operator=(const SmallVector& RHS) { + SmallVectorImpl<T>::operator=(RHS); + return *this; + } + + SmallVector(SmallVector&& RHS) + : SmallVectorImpl<T>(N) { + if (!RHS.empty()) + SmallVectorImpl<T>::operator=(::std::move(RHS)); + } + + SmallVector(SmallVectorImpl<T>&& RHS) + : SmallVectorImpl<T>(N) { + if (!RHS.empty()) + SmallVectorImpl<T>::operator=(::std::move(RHS)); + } + + SmallVector& operator=(SmallVector&& RHS) { + if (N) { + SmallVectorImpl<T>::operator=(::std::move(RHS)); + return *this; + } + // SmallVectorImpl<T>::operator= does not leverage N==0. Optimize the + // case. + if (this == &RHS) + return *this; + if (RHS.empty()) { + this->destroy_range(this->begin(), this->end()); + this->Size = 0; + } else { + this->assignRemote(std::move(RHS)); + } + return *this; + } + + SmallVector& operator=(SmallVectorImpl<T>&& RHS) { + SmallVectorImpl<T>::operator=(::std::move(RHS)); + return *this; + } + + SmallVector& operator=(std::initializer_list<T> IL) { + this->assign(IL); + return *this; + } +}; + +template <typename T, unsigned N> +inline size_t capacity_in_bytes(const SmallVector<T, N>& X) { + return X.capacity_in_bytes(); +} + +template <typename RangeType> +using ValueTypeFromRangeType = + typename std::remove_const<typename std::remove_reference< + decltype(*std::begin(std::declval<RangeType&>()))>::type>::type; + +/// Given a range of type R, iterate the entire range and return a +/// SmallVector with elements of the vector. This is useful, for example, +/// when you want to iterate a range and then sort the results. +template <unsigned Size, typename R> +SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R&& Range) { + return { std::begin(Range), std::end(Range) }; +} +template <typename R> +SmallVector<ValueTypeFromRangeType<R>, + CalculateSmallVectorDefaultInlinedElements< + ValueTypeFromRangeType<R>>::value> +to_vector(R&& Range) { + return { std::begin(Range), std::end(Range) }; +} + +namespace std { + +/// Implement std::swap in terms of SmallVector swap. +template <typename T> +inline void swap(SmallVectorImpl<T>& LHS, SmallVectorImpl<T>& RHS) { + LHS.swap(RHS); +} + +/// Implement std::swap in terms of SmallVector swap. 
+template <typename T, unsigned N>
+inline void swap(SmallVector<T, N>& LHS, SmallVector<T, N>& RHS) {
+ LHS.swap(RHS);
+}
+
+} // namespace std
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
diff --git a/source/Common/StbImplementations.c b/source/Common/StbImplementations.c
new file mode 100644
index 0000000..73bbc2a
--- /dev/null
+++ b/source/Common/StbImplementations.c
@@ -0,0 +1,14 @@
+#define STB_RECT_PACK_IMPLEMENTATION
+#include <stb_rect_pack.h>
+
+#define STB_TRUETYPE_IMPLEMENTATION
+#include <stb_truetype.h>
+
+#define STB_IMAGE_IMPLEMENTATION
+#include <stb_image.h>
+
+#define STB_SPRINTF_IMPLEMENTATION
+#include <stb_sprintf.h>
+
+#define STB_C_LEXER_IMPLEMENTATION
+#include <stb_c_lexer.h>
diff --git a/source/Common/Type2ObjectMap.hpp b/source/Common/Type2ObjectMap.hpp
new file mode 100644
index 0000000..24c45f3
--- /dev/null
+++ b/source/Common/Type2ObjectMap.hpp
@@ -0,0 +1,38 @@
+#pragma once
+
+#include "TypeTraits.hpp"
+
+#include <cstddef>
+
+template <typename TValue>
+class Type2ObjectMap {
+public:
+ template <typename TType>
+ TType& Insert(TType&& value) {
+ // TODO
+ }
+
+ template <typename TType>
+ TType& InsertOrAssign(TType& value) {
+ // TODO
+ }
+
+ template <typename TType>
+ TType Remove() {
+ // TODO
+ }
+
+ template <typename TType>
+ const TValue* Find() const {
+ // TODO
+ }
+
+ template <class TType>
+ TValue* Find() {
+ return const_cast<TValue*>(const_cast<const Type2ObjectMap*>(this)->Find<TType>());
+ }
+
+ size_t size() const {
+ // TODO
+ }
+};
diff --git a/source/Common/TypeTraits.hpp b/source/Common/TypeTraits.hpp
new file mode 100644
index 0000000..73a56f9
--- /dev/null
+++ b/source/Common/TypeTraits.hpp
@@ -0,0 +1,27 @@
+#pragma once
+
+#include <cstddef>
+
+/// This template will be instantiated for each unique type, and the char variable will be ODR-used, which gives it a unique address.
+template <typename T>
+struct TypeIdentifier {
+ // `inline` so that the ODR-use (taking &obj) needs no out-of-line definition
+ inline static const char obj = 0;
+};
+
+template <typename T>
+struct DefaultDeleter {
+ void operator()(T* ptr) const {
+ delete ptr;
+ }
+};
+
+template <typename>
+struct RemoveMemberPtrImpl {};
+
+template <typename T, typename U>
+struct RemoveMemberPtrImpl<U T::*> {
+ using Type = U;
+};
+
+template <typename T>
+using RemoveMemberPtr = typename RemoveMemberPtrImpl<T>::Type;
diff --git a/source/Common/Uid.cpp b/source/Common/Uid.cpp
new file mode 100644
index 0000000..1930cd8
--- /dev/null
+++ b/source/Common/Uid.cpp
@@ -0,0 +1,58 @@
+#include "Uid.hpp"
+
+#include "RapidJsonHelper.hpp"
+
+#include <rapidjson/document.h>
+#include <cassert>
+#include <cstring>
+#include <limits>
+#include <random>
+
+Uid Uid::Create() {
+ std::random_device rd;
+ std::mt19937_64 gen(rd());
+ std::uniform_int_distribution<uint64_t> dist(
+ std::numeric_limits<uint64_t>::min(),
+ std::numeric_limits<uint64_t>::max());
+
+ Uid uid;
+ uid.upper = dist(gen);
+ uid.lower = dist(gen);
+ return uid;
+}
+
+bool Uid::IsNull() const {
+ return upper == 0 && lower == 0;
+}
+
+void Uid::ReadString(std::string_view str) {
+ // NOTE: assumes the viewed buffer is null-terminated (true for std::string), since sscanf takes no length
+ sscanf(str.data(), BRUSSEL_Uid_SCAN_STR, &upper, &lower);
+}
+
+std::string Uid::WriteString() {
+ char buf[256];
+ snprintf(buf, sizeof(buf), BRUSSEL_Uid_FORMAT_STR, upper, lower);
+ return std::string(buf);
+}
+
+void Uid::Read(const rapidjson::Value& value) {
+ assert(value.IsArray());
+ assert(value.Size() == 2);
+ auto& upper = value[0];
+ assert(upper.IsUint64());
+ auto& lower = value[1];
+ assert(lower.IsUint64());
+
+ this->upper = upper.GetUint64();
+ this->lower = lower.GetUint64();
+}
+
+void Uid::WriteInto(rapidjson::Value& value, rapidjson::Document& root) {
+ value.Reserve(2, root.GetAllocator());
+ value.PushBack((uint64_t)upper, root.GetAllocator());
+ value.PushBack((uint64_t)lower, root.GetAllocator());
+}
+
+rapidjson::Value Uid::Write(rapidjson::Document& root) {
+ rapidjson::Value result(rapidjson::kArrayType);
+ WriteInto(result, root);
+ return result;
+}
diff --git a/source/Common/Uid.hpp b/source/Common/Uid.hpp
new file mode 100644
index 0000000..f58129c
--- /dev/null
+++ b/source/Common/Uid.hpp
@@ -0,0 +1,42 @@
+#pragma once
+
+#include "Utils.hpp"
+
+#include <rapidjson/fwd.h>
+#include <cinttypes>
+#include <functional>
+#include <string>
+#include <string_view>
+
+#define BRUSSEL_Uid_SCAN_STR "%" PRIx64 "-%" PRIx64
+#define BRUSSEL_Uid_SCAN_EXPAND(uid) &((uid).upper), &((uid).lower)
+#define BRUSSEL_Uid_FORMAT_STR "%016" PRIx64 "-%016" PRIx64
+#define BRUSSEL_Uid_FORMAT_EXPAND(uid) (uid).upper, (uid).lower
+
+struct Uid {
+ uint64_t upper = 0;
+ uint64_t lower = 0;
+
+ static Uid Create();
+
+ bool IsNull() const;
+
+ void ReadString(std::string_view str);
+ std::string WriteString();
+
+ void Read(const rapidjson::Value& value);
+ void WriteInto(rapidjson::Value& value, rapidjson::Document& root);
+ rapidjson::Value Write(rapidjson::Document& root);
+
+ auto operator<=>(const Uid&) const = default;
+};
+
+template <>
+struct std::hash<Uid> {
+ size_t operator()(const Uid& uid) const {
+ size_t hash = 0;
+ Utils::HashCombine(hash, uid.upper);
+ Utils::HashCombine(hash, uid.lower);
+ return hash;
+ }
+};
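A quick round-trip sketch of the Uid string format defined above (hypothetical usage, not part of this changeset). WriteString() produces two fixed-width hex halves joined by a dash, which ReadString() parses back with the matching scan format:

    Uid id = Uid::Create();
    std::string text = id.WriteString(); // e.g. "00a3f9c1d2e4b567-07a112233445566f"
    Uid parsed;
    parsed.ReadString(text); // std::string's buffer is null-terminated, as ReadString assumes
    assert(parsed == id); // operator== is synthesized from the defaulted operator<=>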
diff --git a/source/Common/Utils.cpp b/source/Common/Utils.cpp
new file mode 100644
index 0000000..dc76b0a
--- /dev/null
+++ b/source/Common/Utils.cpp
@@ -0,0 +1,107 @@
+#include "Utils.hpp"
+
+#include "Macros.hpp"
+#include "ScopeGuard.hpp"
+
+#include <cstdlib>
+#include <stdexcept>
+#include <utility>
+
+#ifdef _WIN32
+# include <Windows.h>
+#endif
+
+namespace fs = std::filesystem;
+
+#ifdef _WIN32
+# define BRUSSEL_MODE_STRING(string) L##string
+#else
+# define BRUSSEL_MODE_STRING(string) string
+#endif
+
+#if _WIN32
+using FopenModeString = const wchar_t*;
+#else
+using FopenModeString = const char*;
+#endif
+
+static FopenModeString GetModeString(Utils::IoMode mode, bool binary) {
+ using namespace Utils;
+ if (binary) {
+ switch (mode) {
+ case Read: return BRUSSEL_MODE_STRING("rb");
+ case WriteTruncate: return BRUSSEL_MODE_STRING("wb");
+ case WriteAppend: return BRUSSEL_MODE_STRING("ab");
+ }
+ } else {
+ switch (mode) {
+ case Read: return BRUSSEL_MODE_STRING("r");
+ case WriteTruncate: return BRUSSEL_MODE_STRING("w");
+ case WriteAppend: return BRUSSEL_MODE_STRING("a");
+ }
+ }
+ return nullptr;
+}
+
+FILE* Utils::OpenCstdioFile(const fs::path& path, IoMode mode, bool binary) {
+#ifdef _WIN32
+ // fs::path::c_str() returns `const wchar_t*` under Windows, because NT uses UTF-16 natively
+ // NOTE: _wfopen() only affects the type of the path parameter, otherwise the file stream created is identical to the one from fopen()
+ return _wfopen(path.c_str(), ::GetModeString(mode, binary));
+#else
+ return fopen(path.c_str(), ::GetModeString(mode, binary));
+#endif
+}
+
+FILE* Utils::OpenCstdioFile(const char* path, IoMode mode, bool binary) {
+#ifdef _WIN32
+ // On Windows, fopen() expects an ANSI codepage encoded path; convert our UTF-8 string to UTF-16 so that the path keeps working no matter what the locale is
+ WCHAR platformPath[MAX_PATH];
+ if (MultiByteToWideChar(CP_UTF8, 0, path, -1, platformPath, MAX_PATH) == 0) {
+ return nullptr;
+ }
+ return _wfopen(platformPath, ::GetModeString(mode, binary));
+#else
+ return fopen(path, ::GetModeString(mode, binary));
+#endif
+}
+
+std::string Utils::ReadFileAsString(const fs::path& path) {
+ auto file = Utils::OpenCstdioFile(path, Utils::Read);
+ if (!file) throw std::runtime_error("Failed to open source file.");
+ DEFER { fclose(file); };
+
+ fseek(file, 0, SEEK_END);
+ auto fileSize = ftell(file);
+ rewind(file);
+
+ std::string result(fileSize, '\0');
+ // In text mode fewer bytes than fileSize may come back (e.g. CRLF translation on Windows), so trim to what was actually read
+ size_t bytesRead = fread(result.data(), 1, fileSize, file);
+ result.resize(bytesRead);
+
+ return result;
+}
+
+bool Utils::InRangeInclusive(int n, int lower, int upper) {
+ if (lower > upper) {
+ std::swap(lower, upper);
+ }
+ return n >= lower && n <= upper;
+}
+
+bool Utils::LineContains(glm::ivec2 p1, glm::ivec2 p2, glm::ivec2 candidate) {
+ // The segment p1-p2 is axis-aligned; candidate lies on it if it matches the
+ // fixed coordinate exactly and falls within the range of the varying coordinate
+ bool onVerticalLine = p1.x == p2.x && candidate.x == p1.x && InRangeInclusive(candidate.y, p1.y, p2.y);
+ bool onHorizontalLine = p1.y == p2.y && candidate.y == p1.y && InRangeInclusive(candidate.x, p1.x, p2.x);
+ return onVerticalLine || onHorizontalLine;
+}
+
+bool Utils::IsColinear(glm::ivec2 p1, glm::ivec2 p2) {
+ return p1.x == p2.x || p1.y == p2.y;
+}
+
+std::string Utils::MakeRandomNumberedName(const char* tag) {
+ int n = std::rand();
+#define RNG_NAME_PATTERN "Unnamed %s #%d", tag, n
+ // NOTE: the measured size does not include the null-terminator
+ int size = snprintf(nullptr, 0, RNG_NAME_PATTERN);
+ std::string result;
+ result.resize(size); // std::string::resize handles storage for the null-terminator already
+ // size + 1 gives snprintf room for all `size` characters plus the terminator it always writes
+ snprintf(result.data(), size + 1, RNG_NAME_PATTERN);
+#undef RNG_NAME_PATTERN
+ return result;
+}
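The measure-then-write snprintf idiom in MakeRandomNumberedName() above generalizes to any printf-style formatting into a std::string. A minimal sketch (hypothetical helper, not part of this changeset):

    #include <cstdio>
    #include <string>

    std::string FormatLabeled(const char* label, int value) {
        // First pass measures; the returned count excludes the null-terminator
        int size = std::snprintf(nullptr, 0, "%s: %d", label, value);
        std::string result(size, '\0');
        // Second pass writes; size + 1 leaves room for the terminator snprintf always appends
        std::snprintf(result.data(), size + 1, "%s: %d", label, value);
        return result;
    }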
diff --git a/source/Common/Utils.hpp b/source/Common/Utils.hpp
new file mode 100644
index 0000000..9560b57
--- /dev/null
+++ b/source/Common/Utils.hpp
@@ -0,0 +1,65 @@
+#pragma once
+
+#include <robin_hood.h>
+#include <cstdio>
+#include <cstring>
+#include <filesystem>
+#include <glm/glm.hpp>
+#include <string>
+#include <string_view>
+
+namespace Utils {
+
+enum IoMode {
+ Read,
+ WriteTruncate,
+ WriteAppend,
+};
+
+FILE* OpenCstdioFile(const std::filesystem::path& path, IoMode mode, bool binary = false);
+FILE* OpenCstdioFile(const char* path, IoMode mode, bool binary = false);
+
+std::string ReadFileAsString(const std::filesystem::path& path);
+
+constexpr float Abs(float v) noexcept {
+ return v < 0.0f ? -v : v;
+}
+
+bool InRangeInclusive(int n, int lower, int upper);
+bool LineContains(glm::ivec2 p1, glm::ivec2 p2, glm::ivec2 candidate);
+
+bool IsColinear(glm::ivec2 p1, glm::ivec2 p2);
+
+template <class T>
+void HashCombine(std::size_t& seed, const T& v) {
+ seed ^= std::hash<T>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+
+std::string MakeRandomNumberedName(const char* tag);
+
+} // namespace Utils
+
+struct StringHash {
+ using is_transparent = void;
+
+ std::size_t operator()(const std::string& key) const { return robin_hood::hash_bytes(key.c_str(), key.size()); }
+ std::size_t operator()(std::string_view key) const { return robin_hood::hash_bytes(key.data(), key.size()); }
+ std::size_t operator()(const char* key) const { return robin_hood::hash_bytes(key, std::strlen(key)); }
+};
+
+struct StringEqual {
+ using is_transparent = int;
+
+ bool operator()(std::string_view lhs, const std::string& rhs) const {
+ const std::string_view view = rhs;
+ return lhs == view;
+ }
+
+ bool operator()(const char* lhs, const std::string& rhs) const {
+ return std::strcmp(lhs, rhs.c_str()) == 0;
+ }
+
+ bool operator()(const std::string& lhs, const std::string& rhs) const {
+ return lhs == rhs;
+ }
+};
diff --git a/source/Common/YCombinator.hpp b/source/Common/YCombinator.hpp
new file mode 100644
index 0000000..b1d2350
--- /dev/null
+++ b/source/Common/YCombinator.hpp
@@ -0,0 +1,14 @@
+#pragma once
+
+template <class Func>
+struct YCombinator {
+ // NOTE: no explicit constructor, so this can be initialized as an aggregate
+ Func func;
+
+ template <class... Ts>
+ decltype(auto) operator()(Ts&&... args) const {
+ // NOTE: static_cast<Ts&&>(args)... is equivalent to std::forward<Ts>(args)...
+ // written this way so that we don't have to include <utility>, as well as reducing template instantiations to help compile time
+ return func(*this, static_cast<Ts&&>(args)...);
+ }
+};
diff --git a/source/Common/buildfile b/source/Common/buildfile
new file mode 100644
index 0000000..8d1c8b6
--- /dev/null
+++ b/source/Common/buildfile
@@ -0,0 +1 @@
+
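For reference, a minimal sketch of how YCombinator above is meant to be used (hypothetical usage, not part of this changeset): the wrapped callable receives the combinator itself as its first argument, letting a lambda recurse without being able to name itself.

    YCombinator fib{
        [](const auto& self, int n) -> int {
            // `self` is the YCombinator; calling it re-enters this lambda
            return n < 2 ? n : self(n - 1) + self(n - 2);
        }
    };
    int x = fib(10); // 55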