// BLT/include/blt/std/memory.h

/*
* Created by Brett on 08/02/23.
* Licensed under GNU General Public License V3.0
* See LICENSE file for license detail
*/
#ifndef BLT_TESTS_MEMORY_H
#define BLT_TESTS_MEMORY_H
#include <initializer_list>
#include <iterator>
#include <cstring>
#include "queue.h"
#include "utility.h"
#include <blt/std/assert.h>
#include <blt/std/logging.h>
#include <cstdint>
#include <type_traits>
#include <algorithm>
#include <utility>
#include <array>
#include <optional>
#include <stdexcept>
#include <string>
#if defined(__clang__) || defined(__llvm__) || defined(__GNUC__) || defined(__GNUG__)
#include <byteswap.h>
#define SWAP16(val) bswap_16(val)
#define SWAP32(val) bswap_32(val)
#define SWAP64(val) bswap_64(val)
#if __cplusplus >= 202002L
#include <bit>
#define ENDIAN_LOOKUP(little_endian) (std::endian::native == std::endian::little && !little_endian) || \
(std::endian::native == std::endian::big && little_endian)
#else
#define ENDIAN_LOOKUP(little_endian) !little_endian
#endif
#elif defined(_MSC_VER)
#include <intrin.h>
#define SWAP16(val) _byteswap_ushort(val)
#define SWAP32(val) _byteswap_ulong(val)
#define SWAP64(val) _byteswap_uint64(val)
#define ENDIAN_LOOKUP(little_endian) !little_endian
#endif
namespace blt
{
namespace mem
{
// Used to grab the byte data of any T element. Defaults to big endian; can be configured to output little endian.
template<bool little_endian = false, typename BYTE_TYPE, typename T>
inline static int toBytes(const T& in, BYTE_TYPE* out)
{
static_assert(std::is_same_v<BYTE_TYPE, std::int8_t> || std::is_same_v<BYTE_TYPE, std::uint8_t>,
"Must provide a signed/unsigned int8 type");
std::memcpy(out, (void*) &in, sizeof(T));
if constexpr (ENDIAN_LOOKUP(little_endian))
{
// TODO: this but better.
for (size_t i = 0; i < sizeof(T) / 2; i++)
std::swap(out[i], out[sizeof(T) - 1 - i]);
}
return 0;
}
// Used to reconstruct a T object from its raw bytes. Assumes the data is big endian (configurable).
template<bool little_endian = false, typename BYTE_TYPE, typename T>
inline static int fromBytes(const BYTE_TYPE* in, T& out)
{
static_assert(std::is_same_v<BYTE_TYPE, std::int8_t> || std::is_same_v<BYTE_TYPE, std::uint8_t>,
"Must provide a signed/unsigned int8 type");
std::array<BYTE_TYPE, sizeof(T)> data;
std::memcpy(data.data(), in, sizeof(T));
if constexpr (ENDIAN_LOOKUP(little_endian))
{
// if we need to swap find the best way to do so
if constexpr (std::is_same_v<T, int16_t> || std::is_same_v<T, uint16_t>)
out = SWAP16(*reinterpret_cast<T*>(data.data()));
else if constexpr (std::is_same_v<T, int32_t> || std::is_same_v<T, uint32_t>)
out = SWAP32(*reinterpret_cast<T*>(data.data()));
else if constexpr (std::is_same_v<T, int64_t> || std::is_same_v<T, uint64_t>)
out = SWAP64(*reinterpret_cast<T*>(data.data()));
else
{
std::reverse(data.begin(), data.end());
out = *reinterpret_cast<T*>(data.data());
}
}
return 0;
}
template<bool little_endian = false, typename BYTE_TYPE, typename T>
inline static int fromBytes(const BYTE_TYPE* in, T* out)
{
return fromBytes<little_endian>(in, *out);
}
}
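// A minimal usage sketch (illustration only, not part of this header's API): round-tripping a value
// through toBytes/fromBytes. The variable names below are made up for the example.
//
// uint32_t value = 0xAABBCCDD;
// uint8_t bytes[sizeof(value)];
// blt::mem::toBytes(value, bytes); // big endian byte order by default
// uint32_t restored;
// blt::mem::fromBytes(bytes, restored); // restored == value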
template<typename V>
struct ptr_iterator
{
public:
using iterator_category = std::random_access_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = V;
using pointer = value_type*;
using reference = value_type&;
explicit ptr_iterator(V* v): _v(v)
{}
reference operator*() const
{ return *_v; }
pointer operator->()
{ return _v; }
ptr_iterator& operator++()
{
_v++;
return *this;
}
ptr_iterator& operator--()
{
_v--;
return *this;
}
ptr_iterator operator++(int)
{
auto tmp = *this;
++(*this);
return tmp;
}
ptr_iterator operator--(int)
{
auto tmp = *this;
--(*this);
return tmp;
}
friend bool operator==(const ptr_iterator& a, const ptr_iterator& b)
{
return a._v == b._v;
}
friend bool operator!=(const ptr_iterator& a, const ptr_iterator& b)
{
return a._v != b._v;
}
private:
V* _v;
};
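// Illustrative sketch only: ptr_iterator adapts a raw pointer to the iterator interface used by the
// containers below, so ranges over contiguous storage can be walked with standard iterator syntax.
//
// int values[3]{1, 2, 3};
// for (auto it = blt::ptr_iterator{values}; it != blt::ptr_iterator{values + 3}; ++it)
//     BLT_INFO("%d", *it);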
/**
* Encapsulates a heap-allocated T array that is automatically deleted when this object goes out of scope.
* This is a simple buffer meant to be used inside a single function rather than passed around.
* The operator * has been overloaded to return the internal buffer.
* A short usage sketch follows the class definition.
* @tparam T type that is stored in the buffer, e.g. char
*/
template<typename T, bool = std::is_copy_constructible_v<T> || std::is_copy_assignable_v<T>>
class scoped_buffer
{
private:
T* buffer_ = nullptr;
size_t size_;
public:
scoped_buffer(): buffer_(nullptr), size_(0)
{}
explicit scoped_buffer(size_t size): size_(size)
{
if (size > 0)
buffer_ = new T[size];
else
buffer_ = nullptr;
}
scoped_buffer(const scoped_buffer& copy)
{
if (copy.size() == 0)
{
buffer_ = nullptr;
size_ = 0;
return;
}
buffer_ = new T[copy.size()];
size_ = copy.size_;
if constexpr (std::is_trivially_copyable_v<T>)
{
std::memcpy(buffer_, copy.buffer_, copy.size() * sizeof(T));
} else
{
if constexpr (std::is_copy_constructible_v<T> && !std::is_copy_assignable_v<T>)
{
for (size_t i = 0; i < this->size_; i++)
buffer_[i] = T(copy[i]);
} else
for (size_t i = 0; i < this->size_; i++)
buffer_[i] = copy[i];
}
}
scoped_buffer& operator=(const scoped_buffer& copy)
{
if (&copy == this)
return *this;
if (copy.size() == 0)
{
// release any existing storage before becoming an empty buffer, otherwise it would leak
delete[] buffer_;
buffer_ = nullptr;
size_ = 0;
return *this;
}
delete[] this->buffer_;
buffer_ = new T[copy.size()];
size_ = copy.size_;
if constexpr (std::is_trivially_copyable_v<T>)
{
std::memcpy(buffer_, copy.buffer_, copy.size() * sizeof(T));
} else
{
if constexpr (std::is_copy_constructible_v<T> && !std::is_copy_assignable_v<T>)
{
for (size_t i = 0; i < this->size_; i++)
buffer_[i] = T(copy[i]);
} else
for (size_t i = 0; i < this->size_; i++)
buffer_[i] = copy[i];
}
return *this;
}
scoped_buffer(scoped_buffer&& move) noexcept
{
buffer_ = move.buffer_;
size_ = move.size();
// leave the moved-from object in a valid empty state
move.buffer_ = nullptr;
move.size_ = 0;
}
scoped_buffer& operator=(scoped_buffer&& moveAssignment) noexcept
{
if (&moveAssignment == this)
return *this;
delete[] buffer_;
buffer_ = moveAssignment.buffer_;
size_ = moveAssignment.size();
// leave the moved-from object in a valid empty state
moveAssignment.buffer_ = nullptr;
moveAssignment.size_ = 0;
return *this;
}
inline T& operator[](size_t index)
{
return buffer_[index];
}
inline const T& operator[](size_t index) const
{
return buffer_[index];
}
inline T* operator*()
{
return buffer_;
}
[[nodiscard]] inline size_t size() const
{
return size_;
}
inline T*& ptr()
{
return buffer_;
}
inline const T* const& ptr() const
{
return buffer_;
}
inline const T* const& data() const
{
return buffer_;
}
inline T*& data()
{
return buffer_;
}
inline ptr_iterator<T> begin()
{
return ptr_iterator{buffer_};
}
inline ptr_iterator<T> end()
{
return ptr_iterator{&buffer_[size_]};
}
~scoped_buffer()
{
delete[] buffer_;
}
};
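// Usage sketch (illustrative only): a scoped_buffer owns its heap allocation for the duration of a
// scope, so no manual delete[] is required. The size, contents, and consumer below are arbitrary examples.
//
// blt::scoped_buffer<char> temp(1024); // heap allocation, freed automatically at end of scope
// std::memset(*temp, 0, temp.size()); // operator* exposes the raw buffer
// temp[0] = 'a';
// for (char c : temp) // begin()/end() allow range-for iteration
//     do_something(c); // hypothetical consumer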
template<typename T, size_t MAX_SIZE>
class static_vector
{
private:
T buffer_[MAX_SIZE];
size_t size_ = 0;
public:
static_vector() = default;
inline bool push_back(const T& copy)
{
if (size_ >= MAX_SIZE)
return false;
buffer_[size_++] = copy;
return true;
}
inline bool push_back(T&& move)
{
if (size_ >= MAX_SIZE)
return false;
buffer_[size_++] = std::move(move);
return true;
}
inline T& at(size_t index)
{
if (index >= MAX_SIZE)
throw std::runtime_error("Array index " + std::to_string(index) + " out of bounds! (Max size: " + std::to_string(MAX_SIZE) + ')');
return buffer_[index];
}
inline T& operator[](size_t index)
{
return buffer_[index];
}
inline const T& operator[](size_t index) const
{
return buffer_[index];
}
inline void reserve(size_t size)
{
if (size > MAX_SIZE)
size = MAX_SIZE;
size_ = size;
}
[[nodiscard]] inline size_t size() const
{
return size_;
}
[[nodiscard]] inline size_t capacity() const
{
return MAX_SIZE;
}
inline T* data()
{
return buffer_;
}
inline T* operator*()
{
return buffer_;
}
inline const T* data() const
{
return buffer_;
}
inline T* begin()
{
return buffer_;
}
inline T* end()
{
return &buffer_[size_];
}
};
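// Usage sketch (illustrative only): static_vector keeps its elements in a fixed-size, stack-allocated
// array, so push_back reports failure instead of growing. Sizes and values are arbitrary examples.
//
// blt::static_vector<int, 4> vec;
// for (int i = 0; i < 10; i++)
// {
//     if (!vec.push_back(i)) // fails once the 4-element capacity is reached
//         break;
// }
// BLT_INFO("stored %d of 10 values", (int) vec.size());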
template<typename T>
// inheritance is public so the buffer interface (operator[], size(), data(), ...) stays usable on the non-copyable variant
class scoped_buffer<T, false> : public scoped_buffer<T, true>
{
using scoped_buffer<T, true>::scoped_buffer;
public:
scoped_buffer(const scoped_buffer& copy) = delete;
scoped_buffer& operator=(const scoped_buffer& copyAssignment) = delete;
};
template<typename T>
struct nullptr_initializer
{
private:
T* m_ptr = nullptr;
public:
nullptr_initializer() = default;
explicit nullptr_initializer(T* ptr): m_ptr(ptr)
{}
nullptr_initializer(const nullptr_initializer<T>& ptr): m_ptr(ptr.m_ptr)
{}
nullptr_initializer(nullptr_initializer<T>&& ptr) noexcept: m_ptr(ptr.m_ptr)
{}
nullptr_initializer<T>& operator=(const nullptr_initializer<T>& ptr)
{
if (&ptr == this)
return *this;
this->m_ptr = ptr.m_ptr;
return *this;
}
nullptr_initializer<T>& operator=(nullptr_initializer<T>&& ptr) noexcept
{
if (&ptr == this)
return *this;
this->m_ptr = ptr.m_ptr;
return *this;
}
inline T* operator->()
{
return m_ptr;
}
~nullptr_initializer() = default;
};
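// Usage sketch (illustrative only): nullptr_initializer is a thin wrapper that guarantees the wrapped
// pointer starts out as nullptr while still forwarding member access through operator->. It does not
// own or delete the pointer; cleanup stays with the caller. The type below is hypothetical.
//
// struct engine { void run() {} };
// blt::nullptr_initializer<engine> handle; // holds nullptr until given a pointer
// blt::nullptr_initializer<engine> active(new engine());
// active->run();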
/**
* Creates a hash-map like association between an enum key and any arbitrary value.
* The storage is backed by a contiguous array for faster access.
* @tparam K enum value
* @tparam V associated value
*/
template<typename K, typename V>
class enum_storage
{
private:
V* m_values;
size_t m_size = 0;
public:
enum_storage(std::initializer_list<std::pair<K, V>> init)
{
// size the array so that the largest enum value is a valid index
for (auto& i : init)
m_size = std::max((size_t) i.first + 1, m_size);
m_values = new V[m_size];
for (auto& v : init)
m_values[(size_t) v.first] = v.second;
}
inline V& operator[](size_t index)
{
return m_values[index];
}
inline const V& operator[](size_t index) const
{
return m_values[index];
}
[[nodiscard]] inline size_t size() const
{
return m_size;
}
ptr_iterator<V> begin()
{
return ptr_iterator{m_values};
}
ptr_iterator<V> end()
{
return ptr_iterator{&m_values[m_size]};
}
~enum_storage()
{
delete[] m_values;
}
};
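// Usage sketch (illustrative only): enum_storage associates each enum key with a value stored in a flat
// array indexed by the enum's integer value. The enum and strings below are made-up examples.
//
// enum class level { INFO, WARN, ERROR };
// blt::enum_storage<level, const char*> names{
//     {level::INFO, "info"}, {level::WARN, "warn"}, {level::ERROR, "error"}};
// BLT_INFO("%s", names[(size_t) level::WARN]);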
template<typename T, size_t BLOCK_SIZE = 8192>
class area_allocator
{
public:
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef void* void_pointer;
typedef const void* const_void_pointer;
private:
/**
* Stores a view to a region of memory that has been deallocated
* This is a non-owning reference to the memory block
*
* pointer p is the pointer to the beginning of the block of memory
* size_t n is the number of elements that this block can hold
*/
struct pointer_view
{
pointer p;
size_t n;
};
/**
* Stores the actual data for allocated blocks. Since an arbitrary number of items can be allocated,
* the allocator needs somewhere to keep that data: each block_storage owns a pointer to a region of memory
* along with a count of the elements in use. Only elements up to used need their destructors called,
* which should be handled by the deallocate function; it is UB to never deallocate memory obtained from this allocator.
*
* An internal vector stores the regions of memory which have been deallocated. The allocate function searches
* these free regions for one of sufficient size in order to maximize memory reuse. In the future more advanced
* methods should be used, both for faster access to deallocated blocks of sufficient size and to keep memory coherent.
*/
struct block_storage
{
pointer data;
size_t used = 0;
// TODO: b-tree?
std::vector<pointer_view> unallocated_blocks;
};
/**
* Stores an index into a block's unallocated_blocks list along with the amount of memory left over after the
* allocation, as well as the block being allocated from. The newly inserted leftover region starts at old_ptr + size.
*/
struct block_view
{
block_storage* blk;
size_t index;
size_t leftover;
block_view(block_storage* blk, size_t index, size_t leftover): blk(blk), index(index), leftover(leftover)
{}
};
/**
* Allocate a new block of memory and push it to the back of blocks.
*/
inline void allocate_block()
{
//BLT_INFO("Allocating a new block of size %d", BLOCK_SIZE);
auto* blk = new block_storage();
blk->data = static_cast<pointer>(malloc(sizeof(T) * BLOCK_SIZE));
blocks.push_back(blk);
}
/**
* Searches for a free block inside the block storage with sufficient space and returns an optional view to it
* The optional will be empty if no open block can be found.
*/
inline std::optional<block_view> search_for_block(block_storage* blk, size_t n)
{
for (auto kv : blt::enumerate(blk->unallocated_blocks))
{
if (kv.second.n >= n)
return block_view{blk, kv.first, kv.second.n - n};
}
return {};
}
/**
* removes the block of memory from the unallocated_blocks storage in the underlying block, inserting a new unallocated block if
* there was any leftover. Returns a pointer to the beginning of the new block.
*/
inline pointer swap_pop_resize_if(const block_view& view, size_t n)
{
pointer_view ptr = view.blk->unallocated_blocks[view.index];
std::iter_swap(view.blk->unallocated_blocks.begin() + view.index, view.blk->unallocated_blocks.end() - 1);
view.blk->unallocated_blocks.pop_back();
if (view.leftover > 0)
view.blk->unallocated_blocks.push_back({ptr.p + n, view.leftover});
return ptr.p;
}
/**
* Finds the next available unallocated region of memory, or returns an empty optional if none meets the size requirement
*/
inline std::optional<pointer> find_available_block(size_t n)
{
for (auto* blk : blocks)
{
if (auto view = search_for_block(blk, n))
return swap_pop_resize_if(view.value(), n);
}
return {};
}
/**
* Returns a pointer to a block of memory along with the offset inside that block at which the requested allocation begins
*/
inline std::pair<pointer, size_t> getBlock(size_t n)
{
if (auto blk = find_available_block(n))
return {blk.value(), 0};
if (blocks.back()->used + n > BLOCK_SIZE)
allocate_block();
auto ptr = std::pair<pointer, size_t>{blocks.back()->data, blocks.back()->used};
blocks.back()->used += n;
return ptr;
}
/**
* Calls the default constructor on the new elements if they require construction; for trivially constructible types this is a no-op
*/
inline void allocate_in_block(pointer begin, size_t n)
{
if constexpr (std::is_default_constructible_v<T> && !std::is_trivially_default_constructible_v<T>)
{
for (size_t i = 0; i < n; i++)
new(&begin[i]) T();
}
}
public:
area_allocator()
{
allocate_block();
}
[[nodiscard]] pointer allocate(size_t n)
{
if (n > BLOCK_SIZE)
throw std::runtime_error("Requested allocation is too large!");
auto block_info = getBlock(n);
auto* ptr = &block_info.first[block_info.second];
// call constructors on the objects if they require it
allocate_in_block(ptr, n);
return ptr;
}
void deallocate(pointer p, size_t n) noexcept
{
for (size_t i = 0; i < n; i++)
p[i].~T();
for (auto*& blk : blocks)
{
if (p >= blk->data && p <= (blk->data + BLOCK_SIZE))
{
blk->unallocated_blocks.push_back({p, n});
break;
}
}
}
~area_allocator()
{
for (auto*& blk : blocks)
{
free(blk->data);
delete blk;
}
}
private:
std::vector<block_storage*> blocks;
};
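// Usage sketch (illustrative only): area_allocator hands out objects from large pre-allocated blocks
// and recycles deallocated regions through its internal free list. The type and count are examples.
//
// blt::area_allocator<int> alloc;
// int* ints = alloc.allocate(64); // carved out of an 8192-element block
// ints[0] = 42;
// alloc.deallocate(ints, 64); // destroys the objects and returns the region to the free list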
}
#endif //BLT_TESTS_MEMORY_H