blt-gp/include/blt/gp/stack.h

#pragma once
/*
* Copyright (C) 2024 Brett Terpstra
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef BLT_GP_STACK_H
#define BLT_GP_STACK_H
#include <blt/std/types.h>
#include <blt/std/assert.h>
#include <blt/std/logging.h>
#include <blt/std/allocator.h>
#include <blt/std/ranges.h>
#include <blt/std/meta.h>
#include <blt/gp/fwdecl.h>
#include <utility>
#include <stdexcept>
#include <cstdlib>
#include <memory>
#include <type_traits>
#include <cstring>
#include <iostream>
namespace blt::gp
{
namespace detail
{
BLT_META_MAKE_FUNCTION_CHECK(drop);
}
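// BLT_META_MAKE_FUNCTION_CHECK(drop) is assumed to generate the trait
// detail::has_func_drop_v<T>, true when T exposes a callable drop() member.
// A type opts in to stack-side cleanup simply by providing that member, e.g.
// (illustrative type, not part of this header):
//
//     struct owned_buffer
//     {
//         blt::u8* ptr;
//         void drop() { std::free(ptr); } // invoked by the allocator in place of a destructor
//     };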
class stack_allocator
{
constexpr static blt::size_t PAGE_SIZE = 0x1000;
constexpr static blt::size_t MAX_ALIGNMENT = 8;
template<typename T>
using NO_REF_T = std::remove_cv_t<std::remove_reference_t<T>>;
public:
struct size_data_t
{
blt::size_t total_size_bytes = 0;
blt::size_t total_used_bytes = 0;
blt::size_t total_remaining_bytes = 0;
blt::size_t total_no_meta_bytes = 0;
blt::size_t total_dealloc = 0;
blt::size_t total_dealloc_used = 0;
blt::size_t total_dealloc_remaining = 0;
blt::size_t total_dealloc_no_meta = 0;
blt::size_t blocks = 0;
friend std::ostream& operator<<(std::ostream& stream, const size_data_t& data)
{
stream << "[";
stream << data.total_used_bytes << "/";
stream << data.total_size_bytes << "(";
stream << (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_size_bytes) * 100) << "%), ";
stream << data.total_used_bytes << "/";
stream << data.total_no_meta_bytes << "(";
stream << (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_no_meta_bytes) * 100)
<< "%), (empty space: ";
stream << data.total_remaining_bytes << ") blocks: " << data.blocks << " || unallocated space: ";
stream << data.total_dealloc_used << "/";
stream << data.total_dealloc;
if (data.total_dealloc > 0)
stream << "(" << (static_cast<double>(data.total_dealloc_used) / static_cast<double>(data.total_dealloc) * 100) << "%)";
stream << ", ";
stream << data.total_dealloc_used << "/";
stream << data.total_dealloc_no_meta;
if (data.total_dealloc_no_meta > 0)
stream << "(" << (static_cast<double>(data.total_dealloc_used) / static_cast<double>(data.total_dealloc_no_meta * 100))
<< "%)";
stream << ", (empty space: " << data.total_dealloc_remaining << ")]";
return stream;
}
};
template<typename T>
static inline constexpr blt::size_t aligned_size() noexcept
{
return aligned_size(sizeof(NO_REF_T<T>));
}
static inline constexpr blt::size_t aligned_size(blt::size_t size) noexcept
{
return (size + (MAX_ALIGNMENT - 1)) & ~(MAX_ALIGNMENT - 1);
}
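// Worked example: with MAX_ALIGNMENT = 8 this rounds a size up to the next
// multiple of 8, so aligned_size(1) == 8, aligned_size(8) == 8 and
// aligned_size(13) == 16. Every slot therefore starts on an 8-byte boundary,
// which is what lets pop() and from() walk the buffer with offset arithmetic
// alone.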
stack_allocator() = default;
stack_allocator(const stack_allocator& copy)
{
if (copy.data_ == nullptr || copy.bytes_stored == 0)
return;
expand(copy.size_);
std::memcpy(data_, copy.data_, copy.bytes_stored);
bytes_stored = copy.bytes_stored;
}
stack_allocator(stack_allocator&& move) noexcept:
data_(std::exchange(move.data_, nullptr)), bytes_stored(move.bytes_stored), size_(move.size_)
{}
stack_allocator& operator=(const stack_allocator& copy) = delete;
stack_allocator& operator=(stack_allocator&& move) noexcept
{
data_ = std::exchange(move.data_, data_);
size_ = std::exchange(move.size_, size_);
bytes_stored = std::exchange(move.bytes_stored, bytes_stored);
return *this;
}
~stack_allocator()
{
std::free(data_);
}
void insert(const stack_allocator& stack)
{
if (size_ < stack.bytes_stored + bytes_stored)
expand(stack.bytes_stored + bytes_stored);
std::memcpy(data_ + bytes_stored, stack.data_, stack.bytes_stored);
bytes_stored += stack.bytes_stored;
}
void copy_from(const stack_allocator& stack, blt::size_t bytes)
{
if (size_ < bytes + bytes_stored)
expand(bytes + bytes_stored);
std::memcpy(data_ + bytes_stored, stack.data_ + (stack.bytes_stored - bytes), bytes);
bytes_stored += bytes;
}
void copy_from(blt::u8* data, blt::size_t bytes)
{
if (size_ < bytes + bytes_stored)
expand(bytes + bytes_stored);
std::memcpy(data_ + bytes_stored, data, bytes);
bytes_stored += bytes;
}
void copy_to(blt::u8* data, blt::size_t bytes) const
{
std::memcpy(data, data_ + (bytes_stored - bytes), bytes);
}
template<typename T, typename NO_REF = NO_REF_T<T>>
void push(const T& t)
{
static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
auto ptr = allocate_bytes_for_size(sizeof(NO_REF));
std::memcpy(ptr, &t, sizeof(NO_REF));
}
template<typename T, typename NO_REF = NO_REF_T<T>>
T pop()
{
static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
constexpr auto size = aligned_size(sizeof(NO_REF));
bytes_stored -= size;
return *reinterpret_cast<T*>(data_ + bytes_stored);
}
template<typename T, typename NO_REF = NO_REF_T<T>>
T& from(blt::size_t bytes)
{
static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
auto size = aligned_size(sizeof(NO_REF)) + bytes;
return *reinterpret_cast<NO_REF*>(data_ + bytes_stored - size);
}
void pop_bytes(blt::size_t bytes)
{
bytes_stored -= bytes;
}
void transfer_bytes(stack_allocator& to, blt::size_t bytes)
{
const auto aligned = aligned_size(bytes);
to.copy_from(*this, aligned);
// pop the same aligned amount that was copied so both stacks stay consistent
pop_bytes(aligned);
}
template<typename... Args>
void call_destructors(detail::bitmask_t* mask)
{
if constexpr (sizeof...(Args) > 0)
{
blt::size_t offset = (stack_allocator::aligned_size(sizeof(NO_REF_T<Args>)) + ...) -
stack_allocator::aligned_size(sizeof(NO_REF_T<typename blt::meta::arg_helper<Args...>::First>));
blt::size_t index = 0;
if (mask != nullptr)
index = mask->size() - sizeof...(Args);
((call_drop<Args>(offset, index, mask), offset -= stack_allocator::aligned_size(sizeof(NO_REF_T<Args>)), ++index), ...);
if (mask != nullptr)
{
auto& mask_r = *mask;
for (blt::size_t i = 0; i < sizeof...(Args); i++)
mask_r.pop_back();
}
}
}
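// A minimal usage sketch (owned_buffer is illustrative, not part of this
// header): once an operator taking (int, owned_buffer) has its arguments on
// the stack,
//
//     stack.call_destructors<int, owned_buffer>(nullptr);
//
// locates each argument by its aligned offset from the top and calls drop()
// on the types that have one; int is skipped at compile time. A non-null
// bitmask suppresses drop() for entries whose bit is false, e.g. values that
// were transferred elsewhere and must not be released twice.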
[[nodiscard]] bool empty() const noexcept
{
return bytes_stored == 0;
}
[[nodiscard]] blt::ptrdiff_t remaining_bytes_in_block() const noexcept
{
return static_cast<blt::ptrdiff_t>(size_ - bytes_stored);
}
[[nodiscard]] blt::ptrdiff_t bytes_in_head() const noexcept
{
return static_cast<blt::ptrdiff_t>(bytes_stored);
}
[[nodiscard]] size_data_t size() const noexcept
{
size_data_t data;
data.total_used_bytes = bytes_stored;
data.total_size_bytes = size_;
data.total_remaining_bytes = remaining_bytes_in_block();
return data;
}
private:
void expand(blt::size_t bytes)
{
bytes = to_nearest_page_size(bytes);
auto new_data = static_cast<blt::u8*>(std::malloc(bytes));
// malloc can fail; surface it the same way the allocation path does
if (new_data == nullptr)
throw std::bad_alloc();
if (bytes_stored > 0)
std::memcpy(new_data, data_, bytes_stored);
std::free(data_);
data_ = new_data;
size_ = bytes;
}
static size_t to_nearest_page_size(blt::size_t bytes) noexcept
{
constexpr static blt::size_t MASK = ~(PAGE_SIZE - 1);
return (bytes & MASK) + PAGE_SIZE;
}
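// Worked example: PAGE_SIZE is 0x1000 (4096), so to_nearest_page_size(1)
// returns 4096 and to_nearest_page_size(4096) returns 8192. Because of the
// unconditional "+ PAGE_SIZE", an exact multiple still gains a full extra
// page; the result is always strictly greater than the input.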
void* get_aligned_pointer(blt::size_t bytes) noexcept
{
if (data_ == nullptr)
return nullptr;
blt::size_t remaining_bytes = remaining_bytes_in_block();
auto* pointer = static_cast<void*>(data_ + bytes_stored);
return std::align(MAX_ALIGNMENT, bytes, pointer, remaining_bytes);
}
void* allocate_bytes_for_size(blt::size_t bytes)
{
auto aligned_ptr = get_aligned_pointer(bytes);
if (aligned_ptr == nullptr)
{
expand(bytes + MAX_ALIGNMENT);
aligned_ptr = get_aligned_pointer(bytes);
}
if (aligned_ptr == nullptr)
throw std::bad_alloc();
auto used_bytes = aligned_size(bytes);
bytes_stored += used_bytes;
return aligned_ptr;
}
template<typename T>
inline void call_drop(blt::size_t offset, blt::size_t index, detail::bitmask_t* mask)
{
if constexpr (detail::has_func_drop_v<T>)
{
if (mask != nullptr)
{
auto& mask_r = *mask;
if (!mask_r[index])
return;
}
from<NO_REF_T<T>>(offset).drop();
}
}
blt::u8* data_ = nullptr;
// offset into data_ of the first free byte.
blt::size_t bytes_stored = 0;
blt::size_t size_ = 0;
};
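// A minimal usage sketch of the flat allocator above (values arbitrary):
//
//     blt::gp::stack_allocator stack;
//     stack.push(42.0f);              // one aligned 8-byte slot
//     stack.push(123);                // int occupies its own 8-byte slot
//     int i = stack.pop<int>();       // 123; frees exactly one aligned slot
//     auto& f = stack.from<float>(0); // peek at the new top without popping
//
// Since every slot is rounded to MAX_ALIGNMENT, push and pop reduce to plain
// pointer arithmetic on bytes_stored; no per-element metadata is kept.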
class stack_allocator_old
{
constexpr static blt::size_t PAGE_SIZE = 0x1000;
constexpr static blt::size_t MAX_ALIGNMENT = 8;
template<typename T>
using NO_REF_T = std::remove_cv_t<std::remove_reference_t<T>>;
public:
struct size_data_t
{
blt::size_t total_size_bytes = 0;
blt::size_t total_used_bytes = 0;
blt::size_t total_remaining_bytes = 0;
blt::size_t total_no_meta_bytes = 0;
blt::size_t total_dealloc = 0;
blt::size_t total_dealloc_used = 0;
blt::size_t total_dealloc_remaining = 0;
blt::size_t total_dealloc_no_meta = 0;
blt::size_t blocks = 0;
friend std::ostream& operator<<(std::ostream& stream, const size_data_t& data)
{
stream << "[";
stream << data.total_used_bytes << "/";
stream << data.total_size_bytes << "(";
stream << (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_size_bytes) * 100) << "%), ";
stream << data.total_used_bytes << "/";
stream << data.total_no_meta_bytes << "(";
stream << (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_no_meta_bytes) * 100)
<< "%), (empty space: ";
stream << data.total_remaining_bytes << ") blocks: " << data.blocks << " || unallocated space: ";
stream << data.total_dealloc_used << "/";
stream << data.total_dealloc;
if (data.total_dealloc > 0)
stream << "(" << (static_cast<double>(data.total_dealloc_used) / static_cast<double>(data.total_dealloc) * 100) << "%)";
stream << ", ";
stream << data.total_dealloc_used << "/";
stream << data.total_dealloc_no_meta;
if (data.total_dealloc_no_meta > 0)
stream << "(" << (static_cast<double>(data.total_dealloc_used) / static_cast<double>(data.total_dealloc_no_meta) * 100)
<< "%)";
stream << ", (empty space: " << data.total_dealloc_remaining << ")]";
return stream;
}
};
void insert(stack_allocator_old stack)
{
if (stack.empty())
return;
// take a copy of the pointer to this stack's blocks
auto old_head = stack.head;
// stack is now empty, we have the last reference to it.
stack.head = nullptr;
// we don't have any nodes to search through or re-point, we can just assign the head
if (head == nullptr)
{
head = old_head;
return;
}
// find the beginning of the stack
auto begin = old_head;
while (begin->metadata.prev != nullptr)
begin = begin->metadata.prev;
// move along blocks with free space, attempt to insert bytes from one stack to another
auto insert = head;
while (insert->metadata.next != nullptr && begin != nullptr)
{
if (begin->used_bytes_in_block() <= insert->remaining_bytes_in_block())
{
std::memcpy(insert->metadata.offset, begin->buffer, begin->used_bytes_in_block());
insert->metadata.offset += begin->used_bytes_in_block();
auto old_begin = begin;
begin = begin->metadata.next;
free_block(old_begin);
}
head = insert;
insert = insert->metadata.next;
}
if (begin == nullptr)
return;
while (insert->metadata.next != nullptr)
insert = insert->metadata.next;
// if there is space left we can move the pointers around
insert->metadata.next = begin;
begin->metadata.prev = insert;
// find where the head is now and set the head to this new point.
auto new_head = begin;
while (new_head->metadata.next != nullptr)
new_head = new_head->metadata.next;
head = new_head;
}
/**
* Bytes must be the total number of bytes to move; each type's size must already account for alignment (see aligned_size()).
*/
void copy_from(const stack_allocator_old& stack, blt::size_t bytes)
{
if (bytes == 0)
return;
if (stack.empty())
{
BLT_WARN("This stack is empty, we will copy no bytes from it!");
return;
}
auto [start_block, bytes_left, start_point] = get_start_from_bytes(stack, bytes);
if (bytes_left > 0)
{
allocate_block_to_head_for_size(bytes_left);
std::memcpy(head->metadata.offset, start_point, bytes_left);
head->metadata.offset += bytes_left;
start_block = start_block->metadata.next;
}
// we now copy whole blocks at a time.
while (start_block != nullptr)
{
allocate_block_to_head_for_size(start_block->used_bytes_in_block());
std::memcpy(head->metadata.offset, start_block->buffer, start_block->used_bytes_in_block());
head->metadata.offset += start_block->used_bytes_in_block();
start_block = start_block->metadata.next;
}
}
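// For example, copying one int and one float that were pushed with push()
// requires aligned_size<int>() + aligned_size<float>() bytes (16 under the
// 8-byte MAX_ALIGNMENT), not sizeof(int) + sizeof(float) (8).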
void copy_from(blt::u8* data, blt::size_t bytes)
{
if (bytes == 0 || data == nullptr)
return;
allocate_block_to_head_for_size(bytes);
std::memcpy(head->metadata.offset, data, bytes);
head->metadata.offset += bytes;
}
void copy_to(blt::u8* data, blt::size_t bytes) const
{
if (bytes == 0 || data == nullptr)
return;
auto [start_block, bytes_left, start_point] = get_start_from_bytes(*this, bytes);
blt::size_t write_point = 0;
if (bytes_left > 0)
{
std::memcpy(data + write_point, start_point, bytes_left);
write_point += bytes_left;
start_block = start_block->metadata.next;
}
// we now copy whole blocks at a time.
while (start_block != nullptr)
{
std::memcpy(data + write_point, start_block->buffer, start_block->used_bytes_in_block());
write_point += start_block->used_bytes_in_block();
start_block = start_block->metadata.next;
}
}
/**
* Pushes an instance of an object on to the stack
* @tparam T type to push
* @param value const reference to the object to push
*/
template<typename T>
void push(const T& value)
{
using NO_REF_T = std::remove_cv_t<std::remove_reference_t<T>>;
static_assert(std::is_trivially_copyable_v<NO_REF_T> && "Type must be bitwise copyable!");
static_assert(alignof(NO_REF_T) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
auto ptr = allocate_bytes<NO_REF_T>();
head->metadata.offset = static_cast<blt::u8*>(ptr) + aligned_size<NO_REF_T>();
//new(ptr) NO_REF_T(std::forward<T>(value));
std::memcpy(ptr, &value, sizeof(NO_REF_T));
}
template<typename T>
T pop()
{
using NO_REF_T = std::remove_cv_t<std::remove_reference_t<T>>;
static_assert(std::is_trivially_copyable_v<NO_REF_T> && "Type must be bitwise copyable!");
constexpr static auto TYPE_SIZE = aligned_size<NO_REF_T>();
while (head->used_bytes_in_block() == 0 && move_back());
if (empty())
throw std::runtime_error("Silly boi the stack is empty!");
if (head->used_bytes_in_block() < static_cast<blt::ptrdiff_t>(aligned_size<NO_REF_T>()))
throw std::runtime_error((std::string("Mismatched Types! Not enough space left in block! Bytes: ") += std::to_string(
head->used_bytes_in_block()) += " Size: " + std::to_string(sizeof(NO_REF_T))).c_str());
// make copy
NO_REF_T t = *reinterpret_cast<NO_REF_T*>(head->metadata.offset - TYPE_SIZE);
// call destructor
if constexpr (detail::has_func_drop_v<T>)
call_drop<NO_REF_T>(0, 0, nullptr);
// move offset back
head->metadata.offset -= TYPE_SIZE;
// moving back allows us to allocate with other data, if there is room.
while (head->used_bytes_in_block() == 0 && move_back());
return t;
}
template<typename T>
T& from(blt::size_t bytes)
{
using NO_REF_T = std::remove_cv_t<std::remove_reference_t<T>>;
constexpr static auto TYPE_SIZE = aligned_size<NO_REF_T>();
auto remaining_bytes = static_cast<blt::ptrdiff_t>(bytes + TYPE_SIZE);
block* blk = head;
while (remaining_bytes > 0)
{
if (blk == nullptr)
{
BLT_WARN_STREAM << "Stack state: " << size() << "\n";
BLT_WARN_STREAM << "Requested " << bytes << " bytes which becomes " << (bytes + TYPE_SIZE) << "\n";
throw std::runtime_error("Requested size is beyond the scope of this stack!");
}
auto bytes_available = blk->used_bytes_in_block() - remaining_bytes;
if (bytes_available < 0)
{
remaining_bytes -= blk->used_bytes_in_block();
blk = blk->metadata.prev;
} else
break;
}
if (blk == nullptr)
throw std::runtime_error("Some nonsense is going on. This function already smells");
if (blk->used_bytes_in_block() < static_cast<blt::ptrdiff_t>(TYPE_SIZE))
{
BLT_WARN_STREAM << size() << "\n";
BLT_WARN_STREAM << "Requested " << bytes << " bytes which becomes " << (bytes + TYPE_SIZE) << "\n";
BLT_WARN_STREAM << "Block size: " << blk->storage_size() << "\n";
BLT_ABORT((std::string("Mismatched Types! Not enough space left in block! Bytes: ") += std::to_string(
blk->used_bytes_in_block()) += " Size: " + std::to_string(sizeof(NO_REF_T))).c_str());
}
return *reinterpret_cast<NO_REF_T*>(blk->metadata.offset - remaining_bytes);
}
void pop_bytes(blt::ptrdiff_t bytes)
{
if (bytes == 0)
return;
if (empty())
{
BLT_WARN("Cannot pop %ld bytes", bytes);
BLT_ABORT("Stack is empty, we cannot pop!");
}
while (bytes > 0)
{
if (head == nullptr)
{
BLT_WARN("The head is null, this stack doesn't contain enough data inside to pop %ld bytes!", bytes);
BLT_WARN_STREAM << "Stack State: " << size() << "\n";
BLT_ABORT("Stack doesn't contain enough data to perform a pop!");
}
auto diff = head->used_bytes_in_block() - bytes;
// if there is not enough room left to pop completely off the block, then move to the next previous block
// and pop from it, update the amount of bytes to reflect the amount removed from the current block
if (diff < 0)
{
bytes -= head->used_bytes_in_block();
// reset this head's buffer.
head->metadata.offset = head->buffer;
move_back();
} else
{
// otherwise update the offset pointer
head->metadata.offset -= bytes;
break;
}
}
while (head != nullptr && head->used_bytes_in_block() == 0 && move_back());
}
/**
* Warning: this function should be used to transfer single types, not arrays of types! It will produce an error if you attempt to pass
* more than one type's worth of bytes at a time!
* @param to stack to push to
* @param bytes number of bytes to transfer out.
*/
void transfer_bytes(stack_allocator_old& to, blt::size_t bytes)
{
while (head->used_bytes_in_block() == 0 && move_back());
if (empty())
throw std::runtime_error("This stack is empty!");
auto type_size = aligned_size(bytes);
if (head->used_bytes_in_block() < static_cast<blt::ptrdiff_t>(type_size))
{
BLT_ERROR_STREAM << "Stack State:\n" << size() << "\n" << "Bytes in head: " << bytes_in_head() << "\n";
BLT_ABORT(("This stack doesn't contain enough data for this type! " + std::to_string(head->used_bytes_in_block()) + " / " +
std::to_string(bytes) + " This is an invalid runtime state!").c_str());
}
auto ptr = to.allocate_bytes(type_size);
to.head->metadata.offset = static_cast<blt::u8*>(ptr) + type_size;
std::memcpy(ptr, head->metadata.offset - type_size, type_size);
head->metadata.offset -= type_size;
while (head->used_bytes_in_block() == 0 && move_back());
}
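// Sketch of the intended call pattern (a and b are illustrative stacks):
// move the top value, previously pushed as a float, from one stack to the
// other:
//
//     a.transfer_bytes(b, sizeof(float)); // rounded internally to aligned_size
//     float f = b.pop<float>();
//
// Transferring two values takes two calls; passing their combined byte count
// in a single call is the misuse the warning above refers to.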
template<typename... Args>
void call_destructors(detail::bitmask_t* mask)
{
if constexpr (sizeof...(Args) > 0)
{
blt::size_t offset = (stack_allocator_old::aligned_size<NO_REF_T<Args>>() + ...) -
stack_allocator_old::aligned_size<NO_REF_T<typename blt::meta::arg_helper<Args...>::First>>();
blt::size_t index = 0;
if (mask != nullptr)
index = mask->size() - sizeof...(Args);
((call_drop<Args>(offset, index, mask), offset -= stack_allocator_old::aligned_size<NO_REF_T<Args>>(), ++index), ...);
if (mask != nullptr)
{
auto& mask_r = *mask;
for (blt::size_t i = 0; i < sizeof...(Args); i++)
mask_r.pop_back();
}
}
}
[[nodiscard]] bool empty() const noexcept
{
if (head == nullptr)
return true;
if (head->metadata.prev != nullptr)
return false;
return head->used_bytes_in_block() == 0;
}
[[nodiscard]] blt::ptrdiff_t bytes_in_head() const noexcept
{
if (head == nullptr)
return 0;
return head->used_bytes_in_block();
}
/**
* Warning this function is slow!
* @return the size of the stack allocator in bytes
*/
[[nodiscard]] size_data_t size() const noexcept
{
size_data_t size_data;
auto* prev = head;
while (prev != nullptr)
{
size_data.total_size_bytes += prev->metadata.size;
size_data.total_no_meta_bytes += prev->storage_size();
size_data.total_remaining_bytes += prev->remaining_bytes_in_block();
size_data.total_used_bytes += prev->used_bytes_in_block();
size_data.blocks++;
prev = prev->metadata.prev;
}
if (head != nullptr)
{
auto next = head->metadata.next;
while (next != nullptr)
{
size_data.total_dealloc += next->metadata.size;
size_data.total_dealloc_no_meta += next->storage_size();
size_data.total_dealloc_remaining += next->remaining_bytes_in_block();
size_data.total_dealloc_used += next->used_bytes_in_block();
size_data.blocks++;
next = next->metadata.next;
}
}
return size_data;
}
stack_allocator_old() = default;
// TODO: cleanup this allocator!
// if you keep track of type size information you can memcpy between stack allocators as you already only allow trivially copyable types
stack_allocator_old(const stack_allocator_old& copy) noexcept
{
if (copy.empty())
return;
head = nullptr;
block* list_itr = nullptr;
// start at the beginning of the list
block* current = copy.head;
while (current != nullptr)
{
list_itr = current;
current = current->metadata.prev;
}
// copy all the blocks
while (list_itr != nullptr)
{
push_block(list_itr->metadata.size);
std::memcpy(head->buffer, list_itr->buffer, list_itr->storage_size());
head->metadata.size = list_itr->metadata.size;
head->metadata.offset = head->buffer + list_itr->used_bytes_in_block();
list_itr = list_itr->metadata.next;
}
}
stack_allocator_old& operator=(const stack_allocator_old& copy) = delete;
stack_allocator_old(stack_allocator_old&& move) noexcept
2024-06-06 02:25:42 -04:00
{
head = move.head;
move.head = nullptr;
}
stack_allocator_old& operator=(stack_allocator_old&& move) noexcept
{
move.head = std::exchange(head, move.head);
return *this;
}
~stack_allocator_old() noexcept
{
if (head != nullptr)
{
auto blk = head->metadata.next;
while (blk != nullptr)
{
auto ptr = blk;
blk = blk->metadata.next;
free_block(ptr);
}
}
free_chain(head);
}
template<typename T>
static inline constexpr blt::size_t aligned_size() noexcept
{
return aligned_size(sizeof(NO_REF_T<T>));
}
static inline constexpr blt::size_t aligned_size(blt::size_t size) noexcept
{
return (size + (MAX_ALIGNMENT - 1)) & ~(MAX_ALIGNMENT - 1);
}
inline static constexpr auto metadata_size() noexcept
{
return sizeof(typename block::block_metadata_t);
}
inline static constexpr auto block_size() noexcept
{
return sizeof(block);
}
inline static constexpr auto page_size() noexcept
{
return PAGE_SIZE;
}
inline static constexpr auto page_size_no_meta() noexcept
{
return page_size() - metadata_size();
}
inline static constexpr auto page_size_no_block() noexcept
{
return page_size() - block_size();
}
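// Worked example, assuming a typical 64-bit build where block_metadata_t is
// 32 bytes (one blt::size_t plus three pointers): metadata_size() == 32,
// page_size() == 4096, page_size_no_meta() == 4064 and page_size_no_block()
// == 4056. These helpers let callers size requests so a block plus its
// metadata fits exactly within one page.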
private:
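// Layout note: a block is placement-new'd at the start of a PAGE_SIZE-aligned
// allocation, so the metadata header and the user data share one region.
// buffer[8] only marks where usable storage begins; the real capacity is
// storage_size() = metadata.size - sizeof(block_metadata_t), and writes run
// past the nominal array into the remainder of the page.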
struct block
{
struct block_metadata_t
{
blt::size_t size = 0;
block* next = nullptr;
block* prev = nullptr;
blt::u8* offset = nullptr;
} metadata;
blt::u8 buffer[8]{};
explicit block(blt::size_t size) noexcept
{
#if BLT_DEBUG_LEVEL > 0
if (size < PAGE_SIZE)
{
BLT_WARN("Hey this block is too small, who allocated it?");
std::abort();
}
#endif
metadata.size = size;
metadata.offset = buffer;
}
void reset() noexcept
{
metadata.offset = buffer;
}
[[nodiscard]] blt::ptrdiff_t storage_size() const noexcept
{
return static_cast<blt::ptrdiff_t>(metadata.size - sizeof(typename block::block_metadata_t));
}
[[nodiscard]] blt::ptrdiff_t used_bytes_in_block() const noexcept
{
return static_cast<blt::ptrdiff_t>(metadata.offset - buffer);
}
[[nodiscard]] blt::ptrdiff_t remaining_bytes_in_block() const noexcept
{
return storage_size() - used_bytes_in_block();
}
};
struct copy_start_point
{
block* start_block;
blt::ptrdiff_t bytes_left;
blt::u8* start_point;
};
template<typename T>
inline void call_drop(blt::size_t offset, blt::size_t index, detail::bitmask_t* mask)
{
if constexpr (detail::has_func_drop_v<T>)
{
if (mask != nullptr)
{
auto& mask_r = *mask;
if (!mask_r[index])
return;
}
from<NO_REF_T<T>>(offset).drop();
}
}
template<typename T>
void* allocate_bytes()
{
return allocate_bytes(sizeof(NO_REF_T<T>));
}
void* allocate_bytes(blt::size_t size)
{
auto ptr = get_aligned_pointer(size);
if (ptr == nullptr)
allocate_block_to_head_for_size(aligned_size(size));
ptr = get_aligned_pointer(size);
if (ptr == nullptr)
throw std::bad_alloc();
return ptr;
}
/**
* Moves forward through the list of "deallocated" blocks, if none meet size requirements it'll allocate a new block.
* This function will take into account the size of the block metadata, but requires the size input to be aligned.
* It will perform no modification to the size value.
*
* The block which allows for size is now at head.
*/
void allocate_block_to_head_for_size(const blt::size_t size) noexcept
{
while (head != nullptr && head->metadata.next != nullptr)
{
head = head->metadata.next;
if (head != nullptr)
head->reset();
if (head->remaining_bytes_in_block() >= static_cast<blt::ptrdiff_t>(size))
break;
}
if (head == nullptr || head->remaining_bytes_in_block() < static_cast<blt::ptrdiff_t>(size))
push_block(size + sizeof(typename block::block_metadata_t));
}
void* get_aligned_pointer(blt::size_t bytes) noexcept
{
if (head == nullptr)
return nullptr;
blt::size_t remaining_bytes = head->remaining_bytes_in_block();
auto* pointer = static_cast<void*>(head->metadata.offset);
return std::align(MAX_ALIGNMENT, bytes, pointer, remaining_bytes);
}
void push_block(blt::size_t size) noexcept
{
auto blk = allocate_block(size);
if (head == nullptr)
{
head = blk;
return;
}
head->metadata.next = blk;
blk->metadata.prev = head;
head = blk;
}
static size_t to_nearest_page_size(blt::size_t bytes) noexcept
{
constexpr static blt::size_t MASK = ~(PAGE_SIZE - 1);
return (bytes & MASK) + PAGE_SIZE;
}
static block* allocate_block(blt::size_t bytes) noexcept
{
auto size = to_nearest_page_size(bytes);
auto* data = std::aligned_alloc(PAGE_SIZE, size);
//auto* data = get_allocator().allocate(size);
new(data) block{size};
return reinterpret_cast<block*>(data);
}
static void free_chain(block* current) noexcept
{
while (current != nullptr)
{
block* ptr = current;
current = current->metadata.prev;
free_block(ptr);
//get_allocator().deallocate(ptr);
}
}
static void free_block(block* ptr) noexcept
{
std::free(ptr);
}
inline bool move_back() noexcept
{
auto old = head;
head = head->metadata.prev;
if (head == nullptr)
{
head = old;
return false;
}
return true;
}
[[nodiscard]] inline static copy_start_point get_start_from_bytes(const stack_allocator_old& stack, blt::size_t bytes)
{
auto start_block = stack.head;
auto bytes_left = static_cast<blt::ptrdiff_t>(bytes);
blt::u8* start_point = nullptr;
while (bytes_left > 0)
{
if (start_block == nullptr)
{
BLT_WARN("This stack doesn't contain enough space to copy %ld bytes!", bytes);
BLT_WARN_STREAM << "State: " << stack.size() << "\n";
BLT_ABORT("Stack doesn't contain enough data for this copy operation!");
}
if (start_block->used_bytes_in_block() < bytes_left)
{
bytes_left -= start_block->used_bytes_in_block();
start_block = start_block->metadata.prev;
} else if (start_block->used_bytes_in_block() == bytes_left)
{
start_point = start_block->buffer;
break;
} else
{
start_point = start_block->metadata.offset - bytes_left;
break;
}
}
return copy_start_point{start_block, bytes_left, start_point};
}
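// Illustrative walk-through: requesting 24 bytes when the head block holds
// only 16 steps back to the previous block and returns that block, the 8
// bytes still owed from it, and a start pointer 8 bytes below its offset;
// copy_from/copy_to then copy forward from that point one block at a time.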
private:
block* head = nullptr;
};
}
#endif //BLT_GP_STACK_H