docs and change
parent 0251bf33f8
commit ea5759cf1a
@@ -27,7 +27,7 @@ macro(compile_options target_name)
     sanitizers(${target_name})
 endmacro()

-project(blt-gp VERSION 0.5.19)
+project(blt-gp VERSION 0.5.20)

 include(CTest)

@@ -0,0 +1,230 @@
+    bool type_aware_crossover_t::apply(gp_program& program, const tree_t& p1, const tree_t& p2, tree_t& c1, tree_t& c2)
+    {
+        if (p1.size() < config.min_tree_size || p2.size() < config.min_tree_size)
+            return false;
+
+        tree_t::subtree_point_t point1, point2;
+        if (config.traverse)
+        {
+            point1 = p1.select_subtree_traverse(config.terminal_chance, config.depth_multiplier);
+            if (const auto val = p2.select_subtree_traverse(point1.type, config.max_crossover_tries, config.terminal_chance, config.depth_multiplier))
+                point2 = *val;
+            else
+                return false;
+        } else
+        {
+            point1 = p1.select_subtree(config.terminal_chance);
+            if (const auto val = p2.select_subtree(point1.type, config.max_crossover_tries, config.terminal_chance))
+                point2 = *val;
+            else
+                return false;
+        }
+
+        const auto& p1_operator = p1.get_operator(point1.pos);
+        const auto& p2_operator = p2.get_operator(point2.pos);
+
+        // If either is a terminal (value), just do normal subtree crossover
+        if (p1_operator.is_value() || p2_operator.is_value())
+        {
+            c1.swap_subtrees(point1, c2, point2);
+            return true;
+        }
+
+        const auto& p1_info = program.get_operator_info(p1_operator.id());
+        const auto& p2_info = program.get_operator_info(p2_operator.id());
+
+        // Find the child subtrees of both operators
+        thread_local tracked_vector<tree_t::child_t> children_data_p1;
+        thread_local tracked_vector<tree_t::child_t> children_data_p2;
+        children_data_p1.clear();
+        children_data_p2.clear();
+
+        p1.find_child_extends(children_data_p1, point1.pos, p1_info.argument_types.size());
+        p2.find_child_extends(children_data_p2, point2.pos, p2_info.argument_types.size());
+
+        // Check if all types are identical but possibly in different order
+        bool same_types_different_order = p1_info.argument_types.size() == p2_info.argument_types.size();
+
+        if (same_types_different_order)
+        {
+            // Create frequency counts of types in both operators
+            std::unordered_map<type_id, size_t> type_counts_p1;
+            std::unordered_map<type_id, size_t> type_counts_p2;
+
+            for (const auto& type : p1_info.argument_types)
+                type_counts_p1[type.id]++;
+
+            for (const auto& type : p2_info.argument_types)
+                type_counts_p2[type.id]++;
+
+            // Check if the type counts match
+            for (const auto& [type, count] : type_counts_p1)
+            {
+                if (type_counts_p2[type] != count)
+                {
+                    same_types_different_order = false;
+                    break;
+                }
+            }
+        }
+
+        if (same_types_different_order)
+        {
+            // Create a mapping from p1's argument positions to p2's positions
+            std::vector<size_t> arg_mapping(p1_info.argument_types.size(), (size_t)-1);
+            std::vector<bool> p2_used(p2_info.argument_types.size(), false);
+
+            // First pass: match exact types in order
+            for (size_t i = 0; i < p1_info.argument_types.size(); i++)
+            {
+                for (size_t j = 0; j < p2_info.argument_types.size(); j++)
+                {
+                    if (!p2_used[j] && p1_info.argument_types[i].id == p2_info.argument_types[j].id)
+                    {
+                        arg_mapping[i] = j;
+                        p2_used[j] = true;
+                        break;
+                    }
+                }
+            }
+
+            // Copy operators first
+            auto& c1_temp = tree_t::get_thread_local(program);
+            auto& c2_temp = tree_t::get_thread_local(program);
+            c1_temp.clear(program);
+            c2_temp.clear(program);
+
+            // Create new operators with the same return types
+            c1_temp.insert_operator({
+                program.get_typesystem().get_type(p2_info.return_type).size(),
+                p2_operator.id(),
+                program.is_operator_ephemeral(p2_operator.id()),
+                program.get_operator_flags(p2_operator.id())
+            });
+
+            c2_temp.insert_operator({
+                program.get_typesystem().get_type(p1_info.return_type).size(),
+                p1_operator.id(),
+                program.is_operator_ephemeral(p1_operator.id()),
+                program.get_operator_flags(p1_operator.id())
+            });
+
+            // Copy child subtrees according to the mapping
+            for (size_t i = 0; i < p1_info.argument_types.size(); i++)
+            {
+                auto& p1_child = children_data_p1[i];
+                auto& p2_child = children_data_p2[arg_mapping[i]];
+
+                tree_t p1_subtree(program);
+                tree_t p2_subtree(program);
+
+                p1.copy_subtree(tree_t::subtree_point_t(p1_child.start), p1_child.end, p1_subtree);
+                p2.copy_subtree(tree_t::subtree_point_t(p2_child.start), p2_child.end, p2_subtree);
+
+                c1_temp.insert_subtree(tree_t::subtree_point_t(c1_temp.size()), p2_subtree);
+                c2_temp.insert_subtree(tree_t::subtree_point_t(c2_temp.size()), p1_subtree);
+            }
+
+            // Replace the original subtrees with our new reordered ones
+            c1.replace_subtree(point1, c1_temp);
+            c2.replace_subtree(point2, c2_temp);
+        }
+        else
+        {
+            // If types don't match exactly, fall back to simple operator swap
+            // but we need to ensure the children are compatible
+
+            // Create new operators with swapped operators but appropriate children
+            auto& c1_temp = tree_t::get_thread_local(program);
+            auto& c2_temp = tree_t::get_thread_local(program);
+            c1_temp.clear(program);
+            c2_temp.clear(program);
+
+            c1_temp.insert_operator({
+                program.get_typesystem().get_type(p2_info.return_type).size(),
+                p2_operator.id(),
+                program.is_operator_ephemeral(p2_operator.id()),
+                program.get_operator_flags(p2_operator.id())
+            });
+
+            c2_temp.insert_operator({
+                program.get_typesystem().get_type(p1_info.return_type).size(),
+                p1_operator.id(),
+                program.is_operator_ephemeral(p1_operator.id()),
+                program.get_operator_flags(p1_operator.id())
+            });
+
+            // Create a mapping of which children we can reuse and which need to be regenerated
+            for (size_t i = 0; i < p2_info.argument_types.size(); i++)
+            {
+                const auto& needed_type = p2_info.argument_types[i];
+                bool found_match = false;
+
+                // Try to find a matching child from p1
+                for (size_t j = 0; j < p1_info.argument_types.size(); j++)
+                {
+                    if (needed_type.id == p1_info.argument_types[j].id)
+                    {
+                        // Copy this child subtree from p1
+                        auto& p1_child = children_data_p1[j];
+                        tree_t p1_subtree(program);
+                        p1.copy_subtree(tree_t::subtree_point_t(p1_child.start), p1_child.end, p1_subtree);
+                        c1_temp.insert_subtree(tree_t::subtree_point_t(c1_temp.size()), p1_subtree);
+                        found_match = true;
+                        break;
+                    }
+                }
+
+                if (!found_match)
+                {
+                    // If no matching child, we need to generate a new subtree of the correct type
+                    auto& tree = tree_t::get_thread_local(program);
+                    tree.clear(program);
+                    config.generator.get().generate(tree, {program, needed_type.id, config.replacement_min_depth, config.replacement_max_depth});
+                    c1_temp.insert_subtree(tree_t::subtree_point_t(c1_temp.size()), tree);
+                }
+            }
+
+            // Do the same for the other direction (c2)
+            for (size_t i = 0; i < p1_info.argument_types.size(); i++)
+            {
+                const auto& needed_type = p1_info.argument_types[i];
+                bool found_match = false;
+
+                // Try to find a matching child from p2
+                for (size_t j = 0; j < p2_info.argument_types.size(); j++)
+                {
+                    if (needed_type.id == p2_info.argument_types[j].id)
+                    {
+                        // Copy this child subtree from p2
+                        auto& p2_child = children_data_p2[j];
+                        tree_t p2_subtree(program);
+                        p2.copy_subtree(tree_t::subtree_point_t(p2_child.start), p2_child.end, p2_subtree);
+                        c2_temp.insert_subtree(tree_t::subtree_point_t(c2_temp.size()), p2_subtree);
+                        found_match = true;
+                        break;
+                    }
+                }
+
+                if (!found_match)
+                {
+                    // If no matching child, we need to generate a new subtree of the correct type
+                    auto& tree = tree_t::get_thread_local(program);
+                    tree.clear(program);
+                    config.generator.get().generate(tree, {program, needed_type.id, config.replacement_min_depth, config.replacement_max_depth});
+                    c2_temp.insert_subtree(tree_t::subtree_point_t(c2_temp.size()), tree);
+                }
+            }
+
+            // Replace the original subtrees with our new ones
+            c1.replace_subtree(point1, c1_temp);
+            c2.replace_subtree(point2, c2_temp);
+        }
+
+#if BLT_DEBUG_LEVEL >= 2
+        if (!c1.check(detail::debug::context_ptr) || !c2.check(detail::debug::context_ptr))
+            throw std::runtime_error("Tree check failed");
+#endif
+
+        return true;
+    }
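Note: the "same types, possibly in a different order" test in the hunk above is a type-multiset comparison, and it can be read in isolation. A minimal standalone sketch, with type_id simplified to an integer handle (an assumption; the library's type_id is opaque here):

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    using type_id = std::size_t; // simplified stand-in for the library's type_id

    // Two argument lists are compatible for reordering iff their type multisets match.
    bool same_type_multiset(const std::vector<type_id>& a, const std::vector<type_id>& b)
    {
        if (a.size() != b.size())
            return false;
        std::unordered_map<type_id, std::size_t> counts;
        for (const auto t : a)
            ++counts[t]; // count each type in the first list
        for (const auto t : b)
        {
            const auto it = counts.find(t);
            if (it == counts.end() || it->second == 0)
                return false; // b uses a type more often than a does
            --it->second;
        }
        return true;
    }

Counting one list and decrementing against the other needs a single map, where the hunk builds two; both are correct, this is just the compact form.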
@@ -29,7 +29,10 @@ namespace blt::gp
 {
     namespace detail
     {
-        static constexpr inline size_t MAX_ALIGNMENT = 8;
+#ifndef BLT_GP_MAX_ALIGNMENT
+#define BLT_GP_MAX_ALIGNMENT 8
+#endif
+        static constexpr inline size_t MAX_ALIGNMENT = BLT_GP_MAX_ALIGNMENT;

 #if BLT_DEBUG_LEVEL > 0
         static void check_alignment(const size_t bytes, const std::string& message = "Invalid alignment")
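Note: with this change the maximum alignment becomes a build-time knob instead of a hard-coded 8. A minimal sketch of overriding it; the header path is an assumption, not shown in this diff:

    // Hypothetical: raise the limit so 16-byte-aligned types can be stored.
    // Equivalent to passing -DBLT_GP_MAX_ALIGNMENT=16 on the compiler command line.
    #define BLT_GP_MAX_ALIGNMENT 16
    #include <blt/gp/stack.h> // assumed header path

    static_assert(blt::gp::detail::MAX_ALIGNMENT == 16);

As the new docs in the next hunk note, raising the value increases the per-slot storage cost for every type.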
@@ -37,53 +37,40 @@ namespace blt::gp
     namespace detail
     {
         BLT_META_MAKE_FUNCTION_CHECK(drop);
-        // BLT_META_MAKE_FUNCTION_CHECK(drop_ephemeral);
     }

     /**
      * @brief This is the primary class that enables a type-erased GP system without compromising on performance.
      *
      * This class provides an efficient way to allocate, deallocate, and manage memory blocks
      * in a stack-like structure. It supports operations like memory alignment, copying, moving,
      * insertion, and removal of memory. This is particularly useful for performance-critical
      * systems requiring temporary memory management without frequent heap allocation overhead.
      *
+     * Types placed within this container cannot have an alignment greater than `BLT_GP_MAX_ALIGNMENT` bytes; doing so will result in unaligned pointer access.
+     * You can configure this by setting `BLT_GP_MAX_ALIGNMENT` as a compiler definition, but be aware it will increase memory requirements.
+     * Setting `BLT_GP_MAX_ALIGNMENT` lower than 8 is UB on x86-64 systems.
+     * Consequently, all types have a minimum storage size of `BLT_GP_MAX_ALIGNMENT` (8) bytes, meaning a char, float, int, etc. will take `BLT_GP_MAX_ALIGNMENT` bytes.
      */
     class stack_allocator
     {
         constexpr static size_t PAGE_SIZE = 0x100;
-        template <typename T>
-        using NO_REF_T = std::remove_cv_t<std::remove_reference_t<T>>;
         using Allocator = aligned_allocator;

-        // todo remove this once i fix all the broken references
-        struct detail
-        {
-            static constexpr size_t aligned_size(const size_t size) noexcept
-            {
-                return (size + (gp::detail::MAX_ALIGNMENT - 1)) & ~(gp::detail::MAX_ALIGNMENT - 1);
-            }
-        };
+        static constexpr size_t align_bytes(const size_t size) noexcept
+        {
+            return (size + (detail::MAX_ALIGNMENT - 1)) & ~(detail::MAX_ALIGNMENT - 1);
+        }

     public:
         static Allocator& get_allocator();

-        struct size_data_t
-        {
-            blt::size_t total_size_bytes = 0;
-            blt::size_t total_used_bytes = 0;
-            blt::size_t total_remaining_bytes = 0;
-
-            friend std::ostream& operator<<(std::ostream& stream, const size_data_t& data)
-            {
-                stream << "[";
-                stream << data.total_used_bytes << " / " << data.total_size_bytes;
-                stream << " ("
-                       << (data.total_size_bytes != 0
-                               ? (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_size_bytes) * 100)
-                               : 0) << "%); space left: " << data.total_remaining_bytes << "]";
-                return stream;
-            }
-        };
-
         template <typename T>
         static constexpr size_t aligned_size() noexcept
         {
-            const auto bytes = detail::aligned_size(sizeof(NO_REF_T<T>));
-            if constexpr (blt::gp::detail::has_func_drop_v<gp::detail::remove_cv_ref<T>>)
-                return bytes + detail::aligned_size(sizeof(std::atomic_uint64_t*));
+            const auto bytes = align_bytes(sizeof(std::decay_t<T>));
+            if constexpr (blt::gp::detail::has_func_drop_v<detail::remove_cv_ref<T>>)
+                return bytes + align_bytes(sizeof(std::atomic_uint64_t*));
             return bytes;
         }
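Note: align_bytes relies on MAX_ALIGNMENT being a power of two; the add-then-mask trick rounds any size up to the next multiple. A self-contained copy with a few worked cases, assuming the default of 8:

    #include <cstddef>

    constexpr std::size_t align_bytes(const std::size_t size) noexcept
    {
        constexpr std::size_t MAX_ALIGNMENT = 8; // must be a power of two
        return (size + (MAX_ALIGNMENT - 1)) & ~(MAX_ALIGNMENT - 1);
    }

    static_assert(align_bytes(1) == 8);   // a char still occupies a full slot
    static_assert(align_bytes(8) == 8);   // already-aligned sizes are unchanged
    static_assert(align_bytes(12) == 16); // rounded up to the next multiple of 8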
@ -166,26 +153,28 @@ namespace blt::gp
|
|||
std::memcpy(data, data_ + (bytes_stored - bytes), bytes);
|
||||
}
|
||||
|
||||
template <typename T, typename NO_REF = NO_REF_T<T>>
|
||||
template <typename T>
|
||||
void push(const T& t)
|
||||
{
|
||||
static_assert(std::is_trivially_copyable_v<NO_REF>, "Type must be bitwise copyable!");
|
||||
static_assert(alignof(NO_REF) <= gp::detail::MAX_ALIGNMENT, "Type alignment must not be greater than the max alignment!");
|
||||
const auto ptr = static_cast<char*>(allocate_bytes_for_size(aligned_size<NO_REF>()));
|
||||
std::memcpy(ptr, &t, sizeof(NO_REF));
|
||||
using DecayedT = std::decay_t<T>;
|
||||
static_assert(std::is_trivially_copyable_v<DecayedT>, "Type must be bitwise copyable!");
|
||||
static_assert(alignof(DecayedT) <= detail::MAX_ALIGNMENT, "Type alignment must not be greater than the max alignment!");
|
||||
const auto ptr = static_cast<char*>(allocate_bytes_for_size(aligned_size<DecayedT>()));
|
||||
std::memcpy(ptr, &t, sizeof(DecayedT));
|
||||
|
||||
if constexpr (gp::detail::has_func_drop_v<gp::detail::remove_cv_ref<T>>)
|
||||
if constexpr (gp::detail::has_func_drop_v<detail::remove_cv_ref<T>>)
|
||||
{
|
||||
new(ptr + sizeof(NO_REF)) mem::pointer_storage<std::atomic_uint64_t>{nullptr};
|
||||
new(ptr + sizeof(DecayedT)) mem::pointer_storage<std::atomic_uint64_t>{nullptr};
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, typename NO_REF = NO_REF_T<T>>
|
||||
template <typename T>
|
||||
T pop()
|
||||
{
|
||||
static_assert(std::is_trivially_copyable_v<NO_REF>, "Type must be bitwise copyable!");
|
||||
static_assert(alignof(NO_REF) <= gp::detail::MAX_ALIGNMENT, "Type alignment must not be greater than the max alignment!");
|
||||
constexpr auto size = aligned_size<NO_REF>();
|
||||
using DecayedT = std::decay_t<T>;
|
||||
static_assert(std::is_trivially_copyable_v<DecayedT>, "Type must be bitwise copyable!");
|
||||
static_assert(alignof(DecayedT) <= detail::MAX_ALIGNMENT, "Type alignment must not be greater than the max alignment!");
|
||||
constexpr auto size = aligned_size<DecayedT>();
|
||||
#if BLT_DEBUG_LEVEL > 0
|
||||
if (bytes_stored < size)
|
||||
throw std::runtime_error(("Not enough bytes left to pop!" __FILE__ ":") + std::to_string(__LINE__));
|
||||
|
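Note: for every type push/pop accepts, std::decay_t subsumes the removed NO_REF_T alias: it strips cv-qualifiers and references the same way, and additionally decays arrays and functions to pointers (cases the trivially-copyable static_assert rules out anyway). A quick check:

    #include <type_traits>

    // Both traits agree on the reference/cv cases the allocator actually sees.
    static_assert(std::is_same_v<std::decay_t<const int&>, int>);
    static_assert(std::is_same_v<std::remove_cv_t<std::remove_reference_t<const int&>>, int>);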
@@ -205,28 +194,34 @@ namespace blt::gp
                 return data_ + (bytes_stored - bytes);
             }

-            template <typename T, typename NO_REF = NO_REF_T<T>>
+            template <typename T>
             T& from(const size_t bytes) const
             {
-                static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
-                static_assert(alignof(NO_REF) <= gp::detail::MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
-                return *reinterpret_cast<NO_REF*>(from(aligned_size<NO_REF>() + bytes));
+                using DecayedT = std::decay_t<T>;
+                static_assert(std::is_trivially_copyable_v<DecayedT> && "Type must be bitwise copyable!");
+                static_assert(alignof(DecayedT) <= detail::MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
+                return *reinterpret_cast<DecayedT*>(from(aligned_size<DecayedT>() + bytes));
             }

             [[nodiscard]] std::pair<u8*, mem::pointer_storage<std::atomic_uint64_t>&> access_pointer(const size_t bytes, const size_t type_size) const
             {
                 const auto type_ref = from(bytes);
-                return {type_ref, *std::launder(
-                           reinterpret_cast<mem::pointer_storage<std::atomic_uint64_t>*>(type_ref + (type_size - detail::aligned_size(
-                               sizeof(std::atomic_uint64_t*)))))};
+                return {
+                    type_ref, *std::launder(
+                        reinterpret_cast<mem::pointer_storage<std::atomic_uint64_t>*>(type_ref + (type_size - align_bytes(
+                            sizeof(std::atomic_uint64_t*)))))
+                };
             }

-            [[nodiscard]] std::pair<u8*, mem::pointer_storage<std::atomic_uint64_t>&> access_pointer_forward(const size_t bytes, const size_t type_size) const
+            [[nodiscard]] std::pair<u8*, mem::pointer_storage<std::atomic_uint64_t>&> access_pointer_forward(
+                const size_t bytes, const size_t type_size) const
             {
                 const auto type_ref = data_ + bytes;
-                return {type_ref, *std::launder(
-                           reinterpret_cast<mem::pointer_storage<std::atomic_uint64_t>*>(type_ref + (type_size - detail::aligned_size(
-                               sizeof(std::atomic_uint64_t*)))))};
+                return {
+                    type_ref, *std::launder(
+                        reinterpret_cast<mem::pointer_storage<std::atomic_uint64_t>*>(type_ref + (type_size - align_bytes(
+                            sizeof(std::atomic_uint64_t*)))))
+                };
             }

             template <typename T>
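Note: the std::launder in access_pointer is load-bearing. The pointer_storage slot is placement-new'd into the raw bytes trailing the value (see push above), so a pointer merely reinterpret_cast from those bytes does not formally point at the live object. A minimal sketch of the pattern, not the library's code, assuming the buffer is suitably aligned:

    #include <cstdint>
    #include <new>

    std::uint64_t* read_trailing_slot(char* raw)
    {
        new (raw) std::uint64_t*{nullptr};   // slot created via placement-new
        return *std::launder(                // launder makes the later access well-defined
            reinterpret_cast<std::uint64_t**>(raw));
    }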
@@ -236,7 +231,7 @@ namespace blt::gp
                 return {
                     type_ref, *std::launder(
                         reinterpret_cast<mem::pointer_storage<std::atomic_uint64_t>*>(reinterpret_cast<char*>(&type_ref) +
-                            detail::aligned_size(sizeof(T))))
+                            align_bytes(sizeof(T))))
                 };
             }

@@ -264,41 +259,19 @@ namespace blt::gp
                 pop_bytes(aligned_bytes);
             }

-            // template <typename... Args>
-            // void call_destructors()
-            // {
-            //     if constexpr (sizeof...(Args) > 0)
-            //     {
-            //         size_t offset = (aligned_size<NO_REF_T<Args>>() + ...) - aligned_size<NO_REF_T<typename meta::arg_helper<Args...>::First>>();
-            //         ((call_drop<Args>(offset + (gp::detail::has_func_drop_v<Args> ? sizeof(u64*) : 0)), offset -= aligned_size<NO_REF_T<Args>>()), ...);
-            //         (void) offset;
-            //     }
-            // }
-
             [[nodiscard]] bool empty() const noexcept
             {
                 return bytes_stored == 0;
             }

-            [[nodiscard]] ptrdiff_t remaining_bytes_in_block() const noexcept
+            [[nodiscard]] ptrdiff_t remainder() const noexcept
             {
                 return static_cast<ptrdiff_t>(size_ - bytes_stored);
             }

-            [[nodiscard]] ptrdiff_t bytes_in_head() const noexcept
+            [[nodiscard]] size_t stored() const noexcept
             {
-                return static_cast<ptrdiff_t>(bytes_stored);
-            }
-
-            [[nodiscard]] size_data_t size() const noexcept
-            {
-                size_data_t data;
-
-                data.total_used_bytes = bytes_stored;
-                data.total_size_bytes = size_;
-                data.total_remaining_bytes = remaining_bytes_in_block();
-
-                return data;
+                return bytes_stored;
             }

             void reserve(const size_t bytes)
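Note: after the renames the three accessors partition a block: capacity() is the block size, stored() the bytes in use, and remainder() the bytes still free. A hedged sketch of the invariant; Stack stands in for stack_allocator, which needs the surrounding library to construct:

    #include <cassert>
    #include <cstddef>

    template <typename Stack>
    void check_block_invariant(const Stack& values)
    {
        // capacity() == stored() + remainder() for a single backing block
        assert(static_cast<std::ptrdiff_t>(values.capacity())
            == static_cast<std::ptrdiff_t>(values.stored()) + values.remainder());
    }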
@@ -313,12 +286,7 @@ namespace blt::gp
                 bytes_stored = bytes;
             }

-            [[nodiscard]] size_t stored() const
-            {
-                return bytes_stored;
-            }
-
-            [[nodiscard]] size_t internal_storage_size() const
+            [[nodiscard]] size_t capacity() const
             {
                 return size_;
             }
@@ -333,8 +301,6 @@ namespace blt::gp
                 return data_;
             }

-
-
         private:
             void expand(const size_t bytes)
             {
@@ -363,7 +329,7 @@ namespace blt::gp
             {
                 if (data_ == nullptr)
                     return nullptr;
-                size_t remaining_bytes = remaining_bytes_in_block();
+                size_t remaining_bytes = remainder();
                 auto* pointer = static_cast<void*>(data_ + bytes_stored);
                 return std::align(gp::detail::MAX_ALIGNMENT, bytes, pointer, remaining_bytes);
             }
@@ -385,15 +351,6 @@ namespace blt::gp
                 return aligned_ptr;
             }

-            // template <typename T>
-            // void call_drop(const size_t offset)
-            // {
-            //     if constexpr (blt::gp::detail::has_func_drop_v<T>)
-            //     {
-            //         from<NO_REF_T<T>>(offset).drop();
-            //     }
-            // }
-
             u8* data_ = nullptr;
             // place in the data_ array which has a free spot.
             size_t bytes_stored = 0;
@@ -149,12 +149,27 @@ namespace blt::gp
             const auto& p1_info = program.get_operator_info(p1_operator.id());
             const auto& p2_info = program.get_operator_info(p2_operator.id());

-            thread_local tracked_vector<tree_t::child_t> children_data_p1;
-            thread_local tracked_vector<tree_t::child_t> children_data_p2;
-            children_data_p1.clear();
-            children_data_p2.clear();
-
-            p1.find_child_extends(children_data_p1, point1.pos, p1_info.argument_types.size());
-            p2.find_child_extends(children_data_p2, point2.pos, p2_info.argument_types.size());
+            thread_local struct type_resolver_t
+            {
+                tracked_vector<tree_t::child_t> children_data_p1;
+                tracked_vector<tree_t::child_t> children_data_p2;
+                hashmap_t<type_id, std::vector<size_t>> missing_p1_types;
+                hashmap_t<type_id, std::vector<size_t>> missing_p2_types;
+
+                void clear()
+                {
+                    children_data_p1.clear();
+                    children_data_p2.clear();
+                    for (auto& [id, v] : missing_p1_types)
+                        v.clear();
+                    for (auto& [id, v] : missing_p2_types)
+                        v.clear();
+                }
+            } resolver;
+            resolver.clear();
+
+            p1.find_child_extends(resolver.children_data_p1, point1.pos, p1_info.argument_types.size());
+            p2.find_child_extends(resolver.children_data_p2, point2.pos, p2_info.argument_types.size());

             for (size_t i = 0; i < std::min(p1_info.argument_types.size(), p2_info.argument_types.size()); i++)
             {
src/tree.cpp  +20 -20
@@ -349,8 +349,8 @@ namespace blt::gp
             const auto copy_ptr_c1 = get_thread_pointer_for_size<struct c1_t>(c1_total);
             const auto copy_ptr_c2 = get_thread_pointer_for_size<struct c2_t>(c2_total);

-            values.reserve(values.bytes_in_head() - c1_subtree_bytes + c2_subtree_bytes);
-            other_tree.values.reserve(other_tree.values.bytes_in_head() - c2_subtree_bytes + c1_subtree_bytes);
+            values.reserve(values.stored() - c1_subtree_bytes + c2_subtree_bytes);
+            other_tree.values.reserve(other_tree.values.stored() - c2_subtree_bytes + c1_subtree_bytes);

             values.copy_to(copy_ptr_c1, c1_total);
             values.pop_bytes(c1_total);
@@ -535,7 +535,7 @@ namespace blt::gp
     bool tree_t::check(void* context) const
     {
         size_t bytes_expected = 0;
-        const auto bytes_size = values.size().total_used_bytes;
+        const auto bytes_size = values.stored();

         for (const auto& op : operations)
         {
@@ -545,7 +545,7 @@ namespace blt::gp

         if (bytes_expected != bytes_size)
         {
-            BLT_ERROR("Stack state: {}", values.size());
+            BLT_ERROR("Stack state: Stored: {}; Capacity: {}; Remainder: {}", values.stored(), values.capacity(), values.remainder());
             BLT_ERROR("Child tree bytes {} vs expected {}, difference: {}", bytes_size, bytes_expected,
                       static_cast<ptrdiff_t>(bytes_expected) - static_cast<ptrdiff_t>(bytes_size));
             BLT_ERROR("Amount of bytes in stack doesn't match the number of bytes expected for the operations");
@@ -580,7 +580,7 @@ namespace blt::gp
             total_produced += m_program->get_typesystem().get_type(info.return_type).size();
         }

-        const auto v1 = results.values.bytes_in_head();
+        const auto v1 = static_cast<ptrdiff_t>(results.values.stored());
         const auto v2 = static_cast<ptrdiff_t>(operations.front().type_size());

         // ephemeral don't need to be dropped as there are no copies which matter when checking the tree
@@ -671,7 +671,7 @@ namespace blt::gp
     size_t tree_t::required_size() const
     {
         // 2 size_t used to store expected_length of operations + size of the values stack
-        return 2 * sizeof(size_t) + operations.size() * sizeof(size_t) + values.bytes_in_head();
+        return 2 * sizeof(size_t) + operations.size() * sizeof(size_t) + values.stored();
     }

     void tree_t::to_byte_array(std::byte* out) const
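Note: required_size() stays simple arithmetic. A worked example under the assumption of a 64-bit size_t: a tree of 5 operators carrying 24 value bytes needs 2 x 8 (operation count plus value-byte count) + 5 x 8 (one id per operation) + 24 = 80 bytes.

    #include <cstddef>

    // Mirror of required_size() above, parameterized for illustration.
    constexpr std::size_t required_size(const std::size_t op_count, const std::size_t value_bytes)
    {
        return 2 * sizeof(std::size_t) + op_count * sizeof(std::size_t) + value_bytes;
    }

    // Only meaningful on targets where size_t is 8 bytes, hence the guard.
    static_assert(sizeof(std::size_t) != 8 || required_size(5, 24) == 80);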
@@ -686,7 +686,7 @@ namespace blt::gp
             std::memcpy(out, &id, size_of_op);
             out += size_of_op;
         }
-        const auto val_size = values.bytes_in_head();
+        const auto val_size = values.stored();
         std::memcpy(out, &val_size, sizeof(size_t));
         out += sizeof(size_t);
         std::memcpy(out, values.data(), val_size);
@@ -701,9 +701,9 @@ namespace blt::gp
             auto id = op.id();
             file.write(&id, sizeof(operator_id));
         }
-        const auto val_size = values.bytes_in_head();
+        const auto val_size = values.stored();
         BLT_ASSERT(file.write(&val_size, sizeof(size_t)) == sizeof(size_t));
-        BLT_ASSERT(file.write(values.data(), val_size) == val_size);
+        BLT_ASSERT(file.write(values.data(), val_size) == static_cast<i64>(val_size));
     }

     void tree_t::from_byte_array(const std::byte* in)
@@ -792,7 +792,7 @@ namespace blt::gp
     {
         if (a.operations.size() != b.operations.size())
             return false;
-        if (a.values.bytes_in_head() != b.values.bytes_in_head())
+        if (a.values.stored() != b.values.stored())
             return false;
         return std::equal(a.operations.begin(), a.operations.end(), b.operations.begin());
     }