silly tables

parent 17a2805ef1
commit 24f702b35e

@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.25)
-project(blt-gp VERSION 0.1.15)
+project(blt-gp VERSION 0.1.16)
 
 include(CTest)
 

@@ -39,7 +39,7 @@ blt::gp::prog_config_t config = blt::gp::prog_config_t()
         .set_mutation_chance(0.1)
         .set_reproduction_chance(0)
         .set_max_generations(50)
-        .set_pop_size(500)
+        .set_pop_size(5000)
         .set_thread_count(0);
 
 blt::gp::type_provider type_system;

@@ -120,21 +120,22 @@ namespace blt::gp
             const auto& vals = tree.get_values();
             
             evaluation_context results{};
+            results.values.reserve(largest);
             
-            auto value_stack = vals;
-            auto& values_process = results.values;
             static thread_local detail::bitmask_t bitfield;
             bitfield.clear();
+            blt::size_t total_so_far = 0;
             
             for (const auto& operation : blt::reverse_iterate(ops.begin(), ops.end()))
             {
                 if (operation.is_value)
                 {
-                    value_stack.transfer_bytes(values_process, operation.type_size);
+                    total_so_far += stack_allocator::aligned_size(operation.type_size);
+                    results.values.copy_from(vals.from(total_so_far), stack_allocator::aligned_size(operation.type_size));
                     bitfield.push_back(false);
                     continue;
                 }
-                call_jmp_table(operation.id, context, values_process, values_process, &bitfield, operators...);
+                call_jmp_table(operation.id, context, results.values, results.values, &bitfield, operators...);
                 bitfield.push_back(true);
             }
 
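
The hunk above replaces byte-wise transfer of terminal values (transfer_bytes) with direct copies out of the tree's value buffer (copy_from plus a running total_so_far offset), while a thread-local bitmask records, per result-stack entry, whether it came from a terminal (false) or an operator call (true). A rough standalone sketch of that bookkeeping, with illustrative names (op_t, the vectors in main) that are not part of the library:

#include <cstddef>
#include <vector>

// op_t is an illustrative stand-in for the library's operation entries.
struct op_t
{
    bool is_value;      // true for a stored terminal value, false for an operator
    std::size_t bytes;  // bytes the entry occupies on the value stack
};

int main()
{
    // e.g. the serialized tree for add(x, y): one operator, two terminals
    std::vector<op_t> ops = {{false, 8}, {true, 8}, {true, 8}};
    std::vector<bool> bitmask;     // stands in for detail::bitmask_t
    std::size_t total_so_far = 0;  // running offset into the source value buffer
    for (auto it = ops.rbegin(); it != ops.rend(); ++it)
    {
        if (it->is_value)
        {
            total_so_far += it->bytes; // where this terminal's bytes end
            // ...copy the terminal's bytes onto the result stack here...
            bitmask.push_back(false);  // entry came from a terminal
            continue;
        }
        // ...dispatch the operator through the jump table here...
        bitmask.push_back(true);       // entry was produced by an operator
    }
}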

@@ -276,19 +277,26 @@ namespace blt::gp
             }
         }
         
-        template<bool HasContext, size_t id, typename Lambda>
-        static inline bool execute(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack, detail::bitmask_t* mask,
-                                   Lambda lambda)
+        template<typename Lambda>
+        static inline void execute(void* context, stack_allocator& write_stack, stack_allocator& read_stack, detail::bitmask_t* mask,
+                                   Lambda& lambda)
         {
-            if (op == id)
-            {
-                if constexpr (HasContext)
+            if constexpr (std::is_same_v<detail::remove_cv_ref<typename Lambda::First_Arg>, Context>)
             {
                 write_stack.push(lambda(context, read_stack, mask));
             } else
             {
                 write_stack.push(lambda(read_stack, mask));
             }
-                return false;
-            }
-            return true;
+        }
+        
+        template<blt::size_t id, typename Lambda>
+        static inline bool call(blt::size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack, detail::bitmask_t* mask,
+                                Lambda& lambda)
+        {
+            if (id == op)
+            {
+                execute(context, write_stack, read_stack, mask, lambda);
+                return false;
+            }
+            return true;

@@ -296,17 +304,20 @@ namespace blt::gp
         
         template<typename... Lambdas, size_t... operator_ids>
         static inline void call_jmp_table_internal(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
-                                                   detail::bitmask_t* mask, std::integer_sequence<size_t, operator_ids...>, Lambdas... lambdas)
+                                                   detail::bitmask_t* mask, std::integer_sequence<size_t, operator_ids...>, Lambdas& ... lambdas)
         {
-            if (op >= sizeof...(operator_ids))
+            if (op > sizeof...(operator_ids))
+            {
                 BLT_UNREACHABLE;
-            (execute<detail::is_same_v<typename Lambdas::First_Arg, Context>, operator_ids>(
-                    op, context, write_stack, read_stack, mask, lambdas) && ...);
+            }
+            (call<operator_ids>(op, context, write_stack, read_stack, mask, lambdas) && ...);
+            // std::initializer_list<int>{((op == operator_ids) ? (execute(context, write_stack, read_stack, mask, lambdas), 0) : 0)...};
+            
         }
         
         template<typename... Lambdas>
         static inline void call_jmp_table(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
-                                          detail::bitmask_t* mask, Lambdas... lambdas)
+                                          detail::bitmask_t* mask, Lambdas& ... lambdas)
         {
             call_jmp_table_internal(op, context, write_stack, read_stack, mask, std::index_sequence_for<Lambdas...>(),
                                     lambdas...);
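
The two hunks above split the old fused execute<HasContext, id> helper in two: call<id> compares the runtime op against one compile-time id inside a fold expression, short-circuiting the && chain once the matching lambda has run, while execute picks the context-taking or context-free invocation at compile time from the lambda's first argument. Passing the lambdas as Lambdas&... avoids copying them at every level of the call chain. A minimal self-contained sketch of the fold-based dispatch (dispatch, call_one, and the lambdas are illustrative names, not library API):

#include <cstddef>
#include <cstdio>
#include <utility>

template <std::size_t id, typename Lambda>
bool call_one(std::size_t op, Lambda& lambda)
{
    if (id == op)
    {
        lambda();     // run the matching operator
        return false; // false short-circuits the && fold below
    }
    return true;      // keep scanning
}

template <typename... Lambdas, std::size_t... ids>
void dispatch_internal(std::size_t op, std::index_sequence<ids...>, Lambdas&... lambdas)
{
    // ids and lambdas expand in lockstep; evaluation stops after the match.
    (call_one<ids>(op, lambdas) && ...);
}

template <typename... Lambdas>
void dispatch(std::size_t op, Lambdas&... lambdas)
{
    dispatch_internal(op, std::index_sequence_for<Lambdas...>(), lambdas...);
}

int main()
{
    auto add = [] { std::puts("add"); };
    auto mul = [] { std::puts("mul"); };
    dispatch(1, add, mul); // prints "mul": ids are assigned in argument order
}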

@@ -66,6 +66,7 @@ namespace blt::gp
             using Allocator = aligned_allocator;
+        public:
             static Allocator& get_allocator();
         
             struct size_data_t
             {
                 blt::size_t total_size_bytes = 0;

@@ -185,18 +186,22 @@ namespace blt::gp
             return *reinterpret_cast<T*>(data_ + bytes_stored);
         }
         
+        [[nodiscard]] blt::u8* from(blt::size_t bytes) const
+        {
+#if BLT_DEBUG_LEVEL > 0
+            if (bytes_stored < bytes)
+                BLT_ABORT(("Not enough bytes in stack to reference " + std::to_string(bytes) + " bytes requested but " + std::to_string(bytes) +
+                           " bytes stored!").c_str());
+#endif
+            return data_ + (bytes_stored - bytes);
+        }
+        
         template<typename T, typename NO_REF = NO_REF_T<T>>
         T& from(blt::size_t bytes)
         {
             static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
             static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
-            auto size = aligned_size(sizeof(NO_REF)) + bytes;
-#if BLT_DEBUG_LEVEL > 0
-            if (bytes_stored < size)
-                BLT_ABORT(("Not enough bytes in stack to reference " + std::to_string(size) + " bytes requested but " + std::to_string(bytes) +
-                           " bytes stored!").c_str());
-#endif
-            return *reinterpret_cast<NO_REF*>(data_ + (bytes_stored - size));
+            return *reinterpret_cast<NO_REF*>(from(aligned_size(sizeof(NO_REF)) + bytes));
         }
         
         void pop_bytes(blt::size_t bytes)
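
With this change the templated from<T> delegates to a raw byte-addressed from(bytes), which returns a pointer bytes below the top of the stack; each stored value occupies an aligned slot of aligned_size(sizeof(T)) bytes, so the template only has to compute that slot size. A small sketch of the offset arithmetic, assuming MAX_ALIGNMENT is a power of two such as alignof(std::max_align_t) (illustrative, not library code):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr std::size_t MAX_ALIGNMENT = alignof(std::max_align_t);

constexpr std::size_t aligned_size(std::size_t size)
{
    // round up to the next multiple of MAX_ALIGNMENT (assumed a power of two)
    return (size + (MAX_ALIGNMENT - 1)) & ~(MAX_ALIGNMENT - 1);
}

int main()
{
    std::uint8_t data[64]{};  // toy stack buffer
    std::size_t stored = 0;   // plays the role of bytes_stored
    for (double v : {1.5, 2.5})
    {
        std::memcpy(data + stored, &v, sizeof v);
        stored += aligned_size(sizeof v); // each value takes one aligned slot
    }
    // equivalent of from<double>(0): the slot aligned_size(sizeof(double))
    // bytes below the top of the stack
    double top;
    std::memcpy(&top, data + (stored - aligned_size(sizeof(double))), sizeof top);
    std::printf("%f\n", top); // prints 2.500000, the value on top
}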

@@ -267,6 +272,12 @@ namespace blt::gp
             return data;
         }
         
+        void reserve(blt::size_t bytes)
+        {
+            if (bytes > size_)
+                expand(bytes);
+        }
+        
     private:
         void expand(blt::size_t bytes)
         {
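
The new reserve() only ever grows the backing buffer: a request at or below the current capacity is a no-op, which is what lets the evaluator above call results.values.reserve(largest) once per evaluation without risking a shrink or a redundant reallocation. A toy model of the behaviour (toy_stack is an illustrative name; the real expand() reallocates and copies):

#include <cstddef>
#include <cstdio>

struct toy_stack
{
    std::size_t size_ = 0; // current capacity in bytes

    void reserve(std::size_t bytes)
    {
        if (bytes > size_)
            expand(bytes); // grow only when the request exceeds capacity
    }

    void expand(std::size_t bytes)
    {
        size_ = bytes; // stand-in: a real implementation reallocates and copies
    }
};

int main()
{
    toy_stack s;
    s.reserve(128);
    s.reserve(64);                 // no-op: capacity already sufficient
    std::printf("%zu\n", s.size_); // prints 128
}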