diff --git a/CMakeLists.txt b/CMakeLists.txt
index 019bcd6..ba2e0f6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.25)
-project(blt-gp VERSION 0.1.15)
+project(blt-gp VERSION 0.1.16)
 
 include(CTest)
 
diff --git a/examples/symbolic_regression.cpp b/examples/symbolic_regression.cpp
index 53024da..7e4869d 100644
--- a/examples/symbolic_regression.cpp
+++ b/examples/symbolic_regression.cpp
@@ -39,7 +39,7 @@ blt::gp::prog_config_t config = blt::gp::prog_config_t()
         .set_mutation_chance(0.1)
         .set_reproduction_chance(0)
         .set_max_generations(50)
-        .set_pop_size(500)
+        .set_pop_size(5000)
         .set_thread_count(0);
 
 blt::gp::type_provider type_system;
diff --git a/include/blt/gp/program.h b/include/blt/gp/program.h
index 51fbefb..5c03aad 100644
--- a/include/blt/gp/program.h
+++ b/include/blt/gp/program.h
@@ -120,21 +120,22 @@ namespace blt::gp
             const auto& vals = tree.get_values();
 
             evaluation_context results{};
+            results.values.reserve(largest);
 
-            auto value_stack = vals;
-            auto& values_process = results.values;
             static thread_local detail::bitmask_t bitfield;
             bitfield.clear();
 
+            blt::size_t total_so_far = 0;
             for (const auto& operation : blt::reverse_iterate(ops.begin(), ops.end()))
             {
                 if (operation.is_value)
                 {
-                    value_stack.transfer_bytes(values_process, operation.type_size);
+                    total_so_far += stack_allocator::aligned_size(operation.type_size);
+                    results.values.copy_from(vals.from(total_so_far), stack_allocator::aligned_size(operation.type_size));
                     bitfield.push_back(false);
                     continue;
                 }
-                call_jmp_table(operation.id, context, values_process, values_process, &bitfield, operators...);
+                call_jmp_table(operation.id, context, results.values, results.values, &bitfield, operators...);
                 bitfield.push_back(true);
             }
 
@@ -212,7 +213,7 @@
         auto add_operator(operation_t& op)
         {
             auto total_size_required = stack_allocator::aligned_size(sizeof(Return));
-            ((total_size_required += stack_allocator::aligned_size(sizeof(Args))) , ...);
+            ((total_size_required += stack_allocator::aligned_size(sizeof(Args))), ...);
 
             auto return_type_id = system.get_type<Return>().id();
             auto operator_id = blt::gp::operator_id(storage.operators.size());
@@ -276,19 +277,26 @@
             }
         }
-        template
-        static inline bool execute(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack, detail::bitmask_t* mask,
-                                   Lambda lambda)
+        template
+        static inline void execute(void* context, stack_allocator& write_stack, stack_allocator& read_stack, detail::bitmask_t* mask,
+                                   Lambda& lambda)
         {
-            if (op == id)
+            if constexpr (std::is_same_v, Context>)
             {
-                if constexpr (HasContext)
-                {
-                    write_stack.push(lambda(context, read_stack, mask));
-                } else
-                {
-                    write_stack.push(lambda(read_stack, mask));
-                }
+                write_stack.push(lambda(context, read_stack, mask));
+            } else
+            {
+                write_stack.push(lambda(read_stack, mask));
+            }
+        }
+
+        template
+        static inline bool call(blt::size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack, detail::bitmask_t* mask,
+                                Lambda& lambda)
+        {
+            if (id == op)
+            {
+                execute(context, write_stack, read_stack, mask, lambda);
                 return false;
             }
             return true;
         }
@@ -296,17 +304,20 @@
 
         template
         static inline void call_jmp_table_internal(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
-                                                    detail::bitmask_t* mask, std::integer_sequence, Lambdas... lambdas)
+                                                    detail::bitmask_t* mask, std::integer_sequence, Lambdas& ... lambdas)
         {
-            if (op > sizeof...(operator_ids))
+            if (op >= sizeof...(operator_ids))
+            {
                 BLT_UNREACHABLE;
-            (execute, operator_ids>(
-                    op, context, write_stack, read_stack, mask, lambdas) && ...);
+            }
+            (call(op, context, write_stack, read_stack, mask, lambdas) && ...);
+// std::initializer_list{((op == operator_ids) ? (execute(context, write_stack, read_stack, mask, lambdas), 0) : 0)...};
+
         }
 
         template
         static inline void call_jmp_table(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
-                                           detail::bitmask_t* mask, Lambdas... lambdas)
+                                           detail::bitmask_t* mask, Lambdas& ... lambdas)
         {
             call_jmp_table_internal(op, context, write_stack, read_stack, mask,
                                     std::index_sequence_for<Lambdas...>(), lambdas...);
diff --git a/include/blt/gp/stack.h b/include/blt/gp/stack.h
index a356ad9..59971b5 100644
--- a/include/blt/gp/stack.h
+++ b/include/blt/gp/stack.h
@@ -66,6 +66,7 @@ namespace blt::gp
             using Allocator = aligned_allocator;
         public:
             static Allocator& get_allocator();
+
             struct size_data_t
             {
                 blt::size_t total_size_bytes = 0;
@@ -185,18 +186,22 @@
                 return *reinterpret_cast(data_ + bytes_stored);
             }
 
+            [[nodiscard]] blt::u8* from(blt::size_t bytes) const
+            {
+#if BLT_DEBUG_LEVEL > 0
+                if (bytes_stored < bytes)
+                    BLT_ABORT(("Not enough bytes in stack to reference " + std::to_string(bytes) + " bytes requested but " + std::to_string(bytes_stored) +
+                               " bytes stored!").c_str());
+#endif
+                return data_ + (bytes_stored - bytes);
+            }
+
             template>
             T& from(blt::size_t bytes)
             {
                 static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
                 static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
-                auto size = aligned_size(sizeof(NO_REF)) + bytes;
-#if BLT_DEBUG_LEVEL > 0
-                if (bytes_stored < size)
-                    BLT_ABORT(("Not enough bytes in stack to reference " + std::to_string(size) + " bytes requested but " + std::to_string(bytes) +
-                               " bytes stored!").c_str());
-#endif
-                return *reinterpret_cast<NO_REF*>(data_ + (bytes_stored - size));
+                return *reinterpret_cast<NO_REF*>(from(aligned_size(sizeof(NO_REF)) + bytes));
             }
 
             void pop_bytes(blt::size_t bytes)
@@ -266,6 +271,12 @@
                 return data;
             }
 
+
+            void reserve(blt::size_t bytes)
+            {
+                if (bytes > size_)
+                    expand(bytes);
+            }
 
         private:
             void expand(blt::size_t bytes)
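
For context on the jump-table change in include/blt/gp/program.h: the old code folded over execute with the operator ids, while the new code folds over a call helper that compares each compile-time id against the runtime op and stops the && chain at the first match, and the lambdas are now passed by reference (Lambdas& ...) instead of by value. A minimal standalone sketch of that short-circuiting fold technique follows; the names call_one, dispatch_internal, and dispatch and the puts-based operators are hypothetical stand-ins, not blt-gp API, and the stack, bitmask, and context plumbing of the real code is omitted.

// Sketch only: short-circuiting fold dispatch over compile-time ids (C++17).
#include <cstddef>
#include <cstdio>
#include <utility>

template <std::size_t id, typename Func>
bool call_one(std::size_t op, Func& func)
{
    if (id == op)
    {
        func();        // matched operator: run its body
        return false;  // false stops the && fold below
    }
    return true;       // no match: let the fold try the next id
}

template <typename... Funcs, std::size_t... ids>
void dispatch_internal(std::size_t op, std::index_sequence<ids...>, Funcs&... funcs)
{
    // Expands to call_one<0>(op, f0) && call_one<1>(op, f1) && ...;
    // evaluation stops at the first id that equals op.
    (call_one<ids>(op, funcs) && ...);
}

template <typename... Funcs>
void dispatch(std::size_t op, Funcs&... funcs)
{
    dispatch_internal(op, std::index_sequence_for<Funcs...>{}, funcs...);
}

int main()
{
    auto add = [] { std::puts("add"); };
    auto sub = [] { std::puts("sub"); };
    dispatch(1, add, sub); // prints "sub"; only the matching lambda runs
}

Because each call_one instantiation returns false on a match, at most one operator body executes per dispatch, and taking the functors by reference avoids copying them on every call, which is the same effect the diff gets from switching the parameter packs to Lambdas& ....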