move the evaluation function into the tree class; 'make_execution_lambda' can now be called to build the lambda previously found in the operator_builder class

dev-func-drop
Brett 2025-01-14 14:00:55 -05:00
parent 1e9442bbd4
commit 32a83e725c
6 changed files with 446 additions and 401 deletions
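A minimal sketch of the shape of this change, using hypothetical stand-in types (only `make_execution_lambda` and `eval_func` correspond to names in the diff below): the builder no longer assembles the evaluation lambda itself; it asks the tree class for one.

```cpp
#include <cstddef>
#include <functional>

struct evaluation_result_t { int value = 0; }; // stand-in for evaluation_context

struct tree_t
{
    int root = 0;

    // stand-in for tree_t::make_execution_lambda<Context>(largest, operators...)
    template <typename... Operators>
    static auto make_execution_lambda(std::size_t reserve, Operators&... operators)
    {
        return [reserve, &operators...](const tree_t& tree) -> evaluation_result_t {
            (void) reserve; // the real lambda reserves scratch space of this size
            evaluation_result_t r{tree.root};
            ((r.value = operators(r.value)), ...); // apply each operator in turn
            return r;
        };
    }
};

struct builder_t
{
    std::function<evaluation_result_t(const tree_t&)> eval_func;

    template <typename... Operators>
    void build(Operators&... operators)
    {
        // previously this lambda body lived in the builder; now the tree class provides it
        eval_func = tree_t::make_execution_lambda(64, operators...);
    }
};

int main()
{
    auto add_one = [](int v) { return v + 1; };
    auto times_two = [](int v) { return v * 2; };
    builder_t builder;
    builder.build(add_one, times_two);
    const tree_t t{5};
    return builder.eval_func(t).value == 12 ? 0 : 1; // (5 + 1) * 2 == 12
}
```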

View File

@ -22,53 +22,13 @@
#include <blt/std/types.h>
#include <blt/gp/typesystem.h>
#include <blt/gp/stack.h>
#include <blt/gp/util/meta.h>
#include <functional>
#include <type_traits>
#include <optional>
namespace blt::gp
{
namespace detail
{
template<typename T>
using remove_cv_ref = std::remove_cv_t<std::remove_reference_t<T>>;
template<typename...>
struct first_arg;
template<typename First, typename... Args>
struct first_arg<First, Args...>
{
using type = First;
};
template<>
struct first_arg<>
{
using type = void;
};
template<bool b, typename... types>
struct is_same;
template<typename... types>
struct is_same<true, types...> : public std::false_type
{
};
template<typename... types>
struct is_same<false, types...> : public std::is_same<types...>
{
};
template<typename... types>
constexpr bool is_same_v = is_same<sizeof...(types) == 0, types...>::value;
struct empty_t
{
};
}
template<typename Return, typename... Args>
struct call_with

View File

@ -128,33 +128,9 @@ namespace blt::gp
largest_returns)), ...);
// largest = largest * largest_argc;
blt::size_t largest = largest_args * largest_argc * largest_returns * largest_argc;
size_t largest = largest_args * largest_argc * largest_returns * largest_argc;
storage.eval_func = [&operators..., largest](const tree_t& tree, void* context) -> evaluation_context& {
const auto& ops = tree.get_operations();
const auto& vals = tree.get_values();
static thread_local evaluation_context results{};
results.values.reset();
results.values.reserve(largest);
blt::size_t total_so_far = 0;
blt::size_t op_pos = 0;
for (const auto& operation : iterate(ops).rev())
{
op_pos++;
if (operation.is_value())
{
total_so_far += operation.type_size();
results.values.copy_from(vals.from(total_so_far), operation.type_size());
continue;
}
call_jmp_table(operation.id(), context, results.values, results.values, operators...);
}
return results;
};
storage.eval_func = tree_t::make_execution_lambda<Context>(largest, operators...);
blt::hashset_t<type_id> has_terminals;
@ -306,48 +282,7 @@ namespace blt::gp
types.push_back(storage.system.get_type<T>().id());
}
}
template <typename Operator>
static void execute(void* context, stack_allocator& write_stack, stack_allocator& read_stack, Operator& operation)
{
if constexpr (std::is_same_v<detail::remove_cv_ref<typename Operator::First_Arg>, Context>)
{
write_stack.push(operation(context, read_stack));
}
else
{
write_stack.push(operation(read_stack));
}
}
template <blt::size_t id, typename Operator>
static bool call(blt::size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack, Operator& operation)
{
if (id == op)
{
execute(context, write_stack, read_stack, operation);
return false;
}
return true;
}
template <typename... Operators, size_t... operator_ids>
static void call_jmp_table_internal(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
std::integer_sequence<size_t, operator_ids...>, Operators&... operators)
{
if (op >= sizeof...(operator_ids))
{
BLT_UNREACHABLE;
}
(call<operator_ids>(op, context, write_stack, read_stack, operators) && ...);
}
template <typename... Operators>
static void call_jmp_table(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
Operators&... operators)
{
call_jmp_table_internal(op, context, write_stack, read_stack, std::index_sequence_for<Operators...>(), operators...);
}
private:
program_operator_storage_t storage;
};

View File

@ -19,6 +19,7 @@
#ifndef BLT_GP_TREE_H
#define BLT_GP_TREE_H
#include <blt/gp/util/meta.h>
#include <blt/gp/typesystem.h>
#include <blt/gp/stack.h>
#include <blt/gp/fwdecl.h>
@ -31,16 +32,17 @@
namespace blt::gp
{
struct op_container_t
{
op_container_t(const size_t type_size, const operator_id id, const bool is_value):
m_type_size(type_size), m_id(id), m_is_value(is_value), m_has_drop(false)
{}
{
}
op_container_t(const size_t type_size, const operator_id id, const bool is_value, const bool has_drop):
m_type_size(type_size), m_id(id), m_is_value(is_value), m_has_drop(has_drop)
{}
{
}
[[nodiscard]] auto type_size() const
{
@ -61,6 +63,7 @@ namespace blt::gp
{
return m_has_drop;
}
private:
size_t m_type_size;
operator_id m_id;
@ -112,14 +115,12 @@ namespace blt::gp
{
if (op_it->has_drop())
{
}
if (copy_it == copy.operations.end())
break;
*op_it = *copy_it;
if (copy_it->has_drop())
{
}
++copy_it;
}
@ -128,7 +129,6 @@ namespace blt::gp
{
if (op_it->has_drop())
{
}
}
operations.erase(op_it_cpy, operations.end());
@ -144,11 +144,22 @@ namespace blt::gp
struct child_t
{
blt::ptrdiff_t start;
ptrdiff_t start;
// one past the end
blt::ptrdiff_t end;
ptrdiff_t end;
};
void insert_operator(const op_container_t& container)
{
operations.emplace_back(container);
}
template <typename... Args>
void emplace_operator(Args&&... args)
{
operations.emplace_back(std::forward<Args>(args)...);
}
[[nodiscard]] inline tracked_vector<op_container_t>& get_operations()
{
return operations;
@ -255,7 +266,77 @@ namespace blt::gp
return total_value_bytes(operations.begin(), operations.end());
}
template <typename Context, typename... Operators>
static auto make_execution_lambda(size_t call_reserve_size, Operators&... operators)
{
return [call_reserve_size, &operators...](const tree_t& tree, void* context) -> evaluation_context& {
const auto& ops = tree.operations;
const auto& vals = tree.values;
thread_local evaluation_context results{};
results.values.reset();
results.values.reserve(call_reserve_size);
size_t total_so_far = 0;
for (const auto& operation : iterate(ops).rev())
{
if (operation.is_value())
{
total_so_far += operation.type_size();
results.values.copy_from(vals.from(total_so_far), operation.type_size());
continue;
}
call_jmp_table<Context>(operation.id(), context, results.values, results.values, operators...);
}
return results;
};
}
private:
template <typename Context, typename Operator>
static void execute(void* context, stack_allocator& write_stack, stack_allocator& read_stack, Operator& operation)
{
if constexpr (std::is_same_v<detail::remove_cv_ref<typename Operator::First_Arg>, Context>)
{
write_stack.push(operation(context, read_stack));
}
else
{
write_stack.push(operation(read_stack));
}
}
template <typename Context, size_t id, typename Operator>
static bool call(const size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack, Operator& operation)
{
if (id == op)
{
execute<Context>(context, write_stack, read_stack, operation);
return false;
}
return true;
}
template <typename Context, typename... Operators, size_t... operator_ids>
static void call_jmp_table_internal(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
std::integer_sequence<size_t, operator_ids...>, Operators&... operators)
{
if (op >= sizeof...(operator_ids))
{
BLT_UNREACHABLE;
}
(call<Context, operator_ids>(op, context, write_stack, read_stack, operators) && ...);
}
template <typename Context, typename... Operators>
static void call_jmp_table(size_t op, void* context, stack_allocator& write_stack, stack_allocator& read_stack,
Operators&... operators)
{
call_jmp_table_internal<Context>(op, context, write_stack, read_stack, std::index_sequence_for<Operators...>(), operators...);
}
tracked_vector<op_container_t> operations;
stack_allocator values;
detail::eval_func_t* func;
@ -285,10 +366,12 @@ namespace blt::gp
individual_t() = delete;
explicit individual_t(tree_t&& tree): tree(std::move(tree))
{}
{
}
explicit individual_t(const tree_t& tree): tree(tree)
{}
{
}
individual_t(const individual_t&) = default;
@ -306,7 +389,8 @@ namespace blt::gp
{
public:
population_tree_iterator(tracked_vector<individual_t>& ind, blt::size_t pos): ind(ind), pos(pos)
{}
{
}
auto begin()
{

View File

@ -0,0 +1,66 @@
#pragma once
/*
* Copyright (C) 2024 Brett Terpstra
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef BLT_GP_UTIL_META_H
#define BLT_GP_UTIL_META_H
#include <type_traits>
namespace blt::gp::detail
{
template <typename T>
using remove_cv_ref = std::remove_cv_t<std::remove_reference_t<T>>;
template <typename...>
struct first_arg;
template <typename First, typename... Args>
struct first_arg<First, Args...>
{
using type = First;
};
template <>
struct first_arg<>
{
using type = void;
};
template <bool b, typename... types>
struct is_same;
template <typename... types>
struct is_same<true, types...> : std::false_type
{
};
template <typename... types>
struct is_same<false, types...> : std::is_same<types...>
{
};
template <typename... types>
constexpr bool is_same_v = is_same<sizeof...(types) == 0, types...>::value;
struct empty_t
{
};
}
#endif //BLT_GP_UTIL_META_H

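The `is_same` trick in this header exists so that an empty parameter pack compares as "not the same" instead of instantiating `std::is_same` with too few arguments. A standalone restatement of the trait (in a demo namespace, not the library's) with the expected results:

```cpp
#include <type_traits>

namespace demo
{
    template <bool b, typename... types>
    struct is_same;

    // empty pack: sizeof...(types) == 0 selects this specialization, yielding false
    template <typename... types>
    struct is_same<true, types...> : std::false_type {};

    // non-empty pack: delegate to std::is_same
    template <typename... types>
    struct is_same<false, types...> : std::is_same<types...> {};

    template <typename... types>
    constexpr bool is_same_v = is_same<sizeof...(types) == 0, types...>::value;
}

static_assert(!demo::is_same_v<>);           // empty pack is treated as "not the same"
static_assert(demo::is_same_v<int, int>);
static_assert(!demo::is_same_v<int, float>);

int main() { return 0; }
```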
View File

@ -61,7 +61,7 @@ namespace blt::gp
auto& info = args.program.get_operator_info(top.id);
tree.get_operations().emplace_back(
tree.emplace_operator(
args.program.get_typesystem().get_type(info.return_type).size(),
top.id,
args.program.is_operator_ephemeral(top.id));

View File

@ -189,22 +189,22 @@ inline void there(blt::size_t)
int main()
{
blt::gp::thread_manager_t threads{
std::thread::hardware_concurrency(), blt::gp::task_builder_t<test>::make_callable(
blt::gp::task_t{test::hello, hello},
blt::gp::task_t{test::there, there}
)
};
// blt::gp::thread_manager_t threads{
// std::thread::hardware_concurrency(), blt::gp::task_builder_t<test>::make_callable(
// blt::gp::task_t{test::hello, hello},
// blt::gp::task_t{test::there, there}
// )
// };
threads.add_task(test::hello);
threads.add_task(test::hello);
threads.add_task(test::hello);
threads.add_task(test::there);
// threads.add_task(test::hello);
// threads.add_task(test::hello);
// threads.add_task(test::hello);
// threads.add_task(test::there);
while (threads.has_tasks_left())
threads.execute();
// while (threads.has_tasks_left())
// threads.execute();
// for (int i = 0; i < 1; i++)
// do_run();
for (int i = 0; i < 1; i++)
do_run();
return 0;
}