reducing allocations

pages
Brett 2024-09-03 20:34:45 -04:00
parent 06a34d21f1
commit 47b3821b0e
6 changed files with 20 additions and 27 deletions

View File

@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.25)
project(blt-gp VERSION 0.1.44)
project(blt-gp VERSION 0.1.45)
include(CTest)

View File

@ -118,13 +118,16 @@ namespace blt::gp
template<typename... Operators>
program_operator_storage_t& build(Operators& ... operators)
{
blt::size_t largest = 0;
blt::size_t largest_args = 0;
blt::size_t largest_returns = 0;
blt::u32 largest_argc = 0;
operator_metadata_t meta;
((meta = add_operator(operators), largest_argc = std::max(meta.argc.argc, largest_argc),
largest = std::max(std::max(meta.arg_size_bytes, meta.return_size_bytes), largest)), ...);
largest_args = std::max(meta.arg_size_bytes, largest_args), largest_returns = std::max(meta.return_size_bytes,
largest_returns)), ...);
// largest = largest * largest_argc;
blt::size_t largest = largest_args * largest_argc * largest_returns * largest_argc;
BLT_TRACE(largest);
storage.eval_func = [&operators..., largest](const tree_t& tree, void* context) -> evaluation_context& {
@ -143,23 +146,11 @@ namespace blt::gp
op_pos++;
if (operation.is_value)
{
auto cur = tracker.start_measurement();
total_so_far += stack_allocator::aligned_size(operation.type_size);
results.values.copy_from(vals.from(total_so_far), stack_allocator::aligned_size(operation.type_size));
tracker.stop_measurement(cur);
if (cur.getAllocatedByteDifference() > 0)
{
BLT_TRACE("Operator %ld allocated! pos: %ld", operation.id, op_pos);
}
continue;
}
auto cur = tracker.start_measurement();
call_jmp_table(operation.id, context, results.values, results.values, operators...);
tracker.stop_measurement(cur);
if (cur.getAllocatedByteDifference() > 0)
{
BLT_TRACE("Operator %ld allocated! pos: %ld", operation.id, op_pos);
}
}
return results;

View File

@ -118,14 +118,7 @@ namespace blt::gp
evaluation_context& evaluate(void* context) const
{
auto cur = tracker.start_measurement();
auto& v = (*func)(*this, context);
tracker.stop_measurement(cur);
if (cur.getAllocatedByteDifference() > 0)
{
print(*program, std::cout, false, true, false);
}
return v;
return (*func)(*this, context);
}
blt::size_t get_depth(gp_program& program);
@ -201,7 +194,6 @@ namespace blt::gp
tracked_vector<op_container_t> operations;
blt::gp::stack_allocator values;
detail::eval_func_t* func;
gp_program* program;
};
struct fitness_t
@ -255,6 +247,16 @@ namespace blt::gp
normalized_fitness.push_back(v);
}
// Move constructor: std::atomic<double> is neither copyable nor movable, so each
// counter is transferred by value via .load() into the new object's atomics,
// while the normalized_fitness vector is moved normally. The moved-from
// counters are then reset to 0 so the source is left in a clean, reusable state.
// NOTE(review): each load/store is individually atomic, but the transfer as a
// whole is not — assumes no concurrent readers/writers during the move; confirm
// callers only move between generations.
population_stats(population_stats&& move) noexcept:
overall_fitness(move.overall_fitness.load()), average_fitness(move.average_fitness.load()), best_fitness(move.best_fitness.load()),
worst_fitness(move.worst_fitness.load()), normalized_fitness(std::move(move.normalized_fitness))
{
// Zero out the source's counters (normalized_fitness is already emptied by the move above).
move.overall_fitness = 0;
move.average_fitness = 0;
move.best_fitness = 0;
move.worst_fitness = 0;
}
std::atomic<double> overall_fitness = 0;
std::atomic<double> average_fitness = 0;
std::atomic<double> best_fitness = 0;

View File

@ -61,6 +61,7 @@ namespace blt::gp
#ifdef BLT_TRACK_ALLOCATIONS
tracker.reserve();
#endif
statistic_history.reserve(config.max_generations + 1);
if (config.threads == 0)
config.set_thread_count(std::thread::hardware_concurrency());
// main thread is thread0

View File

@ -459,8 +459,7 @@ namespace blt::gp
config.generator.get().generate(tree,
{program, replacement_func_info.argument_types[i].id, config.replacement_min_depth,
config.replacement_max_depth});
blt::size_t total_bytes_for = tree.total_value_bytes();
vals.copy_from(tree.get_values(), total_bytes_for);
vals.insert(tree.get_values());
ops.insert(ops.begin() + static_cast<blt::ptrdiff_t>(start_index), tree.get_operations().begin(),
tree.get_operations().end());
start_index += tree.get_operations().size();

View File

@ -295,7 +295,7 @@ namespace blt::gp
}
}
tree_t::tree_t(gp_program& program): func(&program.get_eval_func()), program(&program)
tree_t::tree_t(gp_program& program): func(&program.get_eval_func())
{
}