Compare commits

4 Commits

Author SHA1 Message Date
Brett 858a7f5cfe works 2024-08-18 02:08:48 -04:00
Brett 7c3b8c050b silly 2024-08-18 01:28:23 -04:00
Brett a8b81bc7a6 test on single thread 2024-08-17 20:07:18 -04:00
Brett 58b3ed02c3 threading on the next generation function + working on debug 2024-08-17 19:52:52 -04:00
8 changed files with 277 additions and 248 deletions

View File

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.25) cmake_minimum_required(VERSION 3.25)
project(blt-gp VERSION 0.1.4) project(blt-gp VERSION 0.1.8)
include(CTest) include(CTest)

View File

@@ -117,15 +117,15 @@ int main()
program.set_operations(builder.build()); program.set_operations(builder.build());
BLT_DEBUG("Generate Initial Population"); BLT_DEBUG("Generate Initial Population");
program.generate_population(type_system.get_type<float>().id(), fitness_function); auto sel = blt::gp::select_fitness_proportionate_t{};
program.generate_population(type_system.get_type<float>().id(), fitness_function, sel, sel, sel);
BLT_DEBUG("Begin Generation Loop"); BLT_DEBUG("Begin Generation Loop");
while (!program.should_terminate()) while (!program.should_terminate())
{ {
BLT_TRACE("------------{Begin Generation %ld}------------", program.get_current_generation()); BLT_TRACE("------------{Begin Generation %ld}------------", program.get_current_generation());
BLT_START_INTERVAL("Symbolic Regression", "Gen"); BLT_START_INTERVAL("Symbolic Regression", "Gen");
auto sel = blt::gp::select_fitness_proportionate_t{}; program.create_next_generation();
program.create_next_generation(sel, sel, sel);
BLT_END_INTERVAL("Symbolic Regression", "Gen"); BLT_END_INTERVAL("Symbolic Regression", "Gen");
BLT_TRACE("Move to next generation"); BLT_TRACE("Move to next generation");
BLT_START_INTERVAL("Symbolic Regression", "Fitness"); BLT_START_INTERVAL("Symbolic Regression", "Fitness");

View File

@@ -264,19 +264,12 @@ namespace blt::gp
system(system), seed(seed), config(config) system(system), seed(seed), config(config)
{ create_threads(); } { create_threads(); }
template<typename Crossover, typename Mutation, typename Reproduction, typename CreationFunc = decltype(default_next_pop_creator<Crossover, Mutation, Reproduction>)> void create_next_generation()
void create_next_generation(Crossover&& crossover_selection, Mutation&& mutation_selection, Reproduction&& reproduction_selection,
CreationFunc& func = default_next_pop_creator<Crossover, Mutation, Reproduction>)
{ {
// should already be empty // should already be empty
next_pop.clear(); next_pop.clear();
crossover_selection.pre_process(*this, current_pop, current_stats); thread_helper.next_gen_left.store(config.population_size, std::memory_order_release);
mutation_selection.pre_process(*this, current_pop, current_stats); (*thread_execution_service)(0);
reproduction_selection.pre_process(*this, current_pop, current_stats);
auto args = get_selector_args();
func(args, std::forward<Crossover>(crossover_selection), std::forward<Mutation>(mutation_selection),
std::forward<Reproduction>(reproduction_selection));
} }
void evaluate_fitness() void evaluate_fitness()
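
create_next_generation() is now just a dispatch: it publishes the amount of work in an atomic counter and runs the stored execution functor as worker 0, with pool threads (when enabled) draining the same counter. A self-contained sketch of that dispatch pattern, using generic names rather than the blt-gp members:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <functional>

    struct work_dispatcher
    {
        std::atomic_uint64_t work_left{0};
        std::atomic<std::function<void(std::size_t)>*> service{nullptr};

        // Mirrors the shape of create_next_generation()/evaluate_fitness_internal():
        // publish the unit count, then participate as worker id 0.
        void run(std::uint64_t units)
        {
            work_left.store(units, std::memory_order_release);
            (*service.load(std::memory_order_acquire))(0);
        }
    };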
@@ -294,8 +287,10 @@ namespace blt::gp
* *
* NOTE: 0 is considered the best, in terms of standardized fitness * NOTE: 0 is considered the best, in terms of standardized fitness
*/ */
template<typename FitnessFunc> template<typename FitnessFunc, typename Crossover, typename Mutation, typename Reproduction, typename CreationFunc = decltype(default_next_pop_creator<Crossover, Mutation, Reproduction>)>
void generate_population(type_id root_type, FitnessFunc& fitness_function, bool eval_fitness_now = true) void generate_population(type_id root_type, FitnessFunc& fitness_function,
Crossover& crossover_selection, Mutation& mutation_selection, Reproduction& reproduction_selection,
CreationFunc& func = default_next_pop_creator<Crossover, Mutation, Reproduction>, bool eval_fitness_now = true)
{ {
using LambdaReturn = typename decltype(blt::meta::lambda_helper(fitness_function))::Return; using LambdaReturn = typename decltype(blt::meta::lambda_helper(fitness_function))::Return;
current_pop = config.pop_initializer.get().generate( current_pop = config.pop_initializer.get().generate(
@@ -303,7 +298,10 @@ namespace blt::gp
if (config.threads == 1) if (config.threads == 1)
{ {
BLT_INFO("Starting with single thread variant!"); BLT_INFO("Starting with single thread variant!");
thread_execution_service = new std::function([this, &fitness_function](blt::size_t) { thread_execution_service = new std::function(
[this, &fitness_function, &crossover_selection, &mutation_selection, &reproduction_selection, &func](blt::size_t) {
if (thread_helper.evaluation_left > 0)
{
for (const auto& ind : blt::enumerate(current_pop.get_individuals())) for (const auto& ind : blt::enumerate(current_pop.get_individuals()))
{ {
if constexpr (std::is_same_v<LambdaReturn, bool> || std::is_convertible_v<LambdaReturn, bool>) if constexpr (std::is_same_v<LambdaReturn, bool> || std::is_convertible_v<LambdaReturn, bool>)
@@ -324,12 +322,35 @@ namespace blt::gp
current_stats.overall_fitness = current_stats.overall_fitness + ind.second.fitness.adjusted_fitness; current_stats.overall_fitness = current_stats.overall_fitness + ind.second.fitness.adjusted_fitness;
} }
thread_helper.evaluation_left = 0;
}
if (thread_helper.next_gen_left > 0)
{
static thread_local std::vector<tree_t> new_children;
new_children.clear();
auto args = get_selector_args(new_children);
crossover_selection.pre_process(*this, current_pop, current_stats);
mutation_selection.pre_process(*this, current_pop, current_stats);
reproduction_selection.pre_process(*this, current_pop, current_stats);
perform_elitism(args);
while (new_children.size() < config.population_size)
func(args, crossover_selection, mutation_selection, reproduction_selection);
for (auto& i : new_children)
next_pop.get_individuals().emplace_back(std::move(i));
thread_helper.next_gen_left = 0;
}
}); });
} else } else
{ {
BLT_INFO("Starting thread execution service!"); BLT_INFO("Starting thread execution service!");
std::scoped_lock lock(thread_helper.thread_function_control); std::scoped_lock lock(thread_helper.thread_function_control);
thread_execution_service = new std::function([this, &fitness_function](blt::size_t) { thread_execution_service = new std::function(
[this, &fitness_function, &crossover_selection, &mutation_selection, &reproduction_selection, &func](blt::size_t id) {
thread_helper.barrier.wait(); thread_helper.barrier.wait();
if (thread_helper.evaluation_left > 0) if (thread_helper.evaluation_left > 0)
{ {
@@ -363,12 +384,14 @@ namespace blt::gp
auto old_best = current_stats.best_fitness.load(std::memory_order_relaxed); auto old_best = current_stats.best_fitness.load(std::memory_order_relaxed);
while (ind.fitness.adjusted_fitness > old_best && while (ind.fitness.adjusted_fitness > old_best &&
!current_stats.best_fitness.compare_exchange_weak(old_best, ind.fitness.adjusted_fitness, !current_stats.best_fitness.compare_exchange_weak(old_best, ind.fitness.adjusted_fitness,
std::memory_order_relaxed, std::memory_order_relaxed)); std::memory_order_relaxed,
std::memory_order_relaxed));
auto old_worst = current_stats.worst_fitness.load(std::memory_order_relaxed); auto old_worst = current_stats.worst_fitness.load(std::memory_order_relaxed);
while (ind.fitness.adjusted_fitness < old_worst && while (ind.fitness.adjusted_fitness < old_worst &&
!current_stats.worst_fitness.compare_exchange_weak(old_worst, ind.fitness.adjusted_fitness, !current_stats.worst_fitness.compare_exchange_weak(old_worst, ind.fitness.adjusted_fitness,
std::memory_order_relaxed, std::memory_order_relaxed)); std::memory_order_relaxed,
std::memory_order_relaxed));
auto old_overall = current_stats.overall_fitness.load(std::memory_order_relaxed); auto old_overall = current_stats.overall_fitness.load(std::memory_order_relaxed);
while (!current_stats.overall_fitness.compare_exchange_weak(old_overall, while (!current_stats.overall_fitness.compare_exchange_weak(old_overall,
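
The best/worst updates above are the usual lock-free compare_exchange_weak retry loop: keep trying to publish the new extreme until it is either no longer better or the exchange succeeds. In isolation the idiom looks like this (relaxed ordering, as in the diff, since only the final value matters):

    #include <atomic>

    // Raise `target` to at least `candidate`; safe to call concurrently from many threads.
    inline void atomic_fetch_max(std::atomic<double>& target, double candidate)
    {
        double current = target.load(std::memory_order_relaxed);
        while (candidate > current &&
               !target.compare_exchange_weak(current, candidate,
                                             std::memory_order_relaxed, std::memory_order_relaxed))
        {
            // a failed exchange reloaded `current`; the loop re-checks whether candidate still wins
        }
    }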
@@ -380,6 +403,26 @@ namespace blt::gp
} }
if (thread_helper.next_gen_left > 0) if (thread_helper.next_gen_left > 0)
{ {
static thread_local std::vector<tree_t> new_children;
new_children.clear();
auto args = get_selector_args(new_children);
if (id == 0)
{
crossover_selection.pre_process(*this, current_pop, current_stats);
if (&crossover_selection != &mutation_selection)
mutation_selection.pre_process(*this, current_pop, current_stats);
if (&crossover_selection != &reproduction_selection)
reproduction_selection.pre_process(*this, current_pop, current_stats);
perform_elitism(args);
for (auto& i : new_children)
next_pop.get_individuals().emplace_back(std::move(i));
thread_helper.next_gen_left -= new_children.size();
new_children.clear();
}
thread_helper.barrier.wait();
while (thread_helper.next_gen_left > 0) while (thread_helper.next_gen_left > 0)
{ {
blt::size_t size = 0; blt::size_t size = 0;
@@ -393,12 +436,16 @@ namespace blt::gp
std::memory_order::memory_order_relaxed, std::memory_order::memory_order_relaxed,
std::memory_order::memory_order_relaxed)); std::memory_order::memory_order_relaxed));
static thread_local std::vector<tree_t> new_children;
new_children.clear();
for (blt::size_t i = begin; i < end; i++) for (blt::size_t i = begin; i < end; i++)
{ func(args, crossover_selection, mutation_selection, reproduction_selection);
{
std::scoped_lock lock(thread_helper.thread_generation_lock);
for (auto& i : new_children)
{
if (next_pop.get_individuals().size() < config.population_size)
next_pop.get_individuals().emplace_back(i);
}
} }
} }
} }
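
The threaded path above is a claim/build/merge loop: a worker reserves a slice of next_gen_left with a compare-exchange loop, fills its static thread_local buffer, then appends to the shared next population under thread_generation_lock, dropping any overshoot past population_size. A condensed generic sketch of that loop (names and the batch size of 16 are illustrative, not the library's):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    template <typename Tree, typename MakeChild>
    void fill_generation(std::atomic_uint64_t& work_left, std::mutex& merge_lock,
                         std::vector<Tree>& shared_pop, std::size_t target_size, MakeChild&& make_child)
    {
        static thread_local std::vector<Tree> local; // per-thread scratch, capacity reused across generations
        for (;;)
        {
            // claim up to 16 children by atomically decrementing the shared counter
            std::uint64_t remaining = work_left.load(std::memory_order_acquire);
            std::uint64_t batch;
            do
            {
                if (remaining == 0)
                    return;
                batch = std::min<std::uint64_t>(remaining, 16);
            } while (!work_left.compare_exchange_weak(remaining, remaining - batch,
                                                      std::memory_order_acq_rel, std::memory_order_acquire));

            local.clear();
            for (std::uint64_t i = 0; i < batch; i++)
                local.push_back(make_child());

            // merge under one lock; the size check mirrors the diff's guard against overshoot
            std::scoped_lock lock(merge_lock);
            for (auto& child : local)
                if (shared_pop.size() < target_size)
                    shared_pop.emplace_back(std::move(child));
        }
    }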
@@ -605,6 +652,7 @@ namespace blt::gp
std::vector<std::unique_ptr<std::thread>> threads; std::vector<std::unique_ptr<std::thread>> threads;
std::mutex thread_function_control; std::mutex thread_function_control;
std::mutex thread_generation_lock;
std::condition_variable thread_function_condition{}; std::condition_variable thread_function_condition{};
std::atomic_uint64_t evaluation_left = 0; std::atomic_uint64_t evaluation_left = 0;
@@ -620,9 +668,9 @@ namespace blt::gp
// for convenience, shouldn't decrease performance too much // for convenience, shouldn't decrease performance too much
std::atomic<std::function<void(blt::size_t)>*> thread_execution_service = nullptr; std::atomic<std::function<void(blt::size_t)>*> thread_execution_service = nullptr;
inline selector_args get_selector_args() inline selector_args get_selector_args(std::vector<tree_t>& next_pop_trees)
{ {
return {*this, next_pop, current_pop, current_stats, config, get_random()}; return {*this, next_pop_trees, current_pop, current_stats, config, get_random()};
} }
template<typename Return, blt::size_t size, typename Accessor, blt::size_t... indexes> template<typename Return, blt::size_t size, typename Accessor, blt::size_t... indexes>
@@ -637,7 +685,6 @@ namespace blt::gp
void evaluate_fitness_internal() void evaluate_fitness_internal()
{ {
current_stats.clear(); current_stats.clear();
if (config.threads != 1)
thread_helper.evaluation_left.store(current_pop.get_individuals().size(), std::memory_order_release); thread_helper.evaluation_left.store(current_pop.get_individuals().size(), std::memory_order_release);
(*thread_execution_service)(0); (*thread_execution_service)(0);

View File

@@ -31,7 +31,7 @@ namespace blt::gp
struct selector_args struct selector_args
{ {
gp_program& program; gp_program& program;
population_t& next_pop; std::vector<tree_t>& next_pop;
population_t& current_pop; population_t& current_pop;
population_stats& current_stats; population_stats& current_stats;
prog_config_t& config; prog_config_t& config;
@@ -52,8 +52,6 @@ namespace blt::gp
{ {
for (blt::size_t i = 0; i < config.elites; i++) for (blt::size_t i = 0; i < config.elites; i++)
{ {
// BLT_INFO("%lf >= %lf? // %lf (indexes: %ld %ld)", ind.second.fitness.adjusted_fitness, values[i].second,
// ind.second.fitness.raw_fitness, ind.first, values[i].first);
if (ind.second.fitness.adjusted_fitness >= values[i].second) if (ind.second.fitness.adjusted_fitness >= values[i].second)
{ {
bool doesnt_contain = true; bool doesnt_contain = true;
@@ -70,77 +68,15 @@ namespace blt::gp
} }
for (blt::size_t i = 0; i < config.elites; i++) for (blt::size_t i = 0; i < config.elites; i++)
next_pop.get_individuals().push_back(current_pop.get_individuals()[values[i].first]); next_pop.push_back(current_pop.get_individuals()[values[i].first].tree);
}
};
template<typename Crossover, typename Mutation, typename Reproduction>
constexpr inline auto proportionate_next_pop_creator = [](
const selector_args& args, Crossover crossover_selection, Mutation mutation_selection, Reproduction reproduction_selection) {
auto& [program, next_pop, current_pop, current_stats, config, random] = args;
double total_prob = config.mutation_chance + config.crossover_chance;
double crossover_chance = config.crossover_chance / total_prob;
double mutation_chance = crossover_chance + config.mutation_chance / total_prob;
perform_elitism(args);
while (next_pop.get_individuals().size() < config.population_size)
{
auto type = random.get_double();
if (type > crossover_chance && type < mutation_chance)
{
// crossover
auto& p1 = crossover_selection.select(program, current_pop, current_stats);
auto& p2 = crossover_selection.select(program, current_pop, current_stats);
auto results = config.crossover.get().apply(program, p1, p2);
// if crossover fails, we can check for mutation on these guys. otherwise straight copy them into the next pop
if (results)
{
next_pop.get_individuals().emplace_back(std::move(results->child1));
// annoying check
if (next_pop.get_individuals().size() < config.population_size)
next_pop.get_individuals().emplace_back(std::move(results->child2));
} else
{
if (config.try_mutation_on_crossover_failure && random.choice(config.mutation_chance))
next_pop.get_individuals().emplace_back(std::move(config.mutator.get().apply(program, p1)));
else
next_pop.get_individuals().push_back(individual{p1});
// annoying check.
if (next_pop.get_individuals().size() < config.population_size)
{
if (config.try_mutation_on_crossover_failure && random.choice(config.mutation_chance))
next_pop.get_individuals().emplace_back(std::move(config.mutator.get().apply(program, p2)));
else
next_pop.get_individuals().push_back(individual{p2});
}
}
} else if (type > mutation_chance)
{
// mutation
auto& p = mutation_selection.select(program, current_pop, current_stats);
next_pop.get_individuals().emplace_back(std::move(config.mutator.get().apply(program, p)));
} else
{
// reproduction
auto& p = reproduction_selection.select(program, current_pop, current_stats);
next_pop.get_individuals().push_back(individual{p});
}
} }
}; };
template<typename Crossover, typename Mutation, typename Reproduction> template<typename Crossover, typename Mutation, typename Reproduction>
constexpr inline auto default_next_pop_creator = []( constexpr inline auto default_next_pop_creator = [](
const blt::gp::selector_args& args, Crossover crossover_selection, Mutation mutation_selection, Reproduction reproduction_selection) { blt::gp::selector_args& args, Crossover& crossover_selection, Mutation& mutation_selection, Reproduction& reproduction_selection) {
auto& [program, next_pop, current_pop, current_stats, config, random] = args; auto& [program, next_pop, current_pop, current_stats, config, random] = args;
perform_elitism(args);
while (next_pop.get_individuals().size() < config.population_size)
{
int sel = random.get_i32(0, 3); int sel = random.get_i32(0, 3);
switch (sel) switch (sel)
{ {
@@ -157,10 +93,8 @@ namespace blt::gp
// if crossover fails, we can check for mutation on these guys. otherwise straight copy them into the next pop // if crossover fails, we can check for mutation on these guys. otherwise straight copy them into the next pop
if (results) if (results)
{ {
next_pop.get_individuals().emplace_back(std::move(results->child1)); next_pop.push_back(std::move(results->child1));
// annoying check next_pop.push_back(std::move(results->child2));
if (next_pop.get_individuals().size() < config.population_size)
next_pop.get_individuals().emplace_back(std::move(results->child2));
} }
} }
break; break;
@@ -169,7 +103,7 @@ namespace blt::gp
{ {
// mutation // mutation
auto& p = mutation_selection.select(program, current_pop, current_stats); auto& p = mutation_selection.select(program, current_pop, current_stats);
next_pop.get_individuals().emplace_back(std::move(config.mutator.get().apply(program, p))); next_pop.push_back(std::move(config.mutator.get().apply(program, p)));
} }
break; break;
case 2: case 2:
@@ -177,12 +111,15 @@ namespace blt::gp
{ {
// reproduction // reproduction
auto& p = reproduction_selection.select(program, current_pop, current_stats); auto& p = reproduction_selection.select(program, current_pop, current_stats);
next_pop.get_individuals().push_back(individual{p}); next_pop.push_back(p);
} }
break; break;
default: default:
#if BLT_DEBUG_LEVEL > 0
BLT_ABORT("This is not possible!"); BLT_ABORT("This is not possible!");
} #else
BLT_UNREACHABLE;
#endif
} }
}; };
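
Since generate_population() accepts any CreationFunc with the same shape as default_next_pop_creator, a caller could in principle supply its own policy. A hypothetical reproduction-only creator matching the argument shape used above (one selection event per call; the loop and the elitism pass now live in program.h). It would be handed to generate_population() in place of the default creator.

    // Illustrative only; the parameter shape copies default_next_pop_creator from this diff.
    template<typename Crossover, typename Mutation, typename Reproduction>
    constexpr inline auto reproduction_only_creator = [](
            blt::gp::selector_args& args, Crossover&, Mutation&, Reproduction& reproduction_selection) {
        auto& [program, next_pop, current_pop, current_stats, config, random] = args;
        (void) config; (void) random;
        // pick one parent and copy its tree straight into the output buffer
        auto& p = reproduction_selection.select(program, current_pop, current_stats);
        next_pop.push_back(p); // next_pop is the std::vector<tree_t>& carried by selector_args
    };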

View File

@@ -54,37 +54,14 @@ namespace blt::gp
blt::size_t total_size_bytes = 0; blt::size_t total_size_bytes = 0;
blt::size_t total_used_bytes = 0; blt::size_t total_used_bytes = 0;
blt::size_t total_remaining_bytes = 0; blt::size_t total_remaining_bytes = 0;
blt::size_t total_no_meta_bytes = 0;
blt::size_t total_dealloc = 0;
blt::size_t total_dealloc_used = 0;
blt::size_t total_dealloc_remaining = 0;
blt::size_t total_dealloc_no_meta = 0;
blt::size_t blocks = 0;
friend std::ostream& operator<<(std::ostream& stream, const size_data_t& data) friend std::ostream& operator<<(std::ostream& stream, const size_data_t& data)
{ {
stream << "["; stream << "[";
stream << data.total_used_bytes << "/"; stream << data.total_used_bytes << " / " << data.total_size_bytes;
stream << data.total_size_bytes << "("; stream << " ("
stream << (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_size_bytes) * 100) << "%), "; << (data.total_size_bytes != 0 ? (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_size_bytes) *
stream << data.total_used_bytes << "/"; 100) : 0) << "%); space left: " << data.total_remaining_bytes << "]";
stream << data.total_no_meta_bytes << "(";
stream << (static_cast<double>(data.total_used_bytes) / static_cast<double>(data.total_no_meta_bytes) * 100)
<< "%), (empty space: ";
stream << data.total_remaining_bytes << ") blocks: " << data.blocks << " || unallocated space: ";
stream << data.total_dealloc_used << "/";
stream << data.total_dealloc;
if (static_cast<double>(data.total_dealloc) > 0)
stream << "(" << (static_cast<double>(data.total_dealloc_used) / static_cast<double>(data.total_dealloc) * 100) << "%)";
stream << ", ";
stream << data.total_dealloc_used << "/";
stream << data.total_dealloc_no_meta;
if (data.total_dealloc_no_meta > 0)
stream << "(" << (static_cast<double>(data.total_dealloc_used) / static_cast<double>(data.total_dealloc_no_meta * 100))
<< "%)";
stream << ", (empty space: " << data.total_dealloc_remaining << ")]";
return stream; return stream;
} }
}; };
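
With only three fields left in size_data_t, the stream operator now emits one compact bracketed summary. A small usage sketch (library include omitted since the header path is not shown in this compare; stack_allocator::size() returning a size_data_t is inferred from vals_r.size().total_used_bytes later in this compare):

    #include <iostream>

    void print_usage_report(blt::gp::stack_allocator& alloc)
    {
        auto data = alloc.size();   // assumed to yield a size_data_t
        std::cout << data << '\n';  // prints e.g.: [1024 / 4096 (25%); space left: 3072]
    }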
@@ -132,6 +109,8 @@ namespace blt::gp
void insert(const stack_allocator& stack) void insert(const stack_allocator& stack)
{ {
if (stack.empty())
return;
if (size_ < stack.bytes_stored + bytes_stored) if (size_ < stack.bytes_stored + bytes_stored)
expand(stack.bytes_stored + bytes_stored); expand(stack.bytes_stored + bytes_stored);
std::memcpy(data_ + bytes_stored, stack.data_, stack.bytes_stored); std::memcpy(data_ + bytes_stored, stack.data_, stack.bytes_stored);
@@ -140,6 +119,8 @@ namespace blt::gp
void copy_from(const stack_allocator& stack, blt::size_t bytes) void copy_from(const stack_allocator& stack, blt::size_t bytes)
{ {
if (bytes == 0)
return;
if (size_ < bytes + bytes_stored) if (size_ < bytes + bytes_stored)
expand(bytes + bytes_stored); expand(bytes + bytes_stored);
std::memcpy(data_ + bytes_stored, stack.data_ + (stack.bytes_stored - bytes), bytes); std::memcpy(data_ + bytes_stored, stack.data_ + (stack.bytes_stored - bytes), bytes);
@@ -148,6 +129,8 @@ namespace blt::gp
void copy_from(blt::u8* data, blt::size_t bytes) void copy_from(blt::u8* data, blt::size_t bytes)
{ {
if (bytes == 0 || data == nullptr)
return;
if (size_ < bytes + bytes_stored) if (size_ < bytes + bytes_stored)
expand(bytes + bytes_stored); expand(bytes + bytes_stored);
std::memcpy(data_ + bytes_stored, data, bytes); std::memcpy(data_ + bytes_stored, data, bytes);
@@ -156,6 +139,8 @@ namespace blt::gp
void copy_to(blt::u8* data, blt::size_t bytes) void copy_to(blt::u8* data, blt::size_t bytes)
{ {
if (bytes == 0 || data == nullptr)
return;
std::memcpy(data, data_ + (bytes_stored - bytes), bytes); std::memcpy(data, data_ + (bytes_stored - bytes), bytes);
} }
@@ -174,6 +159,10 @@ namespace blt::gp
static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!"); static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!"); static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
constexpr auto size = aligned_size(sizeof(NO_REF)); constexpr auto size = aligned_size(sizeof(NO_REF));
#if BLT_DEBUG_LEVEL > 0
if (bytes_stored < size)
BLT_ABORT("Not enough bytes left to pop!");
#endif
bytes_stored -= size; bytes_stored -= size;
return *reinterpret_cast<T*>(data_ + bytes_stored); return *reinterpret_cast<T*>(data_ + bytes_stored);
} }
@@ -184,16 +173,31 @@ namespace blt::gp
static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!"); static_assert(std::is_trivially_copyable_v<NO_REF> && "Type must be bitwise copyable!");
static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!"); static_assert(alignof(NO_REF) <= MAX_ALIGNMENT && "Type alignment must not be greater than the max alignment!");
auto size = aligned_size(sizeof(NO_REF)) + bytes; auto size = aligned_size(sizeof(NO_REF)) + bytes;
#if BLT_DEBUG_LEVEL > 0
if (bytes_stored < size)
BLT_ABORT(("Not enough bytes in stack to reference " + std::to_string(size) + " bytes requested but " + std::to_string(bytes) +
" bytes stored!").c_str());
#endif
return *reinterpret_cast<NO_REF*>(data_ + bytes_stored - size); return *reinterpret_cast<NO_REF*>(data_ + bytes_stored - size);
} }
void pop_bytes(blt::size_t bytes) void pop_bytes(blt::size_t bytes)
{ {
#if BLT_DEBUG_LEVEL > 0
if (bytes_stored < bytes)
BLT_ABORT(("Not enough bytes in stack to pop " + std::to_string(bytes) + " bytes requested but " + std::to_string(bytes) +
" bytes stored!").c_str());
#endif
bytes_stored -= bytes; bytes_stored -= bytes;
} }
void transfer_bytes(stack_allocator& to, blt::size_t bytes) void transfer_bytes(stack_allocator& to, blt::size_t bytes)
{ {
#if BLT_DEBUG_LEVEL > 0
if (bytes_stored < bytes)
BLT_ABORT(("Not enough bytes in stack to transfer " + std::to_string(bytes) + " bytes requested but " + std::to_string(bytes) +
" bytes stored!").c_str());
#endif
to.copy_from(*this, aligned_size(bytes)); to.copy_from(*this, aligned_size(bytes));
pop_bytes(bytes); pop_bytes(bytes);
} }
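
These checks only exist when BLT_DEBUG_LEVEL is greater than zero, so enabling them is a matter of defining that macro for the build (assuming it is an ordinary preprocessor definition, e.g. -DBLT_DEBUG_LEVEL=1). A small illustration of what they catch, with the library include omitted and assuming a default-constructed allocator is empty:

    blt::gp::stack_allocator alloc;   // freshly constructed, nothing stored
    alloc.pop_bytes(8);               // BLT_DEBUG_LEVEL > 0: aborts with "Not enough bytes in stack to pop ..."
                                      // BLT_DEBUG_LEVEL == 0: bytes_stored silently wraps around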
@@ -297,7 +301,7 @@ namespace blt::gp
if (!mask_r[index]) if (!mask_r[index])
return; return;
} }
from<NO_REF_T<T>>(offset).drop(); from<NO_REF_T<T >>(offset).drop();
} }
} }

@@ -1 +1 @@
Subproject commit 941aa6809c92f05c64ca6624d5898958cfac496d Subproject commit 97990401e2332276b5397060a3ccaf19f07fb999

View File

@@ -243,7 +243,7 @@ namespace blt::gp
vals_r.pop_bytes(static_cast<blt::ptrdiff_t>(total_bytes_after + accumulate_type_sizes(begin_itr, end_itr))); vals_r.pop_bytes(static_cast<blt::ptrdiff_t>(total_bytes_after + accumulate_type_sizes(begin_itr, end_itr)));
// insert the new tree then move back the data from after the original mutation point. // insert the new tree then move back the data from after the original mutation point.
vals_r.insert(std::move(new_vals_r)); vals_r.insert(new_vals_r);
vals_r.copy_from(stack_after_data, total_bytes_after); vals_r.copy_from(stack_after_data, total_bytes_after);
auto before = begin_itr - 1; auto before = begin_itr - 1;
@@ -252,7 +252,7 @@ namespace blt::gp
// this will check to make sure that the tree is in a correct and executable state. it requires that the evaluation is context free! // this will check to make sure that the tree is in a correct and executable state. it requires that the evaluation is context free!
#if BLT_DEBUG_LEVEL >= 2 #if BLT_DEBUG_LEVEL >= 2
BLT_ASSERT(new_vals_r.empty()); // BLT_ASSERT(new_vals_r.empty());
//BLT_ASSERT(stack_after.empty()); //BLT_ASSERT(stack_after.empty());
blt::size_t bytes_expected = 0; blt::size_t bytes_expected = 0;
auto bytes_size = vals_r.size().total_used_bytes; auto bytes_size = vals_r.size().total_used_bytes;
@@ -690,7 +690,7 @@ namespace blt::gp
vals.copy_from(from_ptr, from_bytes); vals.copy_from(from_ptr, from_bytes);
vals.copy_from(after_ptr, after_to_bytes); vals.copy_from(after_ptr, after_to_bytes);
static std::vector<op_container_t> op_copy; static thread_local std::vector<op_container_t> op_copy;
op_copy.clear(); op_copy.clear();
op_copy.insert(op_copy.begin(), ops.begin() + from_child.start, ops.begin() + from_child.end); op_copy.insert(op_copy.begin(), ops.begin() + from_child.start, ops.begin() + from_child.end);
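
The op_copy fix (static becomes static thread_local) is the same scratch-buffer idiom as new_children in program.h: every thread gets its own vector, so capacity is reused across calls without a data race or a lock. The idiom in isolation:

    #include <vector>

    template <typename T, typename Fill>
    std::vector<T>& thread_local_scratch(Fill&& fill)
    {
        // one buffer per calling thread; clear() keeps capacity, so repeated calls avoid reallocation
        static thread_local std::vector<T> buffer;
        buffer.clear();
        fill(buffer);
        return buffer;
    }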

thread_branch.txt (new file, 41 lines)
View File

@@ -0,0 +1,41 @@
Performance counter stats for './cmake-build-release/blt-symbolic-regression-example' (30 runs):
35,671,860,546 branches ( +- 5.05% ) (20.11%)
130,603,525 branch-misses # 0.37% of all branches ( +- 4.61% ) (20.67%)
43,684,408 cache-misses # 9.61% of all cache refs ( +- 3.08% ) (20.97%)
454,604,804 cache-references ( +- 4.53% ) (21.30%)
72,861,649,501 cycles ( +- 5.33% ) (22.00%)
170,811,735,018 instructions # 2.34 insn per cycle ( +- 5.59% ) (22.84%)
0 alignment-faults
33,002 cgroup-switches ( +- 1.71% )
293,932 faults ( +- 4.09% )
1,130,322,318 ns duration_time ( +- 3.73% )
16,750,942,537 ns user_time ( +- 1.71% )
1,165,192,903 ns system_time ( +- 0.87% )
57,551,179,178 L1-dcache-loads ( +- 5.63% ) (22.36%)
214,283,064 L1-dcache-load-misses # 0.37% of all L1-dcache accesses ( +- 5.58% ) (22.13%)
75,685,527 L1-dcache-prefetches ( +- 7.55% ) (22.07%)
1,115,360,458 L1-icache-loads ( +- 3.91% ) (21.67%)
2,868,754 L1-icache-load-misses # 0.26% of all L1-icache accesses ( +- 3.34% ) (21.34%)
65,107,178 dTLB-loads ( +- 8.94% ) (21.00%)
4,971,480 dTLB-load-misses # 7.64% of all dTLB cache accesses ( +- 3.70% ) (20.90%)
452,351 iTLB-loads ( +- 4.80% ) (20.62%)
1,600,933 iTLB-load-misses # 353.91% of all iTLB cache accesses ( +- 3.68% ) (20.62%)
332,075,460 l2_request_g1.all_no_prefetch ( +- 4.59% ) (20.73%)
293,932 page-faults ( +- 4.09% )
293,928 page-faults:u ( +- 4.09% )
3 page-faults:k ( +- 4.92% )
58,806,652,381 L1-dcache-loads ( +- 5.44% ) (20.61%)
216,591,223 L1-dcache-load-misses # 0.38% of all L1-dcache accesses ( +- 5.39% ) (21.02%)
<not supported> LLC-loads
<not supported> LLC-load-misses
1,059,748,012 L1-icache-loads ( +- 4.29% ) (21.55%)
2,615,017 L1-icache-load-misses # 0.23% of all L1-icache accesses ( +- 3.34% ) (21.85%)
65,917,126 dTLB-loads ( +- 8.89% ) (21.78%)
4,717,351 dTLB-load-misses # 7.25% of all dTLB cache accesses ( +- 3.52% ) (22.05%)
459,796 iTLB-loads ( +- 5.92% ) (21.77%)
1,512,986 iTLB-load-misses # 334.47% of all iTLB cache accesses ( +- 3.64% ) (21.26%)
74,656,433 L1-dcache-prefetches ( +- 7.94% ) (20.50%)
<not supported> L1-dcache-prefetch-misses
1.1303 +- 0.0422 seconds time elapsed ( +- 3.73% )