prevent pop functions from deallocating unless they need to

Branch: thread
Author: Brett
Date: 2024-07-13 00:29:41 -04:00
Parent: 034071dae3
Commit: e6ec71da1d

4 changed files with 1386 additions and 14 deletions


@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.25)
-project(blt-gp VERSION 0.0.69)
+project(blt-gp VERSION 0.0.70)
 include(CTest)

dhat.out.293761 (new file, 1374 lines added)
File diff suppressed because it is too large.


@@ -36,8 +36,8 @@ blt::gp::prog_config_t config = blt::gp::prog_config_t()
         .set_initial_max_tree_size(6)
         .set_elite_count(0)
         .set_max_generations(50)
-        .set_pop_size(500)
-        .set_thread_count(1);
+        .set_pop_size(5000)
+        .set_thread_count(0);
 
 blt::gp::type_provider type_system;
 blt::gp::gp_program program{type_system, SEED, config};
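The example configuration now requests a tenfold larger population and a thread count of 0. A minimal sketch of how a zero thread count is commonly resolved, assuming (this is not confirmed by the diff or the blt-gp sources) that 0 means "use every available hardware thread":

#include <cstdio>
#include <thread>

// Assumption: a configured thread count of 0 is resolved to the number of
// hardware threads. This mirrors a common convention and is not blt-gp API.
std::size_t resolve_thread_count(std::size_t configured)
{
    if (configured != 0)
        return configured;
    // hardware_concurrency() may itself return 0 if unknown; fall back to 1
    const auto hw = std::thread::hardware_concurrency();
    return hw == 0 ? 1 : hw;
}

int main()
{
    std::printf("threads: %zu\n", resolve_thread_count(0));
}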


@@ -73,15 +73,14 @@ namespace blt::gp
             if (head->used_bytes_in_block() < static_cast<blt::ptrdiff_t>(aligned_size<T>()))
                 throw std::runtime_error((std::string("Mismatched Types! Not enough space left in block! Bytes: ") += std::to_string(
                         head->used_bytes_in_block()) += " Size: " + std::to_string(sizeof(T))).c_str());
-            if (head->used_bytes_in_block() == 0)
-                move_back();
             // make copy
             T t = *reinterpret_cast<T*>(head->metadata.offset - TYPE_SIZE);
             // call destructor
             reinterpret_cast<T*>(head->metadata.offset - TYPE_SIZE)->~T();
+            // move offset back
             head->metadata.offset -= TYPE_SIZE;
+            if (head->used_bytes_in_block() == 0)
+            {
+                move_back();
+            }
             return t;
         }
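A minimal, self-contained sketch of the reordered pop above: the empty-block check now runs after the value has been copied out and the offset moved back, so a block is only released once it is genuinely empty. This is not blt-gp's allocator; only the names used_bytes_in_block, move_back, and metadata.offset are taken from the diff, everything else (single fixed block, block size, logging) is illustrative.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>

// Toy single-block stack; not the real blt::gp::stack_allocator.
struct toy_stack
{
    struct block
    {
        struct { std::uint8_t* offset = nullptr; } metadata;
        alignas(std::max_align_t) std::uint8_t data[64];
    };

    block storage{};
    block* head = nullptr;

    std::ptrdiff_t used_bytes_in_block() const { return head->metadata.offset - head->data; }

    // stand-in for stepping back off (and possibly freeing) an empty block
    void move_back()
    {
        std::puts("move_back(): head block is empty, releasing it");
        head = nullptr;
    }

    template<typename T>
    void push(const T& value)
    {
        if (head == nullptr)
        {
            head = &storage;
            head->metadata.offset = head->data;
        }
        new (head->metadata.offset) T(value);
        head->metadata.offset += sizeof(T);
    }

    template<typename T>
    T pop()
    {
        // make copy
        T t = *reinterpret_cast<T*>(head->metadata.offset - sizeof(T));
        // call destructor and move the offset back
        reinterpret_cast<T*>(head->metadata.offset - sizeof(T))->~T();
        head->metadata.offset -= sizeof(T);
        // only now, after the pop, release the block if it became empty
        if (used_bytes_in_block() == 0)
            move_back();
        return t;
    }
};

int main()
{
    toy_stack stack;
    stack.push(1);
    stack.push(2);
    std::printf("popped %d\n", stack.pop<int>()); // block still holds data, kept
    std::printf("popped %d\n", stack.pop<int>()); // block now empty, move_back()
}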
@@ -140,9 +139,9 @@ namespace blt::gp
                 if (diff <= 0)
                 {
                     bytes -= head->used_bytes_in_block();
-                    move_back();
                     if (diff == 0)
                         break;
+                    move_back();
                 } else
                 {
                     // otherwise update the offset pointer
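The same idea drives the reordering in this multi-byte pop loop: a head block that is emptied exactly (diff == 0) is now kept instead of being released on the spot. A small sketch of the new control flow, modelling blocks as a vector of used-byte counts (pop_back() stands in for move_back(); none of this is the real allocator):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<long> blocks{16, 16, 16}; // used bytes per block; back() is the head
    long bytes = 32;                      // bytes to pop across blocks

    while (!blocks.empty() && bytes > 0)
    {
        const long diff = blocks.back() - bytes;
        if (diff <= 0)
        {
            bytes -= blocks.back();
            blocks.back() = 0;
            if (diff == 0)
                break;             // head emptied exactly: keep the (now empty) block
            blocks.pop_back();     // more bytes remain, so this block must be released
        }
        else
        {
            blocks.back() = diff;  // partial pop inside the head block
            break;
        }
    }
    // with the old ordering the exactly-emptied head block would also have been
    // released; now it stays allocated for reuse
    std::printf("blocks still allocated: %zu\n", blocks.size());
}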
@@ -164,13 +163,15 @@ namespace blt::gp
                 throw std::runtime_error("This stack is empty!");
             if (head->used_bytes_in_block() < static_cast<blt::ptrdiff_t>(bytes))
                 BLT_ABORT("This stack doesn't contain enough data for this type! This is an invalid runtime state!");
-            if (head->used_bytes_in_block() == 0)
-                move_back();
             auto type_size = aligned_size(bytes);
             auto ptr = to.allocate_bytes(bytes);
             to.head->metadata.offset = static_cast<blt::u8*>(ptr) + type_size;
             std::memcpy(ptr, head->metadata.offset - type_size, type_size);
             head->metadata.offset -= type_size;
+            if (head->used_bytes_in_block() == 0)
+                move_back();
         }
 
         template<typename... Args>
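The byte-transfer path gets the same treatment: copy into the destination, shrink the source offset, and only then decide whether the (now possibly empty) source block should be released. A stripped-down sketch with plain buffers standing in for the allocator's blocks (none of the names here are blt-gp API):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    std::uint8_t src[32]{}, dst[32]{};
    std::uint8_t* src_offset = src; // plays the role of head->metadata.offset
    std::uint8_t* dst_offset = dst; // plays the role of to.head->metadata.offset

    // "push" one value onto the source stack
    const int value = 7;
    std::memcpy(src_offset, &value, sizeof(value));
    src_offset += sizeof(value);

    // transfer: allocate in the destination, copy, then shrink the source
    std::memcpy(dst_offset, src_offset - sizeof(value), sizeof(value));
    dst_offset += sizeof(value);
    src_offset -= sizeof(value);

    // only at this point would the allocator check whether the source block is
    // empty and call move_back(); before this commit it checked up front
    int out = 0;
    std::memcpy(&out, dst, sizeof(out));
    std::printf("transferred %d, source now empty: %s\n", out, src_offset == src ? "yes" : "no");
}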
@@ -218,10 +219,7 @@ namespace blt::gp
             stack_allocator() = default;
 
-            // it should be possible to remove the complex copy contrusctor along with trasnfer functions
-            // simply keep track of the start of the stack, aloing with the current pointer and never dealloacted
-            // it adds another 8 bytes to each block but should prevent the need for copying when you can just reset the stack.
-            // (read copy)
+            // TODO: cleanup this allocator!
             // if you keep track of type size information you can memcpy between stack allocators as you already only allow trivially copyable types
             stack_allocator(const stack_allocator& copy)
             {