Skip to content

Commit

Permalink
More WIP
Browse files · Browse the repository at this point in the history
  • Loading branch information
mjp41 committed Feb 24, 2025
1 parent c12afa6 commit 22343cd
Showing 1 changed file with 70 additions and 58 deletions.
128 changes: 70 additions & 58 deletions src/snmalloc/mem/corealloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -831,8 +831,8 @@ namespace snmalloc
return meta->return_objects(length);
}

template<ZeroMem zero_mem>
SNMALLOC_SLOW_PATH capptr::Alloc<void>
template<ZeroMem zero_mem, typename CheckInit>
SNMALLOC_FAST_PATH capptr::Alloc<void>
small_alloc(smallsizeclass_t sizeclass, freelist::Iter<>& fast_free_list)
{
void* result = SecondaryAllocator::allocate(
Expand All @@ -842,7 +842,18 @@ namespace snmalloc
});

if (result != nullptr)
return capptr::Alloc<void>::unsafe_from(result);
{
if constexpr (zero_mem == YesZero)
Config::Pal::zero(result, sizeclass_to_size(sizeclass));

return CheckInit::check_init(
[result]() { return capptr::Alloc<void>::unsafe_from(result); },
[](Allocator*, void* result) {
return capptr::Alloc<void>::unsafe_from(result);
},
result);
}

// Look to see if we can grab a free list.
auto& sl = alloc_classes[sizeclass].available;
if (SNMALLOC_LIKELY(alloc_classes[sizeclass].length > 0))
Expand All @@ -853,7 +864,8 @@ namespace snmalloc
if (SNMALLOC_UNLIKELY(alloc_classes[sizeclass].length == 1))
{
if (entropy.next_bit() == 0)
return small_alloc_slow<zero_mem>(sizeclass, fast_free_list);
return small_alloc_slow<zero_mem, CheckInit>(
sizeclass, fast_free_list);
}
}

Expand Down Expand Up @@ -884,7 +896,7 @@ namespace snmalloc
auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
}
return small_alloc_slow<zero_mem>(sizeclass, fast_free_list);
return small_alloc_slow<zero_mem, CheckInit>(sizeclass, fast_free_list);
}

/**
Expand All @@ -905,57 +917,66 @@ namespace snmalloc
}
}

template<ZeroMem zero_mem>
template<ZeroMem zero_mem, typename CheckInit>
SNMALLOC_SLOW_PATH capptr::Alloc<void> small_alloc_slow(
smallsizeclass_t sizeclass, freelist::Iter<>& fast_free_list)
{
size_t rsize = sizeclass_to_size(sizeclass);
return CheckInit::check_init(
[this, sizeclass, &fast_free_list]() -> capptr::Alloc<void> {
size_t rsize = sizeclass_to_size(sizeclass);

// No existing free list get a new slab.
size_t slab_size = sizeclass_to_slab_size(sizeclass);
// No existing free list get a new slab.
size_t slab_size = sizeclass_to_slab_size(sizeclass);

#ifdef SNMALLOC_TRACING
message<1024>("small_alloc_slow rsize={} slab size={}", rsize, slab_size);
message<1024>(
"small_alloc_slow rsize={} slab size={}", rsize, slab_size);
#endif

auto [slab, meta] = Config::Backend::alloc_chunk(
get_backend_local_state(),
slab_size,
PagemapEntry::encode(
public_state(), sizeclass_t::from_small_class(sizeclass)),
sizeclass_t::from_small_class(sizeclass));
auto [slab, meta] = Config::Backend::alloc_chunk(
get_backend_local_state(),
slab_size,
PagemapEntry::encode(
public_state(), sizeclass_t::from_small_class(sizeclass)),
sizeclass_t::from_small_class(sizeclass));

if (slab == nullptr)
{
return nullptr;
}
if (slab == nullptr)
{
return nullptr;
}

// Set meta slab to empty.
meta->initialise(
sizeclass, address_cast(slab), freelist::Object::key_root);
// Set meta slab to empty.
meta->initialise(
sizeclass, address_cast(slab), freelist::Object::key_root);

// Build a free list for the slab
alloc_new_list(slab, meta, rsize, slab_size, entropy);
// Build a free list for the slab
alloc_new_list(slab, meta, rsize, slab_size, entropy);

auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Config>(backend_state_ptr(), p);
};
auto [p, still_active] = BackendSlabMetadata::alloc_free_list(
domesticate, meta, fast_free_list, entropy, sizeclass);
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Config>(backend_state_ptr(), p);
};
auto [p, still_active] = BackendSlabMetadata::alloc_free_list(
domesticate, meta, fast_free_list, entropy, sizeclass);

if (still_active)
{
alloc_classes[sizeclass].length++;
alloc_classes[sizeclass].available.insert(meta);
}
else
{
laden.insert(meta);
}
if (still_active)
{
alloc_classes[sizeclass].length++;
alloc_classes[sizeclass].available.insert(meta);
}
else
{
laden.insert(meta);
}

auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
},
[](Allocator* a, smallsizeclass_t sizeclass) {
return a->small_alloc<zero_mem, CheckInitNoOp>(
sizeclass_to_size(sizeclass));
},
sizeclass);
}

template<ZeroMem zero_mem, typename Slowpath, typename Domesticator>
Expand Down Expand Up @@ -1136,23 +1157,14 @@ namespace snmalloc
return finish_alloc<zero_mem, Config>(p, sizeclass);
}

return CheckInit::check_init(
[this, sizeclass, fl]() -> capptr::Alloc<void> {
return handle_message_queue(
[](
Allocator* alloc,
smallsizeclass_t sizeclass,
freelist::Iter<>* fl) -> capptr::Alloc<void> {
return alloc->small_alloc<zero_mem>(sizeclass, *fl);
},
this,
sizeclass,
fl);
},
[](Allocator* a, size_t size) {
return a->template small_alloc<zero_mem, CheckInitNoOp>(size);
return handle_message_queue(
[](Allocator* alloc, smallsizeclass_t sizeclass, freelist::Iter<>* fl)
-> capptr::Alloc<void> {
return alloc->small_alloc<zero_mem, CheckInit>(sizeclass, *fl);
},
size);
this,
sizeclass,
fl);
}

/**
Expand Down

0 comments on commit 22343cd

Please sign in to comment.