tweak: improve value bounds
Force67 committed May 16, 2024
1 parent 6bbab5f commit 7a9758c
Showing 4 changed files with 110 additions and 40 deletions.
64 changes: 35 additions & 29 deletions base/allocator/eq_alloc/bucket_allocator.cc
@@ -8,6 +8,7 @@
#include <base/allocator/eq_alloc/bucket_allocator.h>
#include <base/allocator/eq_alloc/eq_allocation_constants.h>
#include <base/math/alignment.h>
#include <base/math/value_bounds.h>

#include <base/threading/lock_guard.h>

@@ -19,6 +20,12 @@ constexpr i32 kPageAcquireAttempts = 3;
bool IntersectsReasonable(mem_size given, mem_size expected) {
return given <= base::NextPowerOf2(expected);
}

bool IsValidAllocation(mem_size size, mem_size alignment) {
return size <= eq_allocation_constants::kBucketThreshold &&
alignment <= eq_allocation_constants::kBucketThreshold &&
(alignment & (alignment - 1)) == 0;
}
} // namespace

BucketAllocator::BucketAllocator(PageTable& t) : page_table_(t) {
@@ -30,29 +37,23 @@ BucketAllocator::BucketAllocator(PageTable& t) : page_table_(t) {
void* BucketAllocator::Allocate(mem_size requested_size, mem_size alignment) {
// this case should be validated by the memory router.
#if defined(BASE_MEM_CORE_DEBUG)
// cant handle this...
if (requested_size > eq_allocation_constants::kBucketThreshold ||
alignment > eq_allocation_constants::kBucketThreshold)
return nullptr;
if ((alignment & (alignment - 1)) != 0) {
// alignment is not a power of 2
if (!IsValidAllocation(requested_size, alignment)) {
DEBUG_TRAP;
return nullptr;
}
#endif

// align to a sensible boundary
requested_size += sizeof(Bucket);
requested_size = base::Align(requested_size, alignment);
const auto aligned_size =
base::Min(base::NextPowerOf2(requested_size),
eq_allocation_constants::kBucketThreshold);

{
base::NonOwningScopedLockGuard _(lock_);
(void)_;

i32 attempts = 0;
byte* page_hint = nullptr;
base::NonOwningScopedLockGuard _(lock_);
(void)_;
do {
if (void* block = AcquireMemory(requested_size, page_hint))
if (void* block = AcquireMemory(requested_size, aligned_size))
return block;
if (!TryAcquireNewPage(page_table_, page_hint))
attempts++;
@@ -83,7 +84,7 @@ void* BucketAllocator::ReAllocate(void* former_block,
return nullptr;
}

mem_size former_size = former_bucket->size_;
const mem_size former_size = former_bucket->size();

if (former_size >= new_size) {
// If the former block is large enough, simply return it
@@ -110,15 +111,17 @@
return new_block;
}

void* BucketAllocator::AcquireMemory(mem_size size, byte* hint) {
void* BucketAllocator::AcquireMemory(mem_size user_size,
mem_size size,
byte* hint) {
// DCHECK(!lock_.held());

if (hint) {
// do something with the hint index
}

byte* page_head = nullptr;
if (Bucket* bucket = FindFreeBucket(size, page_head)) {
if (Bucket* bucket = FindFreeBucket(user_size, size, page_head)) {
// return user memory
return reinterpret_cast<void*>((page_head + sizeof(HeaderNode)) +
bucket->offset_);
@@ -159,7 +162,7 @@ bool BucketAllocator::DoAnyBucketsIntersect(const PageTag& tag) {
return true;
}
// Update the end of the previous bucket for the next iteration
prev_end_offset = bucket.offset_ + bucket.size_;
prev_end_offset = bucket.offset_ + bucket.size();
}
}

@@ -168,7 +171,8 @@ bool BucketAllocator::DoAnyBucketsIntersect(const PageTag& tag) {

// https://source.chromium.org/chromium/chromium/src/+/main:base/atomic_sequence_num.h;bpv=1;bpt=1
BucketAllocator::Bucket* BucketAllocator::FindFreeBucket(
mem_size requested_size,
mem_size user_size,
mem_size size, /*the aligned, full size of the allocation*/
byte*& page_start) {
for (base::LinkNode<HeaderNode>* node = page_list_.head();
node != page_list_.end(); node = node->next()) {
@@ -180,7 +184,7 @@ BucketAllocator::Bucket* BucketAllocator::FindFreeBucket(
if (tag.bucket_count == 0) {
Bucket* free_bucket = new (
page_end - (sizeof(Bucket) * (node->value()->tag.bucket_count + 1)))
Bucket(/*offset*/ 0, /*size*/ requested_size,
Bucket(/*offset*/ 0, user_size, size,
/*flags*/ Bucket::kUsed);
node->value()->tag.bucket_count++;
return free_bucket;
@@ -192,11 +196,12 @@
for (mem_size i = 0; i < tag.bucket_count; i++) {
Bucket* buck =
reinterpret_cast<Bucket*>(page_end - (sizeof(Bucket) * (i + 1)));
if ((buck->flags_ & Bucket::kReleased) && buck->size_ >= requested_size) {
if ((buck->IsFree()) && buck->size() >= size) {
buck->offset_ =
buck->offset_; // keep the existing offset as-is; a smarter path could reallocate here
buck->flags_ = Bucket::kUsed;
buck->size_ = static_cast<u16>(requested_size);
buck->SetUsed();
buck->SetSize(size);
buck->SetUserSize(user_size);
return buck;
}
}
@@ -209,15 +214,16 @@
// get the last metadata entry:
Bucket* last_buck = reinterpret_cast<Bucket*>(
page_end - (sizeof(Bucket) * (node->value()->tag.bucket_count)));
u64 last_size = data_end - (data_start + last_buck->offset_ + last_buck->size_);
if (last_size < requested_size) {
u64 last_size =
data_end - (data_start + last_buck->offset_ + last_buck->size());
if (last_size < size) {
DEBUG_TRAP;
return nullptr;
}

auto offset = last_buck->offset_ + last_buck->size_;
auto offset = last_buck->offset_ + last_buck->size();
Bucket* free_bucket =
new (data_end) Bucket(/*offset*/ offset, /*size*/ requested_size,
new (data_end) Bucket(/*offset*/ offset, user_size, size,
/*flags*/ Bucket::kUsed);
node->value()->tag.bucket_count++;
return free_bucket;
Expand All @@ -244,7 +250,7 @@ BucketAllocator::Bucket* BucketAllocator::FindBucket(pointer_size address) {
if (address >=
(reinterpret_cast<pointer_size>(page_start) + buck->offset_) &&
address < (reinterpret_cast<pointer_size>(page_start) +
(buck->offset_ + buck->size_))) {
(buck->offset_ + buck->size()))) {
return buck;
}
}
@@ -254,8 +260,8 @@ BucketAllocator::Bucket* BucketAllocator::FindBucket(pointer_size address) {

mem_size BucketAllocator::Free(void* pointer) {
if (Bucket* b = FindBucket(reinterpret_cast<pointer_size>(pointer))) {
const auto size = b->size_;
b->flags_ = Bucket::kReleased;
const auto size = b->user_size(); // report the user-requested size, not the aligned bucket size
b->SetFree();
return size;
}
DCHECK(false, "BucketAllocator::Free(): Failed to release memory");
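A quick numeric walk-through of the new bound in Allocate() above: the request first gains room for the Bucket record, is aligned, and the power-of-two rounding is then clamped to the bucket threshold. The sketch below is a standalone illustration, not code from the commit; it assumes sizeof(Bucket) is 8 bytes (the header's static_assert below pins it to one pointer) and kBucketThreshold is 1024 (matching the 11-bit size fields), and NextPowerOf2/Align are local stand-ins for the base:: helpers.

#include <algorithm>
#include <cstdint>

namespace {
// Stand-in for base::NextPowerOf2: round up to the next power of two.
constexpr std::uint64_t NextPowerOf2(std::uint64_t v) {
  v--;
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  v |= v >> 32;
  return v + 1;
}

// Stand-in for base::Align; valid for power-of-two `to` only.
constexpr std::uint64_t Align(std::uint64_t n, std::uint64_t to) {
  return (n + to - 1) & ~(to - 1);
}

constexpr std::uint64_t kBucketThreshold = 1024;  // assumed value
constexpr std::uint64_t kBucketHeaderSize = 8;    // assumed sizeof(Bucket)

constexpr std::uint64_t AlignedSizeFor(std::uint64_t requested,
                                       std::uint64_t alignment) {
  requested += kBucketHeaderSize;           // room for the Bucket record
  requested = Align(requested, alignment);  // e.g. 108 -> 112 for alignment 16
  return std::min(NextPowerOf2(requested), kBucketThreshold);
}

// 100-byte request with 16-byte alignment: 108 -> 112 -> 128.
static_assert(AlignedSizeFor(100, 16) == 128, "clamp sketch");
// A large request is capped at the threshold instead of overflowing the
// 11-bit aligned_size field introduced in the header below.
static_assert(AlignedSizeFor(900, 16) == 1024, "clamp sketch");
}  // namespace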
49 changes: 38 additions & 11 deletions base/allocator/eq_alloc/bucket_allocator.h
@@ -26,27 +26,54 @@ class BucketAllocator final : public Allocator {
mem_size Free(void* block) override;

private:
void* AcquireMemory(mem_size size, byte* hint = nullptr);
void* AcquireMemory(mem_size user_size,
mem_size size,
byte* hint = nullptr);

private:
PageTable& page_table_;
base::SpinningMutex lock_;

struct Bucket {
u32 offset_{0}; // offset starting from page_base
u16 size_{0};

enum Flags : u16 {
kNone,
kReleased = 1 << 1,
kUsed = 1 << 2,
};
u16 flags_{kUsed};
union Value {
struct {
// 11 bits per field: enough for sizes up to the 1024-byte threshold
// (the field itself can hold up to 2047)
u32 user_size : 11; // this is the allocation without alignment
u32 aligned_size : 11; // the actual size of the allocation
u32 flags : 10;
};
u32 raw;

Value() : raw(0) {}
explicit Value(uint32_t val) : raw(val) {}
};
Value value_;

explicit Bucket(u32 offset, u16 user_size, u16 size, Flags flags)
: offset_(offset) {
value_.user_size = user_size;
value_.aligned_size = size;
value_.flags = flags;
}

inline bool IsinUse() const { return value_.flags & Flags::kUsed; }
inline bool IsFree() const { return value_.flags & Flags::kReleased; }
inline void SetFree() { value_.flags = Flags::kReleased; }
inline void SetUsed() { value_.flags = Flags::kUsed; }

explicit Bucket(u32 offset, u16 size, Flags flags)
: offset_(offset), size_(size), flags_(flags) {}
inline void SetUserSize(u32 size) { value_.user_size = size; }
inline void SetSize(u32 size) { value_.aligned_size = size; }

inline bool IsinUse() const { return flags_ & Flags::kUsed; }
inline u32 user_size() const { return value_.user_size; }
inline u32 size() const { return value_.aligned_size; }
};
static_assert(sizeof(Bucket) == sizeof(pointer_size), "Bucket is too fat");

@@ -88,10 +115,10 @@
struct PageTag {
base::Atomic<mem_size> ref_count; // TODO: impl it
base::Atomic<mem_size> bucket_count;
mem_size
page_size; // not really needed atm since we know the size is always 65k,
// but if we wanna go for a hybrid model, it might be worth it
//
mem_size page_size; // not really needed atm since we know the size is
// always 65k, but if we wanna go for a hybrid model,
// it might be worth it
//

// better not to ask: we need this because we lose 16 bytes to our ancestor
// as well
@@ -132,6 +159,6 @@
}

// void TakeMemoryChunk(Bucket&, uint8_t* start_hint, mem_size req_size);
Bucket* FindFreeBucket(mem_size requested_size, byte*&);
Bucket* FindFreeBucket(mem_size actual_size, mem_size aligned_size, byte*&);
};
} // namespace base
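A standalone sketch (not part of the commit) of the packed Bucket layout above: both sizes and the flags share a single 32-bit word next to the 32-bit offset, which is what keeps sizeof(Bucket) at one pointer-sized slot. The field widths mirror the header; the bit order inside raw is implementation-defined, so the sketch only reads the fields back rather than checking raw.

#include <cassert>
#include <cstdint>

struct Bucket {
  std::uint32_t offset_ = 0;  // offset from the page base
  union Value {
    // Anonymous struct in a union: a common compiler extension,
    // mirroring the original header.
    struct {
      std::uint32_t user_size : 11;     // request size before alignment
      std::uint32_t aligned_size : 11;  // rounded size actually reserved
      std::uint32_t flags : 10;         // kReleased / kUsed style bits
    };
    std::uint32_t raw;
    Value() : raw(0) {}
  } value_;
};

static_assert(sizeof(Bucket) == 8, "offset plus packed value fit in 8 bytes");

int main() {
  Bucket b;
  b.value_.user_size = 100;     // what the caller asked for
  b.value_.aligned_size = 128;  // what the allocator reserved
  b.value_.flags = 1u << 2;     // kUsed in the real enum
  // The packed fields survive a round trip through the union.
  assert(b.value_.user_size == 100 && b.value_.aligned_size == 128);
  return 0;
}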
4 changes: 4 additions & 0 deletions base/math/alignment.h
@@ -36,13 +36,16 @@ constexpr inline T Align(const T n, const T to) {
// multiple functions gets inlined.
return ((n + to - 1) & ~(to - 1U));
}
static_assert(Align(69, 1024) == 1024, "Alignment failed");

// align to skew
template <typename T>
constexpr inline T Align(const T n, const T to, T skew) {
skew %= to;
return ((n + to - 1 - skew) & ~(to - 1U)) + skew;
}
static_assert(Align(69, 1024, 1) == 1025, "Alignment failed");


template <typename T>
inline pointer_size AlignAddress(const void* address, T align) {
@@ -57,4 +60,5 @@
constexpr inline bool IsAligned(T lhs, const mem_size byte_size) {
return byte_size % lhs == 0;
}
static_assert(IsAligned(4, 16), "Alignment failed");
} // namespace base
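One observation on the new asserts (an illustration, not part of the file): the mask ~(to - 1U) in Align() is only a contiguous bit mask when `to` is a power of two, which is also why the IsValidAllocation() check added in bucket_allocator.cc rejects non-power-of-two alignments. With a non-power-of-two bound the result silently misrounds:

#include <base/math/alignment.h>

// Hypothetical misuse, shown only to motivate the power-of-two check:
// to = 1000 gives to - 1 = 0b1111100111, so the mask has holes and
// Align(69, 1000) evaluates to 1032 rather than 1000.
static_assert(base::Align(69, 1000) == 1032, "not a multiple of 1000");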
33 changes: 33 additions & 0 deletions base/math/value_bounds.h
@@ -0,0 +1,33 @@
// Copyright (C) 2024 Vincent Hengel.
// For licensing information see LICENSE at the root of this distribution.
#pragma once

namespace base {
template <typename T>
inline T Min(T a, T b) {
return (a < b) ? a : b;
}

template <typename T>
inline T Max(T a, T b) {
return (a > b) ? a : b;
}

template <typename T>
inline T Min(const T* values, int count) {
T min_val = values[0];
for (int i = 1; i < count; i++) {
min_val = (values[i] < min_val) ? values[i] : min_val;
}
return min_val;
}

template <typename T>
inline T Max(const T* values, int count) {
T max_val = values[0];
for (int i = 1; i < count; i++) {
max_val = (values[i] > max_val) ? values[i] : max_val;
}
return max_val;
}
} // namespace base
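A short usage sketch for the new header (assumed, not from the commit): the scalar Min() is what the Allocate() change above uses to clamp the rounded size, and the pointer overloads reduce over a small buffer.

#include <cstddef>

#include <base/math/value_bounds.h>

void ValueBoundsExample() {
  // Clamp a rounded-up allocation size to the bucket threshold.
  std::size_t clamped = base::Min<std::size_t>(2048, 1024);  // -> 1024

  // Reduce over a small array with the pointer overloads.
  int samples[] = {7, 3, 9, 1};
  int lo = base::Min(samples, 4);  // -> 1
  int hi = base::Max(samples, 4);  // -> 9

  (void)clamped;
  (void)lo;
  (void)hi;
}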
