Pass size to upb_alloc when freeing an arena
The API of upb_alloc_func implies this would already be the case, and this is useful for calling `free_sized` in the future, which can be faster for some allocators as it allows skipping a bucket lookup.

PiperOrigin-RevId: 713536794
protobuf-github-bot authored and copybara-github committed Jan 9, 2025
1 parent 61be835 commit 66de8e7
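
For context on the `free_sized` mention: C23 standardizes `void free_sized(void* ptr, size_t size)`, and an allocator that buckets by size can use the reported size to skip the lookup that plain `free()` must do. A minimal sketch, assuming a C23 toolchain whose libc actually ships `free_sized` (the function name and fallback logic below are illustrative, not part of this commit):

```c
#include <stddef.h>
#include <stdlib.h>

#include "upb/mem/alloc.h"

// Illustrative upb_alloc_func: in upb's protocol, size == 0 means "free",
// ptr == NULL means "malloc", and anything else is a realloc.
static void* sized_allocfunc(upb_alloc* alloc, void* ptr, size_t oldsize,
                             size_t size) {
  (void)alloc;
  if (size == 0) {
    if (oldsize != 0) {
      free_sized(ptr, oldsize);  // size known: skip the bucket lookup
    } else {
      free(ptr);  // size unreported: ordinary free
    }
    return NULL;
  }
  return realloc(ptr, size);  // realloc(NULL, size) behaves like malloc
}
```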
Showing 4 changed files with 60 additions and 12 deletions.
1 change: 1 addition & 0 deletions upb/mem/BUILD
@@ -48,6 +48,7 @@ cc_test(
         "//upb:port",
         "@com_google_absl//absl/base:core_headers",
         "@com_google_absl//absl/cleanup",
+        "@com_google_absl//absl/container:flat_hash_map",
         "@com_google_absl//absl/random",
         "@com_google_absl//absl/random:distributions",
         "@com_google_absl//absl/synchronization",
5 changes: 5 additions & 0 deletions upb/mem/alloc.h
@@ -50,6 +50,11 @@ UPB_INLINE void upb_free(upb_alloc* alloc, void* ptr) {
   alloc->func(alloc, ptr, 0, 0);
 }

+UPB_INLINE void upb_free_sized(upb_alloc* alloc, void* ptr, size_t size) {
+  UPB_ASSERT(alloc);
+  alloc->func(alloc, ptr, size, 0);
+}
+
 // The global allocator used by upb. Uses the standard malloc()/free().

 extern upb_alloc upb_alloc_global;
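
A hypothetical caller-side sketch (not part of the diff) of the new helper: a caller that remembers its request size can hand it back on free, and the allocator's `func` then receives it as `oldsize`:

```c
#include "upb/mem/alloc.h"

void demo(void) {
  void* p = upb_malloc(&upb_alloc_global, 128);
  if (!p) return;
  // ... use the 128-byte buffer ...
  upb_free_sized(&upb_alloc_global, p, 128);  // func sees oldsize == 128
}
```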
25 changes: 13 additions & 12 deletions upb/mem/arena.c
@@ -29,7 +29,7 @@ void upb_Arena_SetMaxBlockSize(size_t max) {

 typedef struct upb_MemBlock {
   struct upb_MemBlock* next;
-  uint32_t size;
+  size_t size;
   // Data follows.
 } upb_MemBlock;

@@ -283,17 +283,18 @@ uint32_t upb_Arena_DebugRefCount(const upb_Arena* a) {
   return (uint32_t)_upb_Arena_RefCountFromTagged(tagged);
 }

-static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
+static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t available,
+                                size_t block_size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;

-  block->size = (uint32_t)size;
+  block->size = block_size;
   // Insert into linked list.
   block->next = ai->blocks;
   ai->blocks = block;

   a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
-  a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
+  a->UPB_PRIVATE(end) = UPB_PTR_AT(block, available, char);

   UPB_POISON_MEMORY_REGION(a->UPB_PRIVATE(ptr),
                            a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr));
@@ -323,7 +324,7 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
       upb_malloc(_upb_ArenaInternal_BlockAlloc(ai), block_size);

   if (!block) return false;
-  _upb_Arena_AddBlock(a, block, block_size);
+  _upb_Arena_AddBlock(a, block, block_size, block_size);
   // Atomic add not required here, as threads won't race allocating blocks, plus
   // atomic fetch-add is slower than load/add/store on arm devices compiled
   // targetting pre-v8.1. Relaxed order is safe as nothing depends on order of
@@ -349,25 +350,25 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {

   // We need to malloc the initial block.
   char* mem;
-  size_t n = first_block_overhead + 256;
-  if (!alloc || !(mem = upb_malloc(alloc, n))) {
+  size_t block_size = first_block_overhead + 256;
+  if (!alloc || !(mem = upb_malloc(alloc, block_size))) {
     return NULL;
   }

-  a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
-  n -= sizeof(upb_ArenaState);
+  size_t available = block_size - sizeof(upb_ArenaState);
+  a = UPB_PTR_AT(mem, available, upb_ArenaState);

   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.previous_or_tail,
                   _upb_Arena_TaggedFromTail(&a->body));
-  upb_Atomic_Init(&a->body.space_allocated, n);
+  upb_Atomic_Init(&a->body.space_allocated, block_size);
   a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
   UPB_TSAN_INIT_PUBLISHED(&a->body);

-  _upb_Arena_AddBlock(&a->head, mem, n);
+  _upb_Arena_AddBlock(&a->head, mem, available, block_size);

   return &a->head;
 }
@@ -435,7 +436,7 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
   while (block != NULL) {
     // Load first since we are deleting block.
     upb_MemBlock* next_block = block->next;
-    upb_free(block_alloc, block);
+    upb_free_sized(block_alloc, block, block->size);
     block = next_block;
   }
   if (alloc_cleanup != NULL) {
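
One subtlety worth spelling out, as a sketch of why `_upb_Arena_AddBlock` now takes two sizes (a reading of the hunks above, not verbatim source): the first block created by `_upb_Arena_InitSlow` carries the `upb_ArenaState` at its tail, so fewer bytes are available for arena allocations than were requested from the allocator, yet the full request is what must be reported back through `upb_free_sized`:

```c
// Rough layout of the first block after _upb_Arena_InitSlow:
//
//   mem                            mem + available       mem + block_size
//    | upb_MemBlock hdr | data ... | upb_ArenaState       |
//
// `available` bounds what the arena may hand out (it becomes
// a->UPB_PRIVATE(end)); `block_size` is what upb_malloc was asked for, so
// it is what block->size must record for the sized free in
// _upb_Arena_DoFree. Widening block->size from uint32_t to size_t
// presumably keeps that recorded value exact for any block size.
```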
41 changes: 41 additions & 0 deletions upb/mem/arena_test.cc
@@ -16,12 +16,14 @@
 #include <cstdlib>
 #include <memory>
 #include <thread>
+#include <type_traits>
 #include <vector>

 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include "absl/base/thread_annotations.h"
 #include "absl/cleanup/cleanup.h"
+#include "absl/container/flat_hash_map.h"
 #include "absl/random/distributions.h"
 #include "absl/random/random.h"
 #include "absl/synchronization/mutex.h"
@@ -69,6 +71,45 @@ TEST(ArenaTest, ArenaWithAllocCleanup) {
   EXPECT_TRUE(alloc.ran_cleanup);
 }

+struct SizeTracker {
+  upb_alloc alloc;
+  upb_alloc* delegate_alloc;
+  absl::flat_hash_map<void*, size_t>* sizes;
+};
+
+static_assert(std::is_standard_layout<SizeTracker>());
+
+static void* size_checking_allocfunc(upb_alloc* alloc, void* ptr,
+                                     size_t oldsize, size_t size) {
+  SizeTracker* size_alloc = reinterpret_cast<SizeTracker*>(alloc);
+  void* result = size_alloc->delegate_alloc->func(alloc, ptr, oldsize, size);
+  if (ptr != nullptr) {
+    UPB_ASSERT(size_alloc->sizes->at(ptr) == oldsize);
+    size_alloc->sizes->erase(ptr);
+  }
+  if (result != nullptr) {
+    size_alloc->sizes->emplace(result, size);
+  }
+  return result;
+}
+
+TEST(ArenaTest, SizedFree) {
+  absl::flat_hash_map<void*, size_t> sizes;
+  SizeTracker alloc;
+  alloc.alloc.func = size_checking_allocfunc;
+  alloc.delegate_alloc = &upb_alloc_global;
+  alloc.sizes = &sizes;
+
+  upb_Arena* arena = upb_Arena_Init(nullptr, 0, &alloc.alloc);
+  (void)upb_Arena_Malloc(arena, 500);
+  void* to_resize = upb_Arena_Malloc(arena, 2000);
+  void* resized = upb_Arena_Realloc(arena, to_resize, 2000, 4000);
+  upb_Arena_ShrinkLast(arena, resized, 4000, 1);
+  EXPECT_GT(sizes.size(), 0);
+  upb_Arena_Free(arena);
+  EXPECT_EQ(sizes.size(), 0);
+}
+
 TEST(ArenaTest, ArenaFuse) {
   upb_Arena* arena1 = upb_Arena_New();
   upb_Arena* arena2 = upb_Arena_New();
