author     David Robillard <d@drobilla.net>    2022-01-12 18:41:33 -0500
committer  David Robillard <d@drobilla.net>    2022-01-28 21:57:29 -0500
commit     046f7307653a233199ed4215a1e535b598566374 (patch)
tree       2a1ae3a6d07498f09c79bf2c138121138eb2f104
parent     c02d28085a1f81b542df62fe97a530bb6cbce86d (diff)
[TESTED] Use a custom allocator to simplify SerdNodes implementation
This also reduces code duplication in general, since the "measure with the constructor, then allocate and actually construct" pattern now (more or less) needs to be implemented in only one place for a given node type.
-rw-r--r--   src/nodes.c   266
1 file changed, 137 insertions, 129 deletions
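
For context, here is a minimal standalone sketch of the "measure with the constructor, then allocate and actually construct" pattern that this commit factors out of the node constructors. The names (WriteResult, construct_thing, make_thing) are hypothetical and not serd API; the shape mirrors the serd_node_construct_*() calls removed in the diff below.

    /* Hypothetical sketch of the measure-then-construct pattern; not serd API. */
    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      int    status; /* Zero on success, nonzero (e.g. overflow) on failure */
      size_t count;  /* Bytes required (on overflow) or written (on success) */
    } WriteResult;

    /* Writes a "node" into buf if it fits, otherwise reports the size needed */
    static WriteResult
    construct_thing(const size_t buf_size, void* const buf, const char* const str)
    {
      const size_t needed = strlen(str) + 1u;

      if (!buf || buf_size < needed) {
        return (WriteResult){1, needed}; /* "Overflow": caller must allocate */
      }

      memcpy(buf, str, needed);
      return (WriteResult){0, needed};
    }

    static char*
    make_thing(const char* const str)
    {
      /* 1. Measure: call the constructor with no buffer to get the size */
      WriteResult r = construct_thing(0u, NULL, str);

      /* 2. Allocate exactly that much space */
      char* const buf = (char*)malloc(r.count);
      if (!buf) {
        return NULL;
      }

      /* 3. Actually construct; never fails when given sufficient space */
      r = construct_thing(r.count, buf, str);
      assert(!r.status);
      return buf;
    }

After this commit, that dance is needed in only one place per node type, and the serd_nodes_*() functions below simply call the existing serd_new_*() constructors with &nodes->allocator.base.
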
diff --git a/src/nodes.c b/src/nodes.c
index 84c6b696..c5da3cd3 100644
--- a/src/nodes.c
+++ b/src/nodes.c
@@ -1,5 +1,5 @@
/*
- Copyright 2011-2020 David Robillard <d@drobilla.net>
+ Copyright 2011-2022 David Robillard <d@drobilla.net>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
@@ -75,9 +75,65 @@ typedef struct {
char body[MAX_STATIC_NODE_SIZE];
} StaticNode;
+/**
+ Allocator for allocating entries in the node hash table.
+
+ This allocator implements only the methods used by the serd_node_*
+ constructors, and transparently increases the allocation size so there is
+ room for an extra NodesEntryHead at the start. This allows the serd_node_*
+ constructors to be re-used here, even though the table stores entries (nodes
+ with an extra header) rather than node pointers directly.
+*/
+typedef struct {
+ SerdAllocator base; ///< Implementation of SerdAllocator (base "class")
+ SerdAllocator* real; ///< Underlying "real" memory allocator
+} SerdNodesEntryAllocator;
+
+SERD_MALLOC_FUNC
+static void*
+serd_nodes_entry_aligned_alloc(SerdAllocator* const allocator,
+ const size_t alignment,
+ const size_t size)
+{
+ SerdAllocator* const real = ((SerdNodesEntryAllocator*)allocator)->real;
+
+ void* const ptr =
+ real->aligned_alloc(real, alignment, sizeof(NodesEntryHead) + size);
+
+ return ptr ? (((uint8_t*)ptr) + sizeof(NodesEntryHead)) : NULL;
+}
+
+static void
+serd_nodes_entry_aligned_free(SerdAllocator* const allocator, void* const ptr)
+{
+ SerdAllocator* const real = ((SerdNodesEntryAllocator*)allocator)->real;
+
+ if (ptr) {
+ real->aligned_free(real, (((uint8_t*)ptr) - sizeof(NodesEntryHead)));
+ }
+}
+
+static SerdNodesEntryAllocator
+serd_nodes_entry_allocator(SerdAllocator* const real)
+{
+ const SerdNodesEntryAllocator entry_allocator = {
+ {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ serd_nodes_entry_aligned_alloc,
+ serd_nodes_entry_aligned_free,
+ },
+ real ? real : serd_default_allocator(),
+ };
+
+ return entry_allocator;
+}
+
struct SerdNodesImpl {
- SerdAllocator* allocator;
- ZixHash* hash;
+ SerdNodesEntryAllocator allocator;
+ ZixHash* hash;
};
static const StaticNode empty_static_node = {{0u, 0u, SERD_LITERAL}, {'\0'}};
@@ -185,17 +241,10 @@ nodes_equal(const SerdNode* const a, const SerdNode* const b)
return true;
}
-static NodesEntry*
-new_entry(SerdAllocator* const allocator, const size_t node_size)
+static void
+free_entry(SerdNodes* const nodes, NodesEntry* const entry)
{
- NodesEntry* const entry = (NodesEntry*)serd_aaligned_calloc(
- allocator, serd_node_align, sizeof(NodesEntryHead) + node_size);
-
- if (entry) {
- entry->head.refs = 1u;
- }
-
- return entry;
+ serd_aaligned_free(&nodes->allocator.base, &entry->node);
}
SerdNodes*
@@ -205,7 +254,7 @@ serd_nodes_new(SerdAllocator* const allocator)
(SerdNodes*)serd_acalloc(allocator, 1, sizeof(SerdNodes));
if (nodes) {
- nodes->allocator = allocator;
+ nodes->allocator = serd_nodes_entry_allocator(allocator);
if (!(nodes->hash = zix_hash_new(
(ZixAllocator*)allocator, nodes_key, nodes_hash, nodes_equal))) {
@@ -224,11 +273,11 @@ serd_nodes_free(SerdNodes* nodes)
for (ZixHashIter i = zix_hash_begin(nodes->hash);
i != zix_hash_end(nodes->hash);
i = zix_hash_next(nodes->hash, i)) {
- serd_aaligned_free(nodes->allocator, zix_hash_get(nodes->hash, i));
+ free_entry(nodes, (NodesEntry*)zix_hash_get(nodes->hash, i));
}
zix_hash_free(nodes->hash);
- serd_afree(nodes->allocator, nodes);
+ serd_afree(nodes->allocator.real, nodes);
}
}
@@ -256,13 +305,14 @@ serd_nodes_intern(SerdNodes* nodes, const SerdNode* node)
return &existing->node;
}
- const size_t node_size = serd_node_total_size(node);
- NodesEntry* const entry = new_entry(nodes->allocator, node_size);
- if (!entry) {
+ SerdNode* const new_node = serd_node_copy(&nodes->allocator.base, node);
+ if (!new_node) {
return NULL;
}
- memcpy(&entry->node, node, node_size);
+ NodesEntry* const entry = (NodesEntry*)(new_node - 1u);
+
+ entry->head.refs = 1u;
// Insert the entry (blissfully ignoring a failed hash size increase)
zix_hash_insert_at(nodes->hash, plan, entry);
@@ -284,31 +334,59 @@ serd_nodes_get(const SerdNodes* const nodes, const SerdNode* const node)
}
static const SerdNode*
-serd_nodes_manage_entry(SerdNodes* const nodes, NodesEntry* const entry)
+serd_nodes_manage_entry_at(SerdNodes* const nodes,
+ NodesEntry* const entry,
+ const ZixHashInsertPlan plan)
{
- if (!entry) {
+ assert(nodes);
+ assert(entry);
+
+ entry->head.refs = 1u;
+
+ // Insert the entry (blissfully ignoring a failed hash size increase)
+ if (zix_hash_insert_at(nodes->hash, plan, entry)) {
+ free_entry(nodes, entry);
return NULL;
}
- const SerdNode* const node = &entry->node;
+ return &entry->node;
+}
+
+static const SerdNode*
+serd_nodes_manage_entry_node_at(SerdNodes* const nodes,
+ SerdNode* const node,
+ const ZixHashInsertPlan plan)
+{
+ if (!node) {
+ return NULL;
+ }
+
+ NodesEntry* const entry = (NodesEntry*)(node - 1u);
+
+ return serd_nodes_manage_entry_at(nodes, entry, plan);
+}
+
+static const SerdNode*
+serd_nodes_manage_entry_node(SerdNodes* const nodes, SerdNode* const node)
+{
+ if (!node) {
+ return NULL;
+ }
+
+ NodesEntry* const entry = (NodesEntry*)(node - 1u);
const ZixHashInsertPlan plan = zix_hash_plan_insert(nodes->hash, node);
NodesEntry* const existing = zix_hash_record_at(nodes->hash, plan);
if (existing) {
assert(serd_node_equals(&existing->node, node));
assert(nodes_hash(&existing->node) == plan.code);
++existing->head.refs;
- serd_aaligned_free(nodes->allocator, entry);
+ free_entry(nodes, entry);
return &existing->node;
}
- // Insert the entry (or fail and free it on a failed hash size increase)
- if (zix_hash_insert_at(nodes->hash, plan, entry)) {
- serd_aaligned_free(nodes->allocator, entry);
- return NULL;
- }
-
assert(nodes_hash(&entry->node) == plan.code);
- return &entry->node;
+
+ return serd_nodes_manage_entry_at(nodes, entry, plan);
}
const SerdNode*
@@ -316,11 +394,15 @@ serd_nodes_token(SerdNodes* const nodes,
const SerdNodeType type,
const SerdStringView string)
{
- const NodeSpec key = token_spec(type, string);
- const ZixHashCode code = spec_hash(key);
+ // Calculate a hash code for the token without actually constructing it
+ const NodeSpec key = token_spec(type, string);
+ const ZixHashCode code = spec_hash(key);
+
+ // Find an insert position in the hash table
const ZixHashInsertPlan plan =
zix_hash_plan_insert_prehashed(nodes->hash, code, node_equals_spec, &key);
+ // If we found an existing node, bump its reference count and return it
NodesEntry* const existing = zix_hash_record_at(nodes->hash, plan);
if (existing) {
assert(nodes_hash(&existing->node) == code);
@@ -328,30 +410,11 @@ serd_nodes_token(SerdNodes* const nodes,
return &existing->node;
}
- const size_t padded_length = serd_node_pad_length(string.len);
- const size_t node_size = sizeof(SerdNode) + padded_length;
- NodesEntry* const entry = new_entry(nodes->allocator, node_size);
- SerdNode* const node = entry ? &entry->node : NULL;
- if (!node) {
- return NULL;
- }
-
- // Construct the token directly into the node in the new entry
- const SerdWriteResult r =
- serd_node_construct_token(node_size, &entry->node, type, string);
-
- assert(!r.status); // Never fails with sufficient space
- (void)r;
-
- // Insert the entry (blissfully ignoring a failed hash size increase)
- if (zix_hash_insert_at(nodes->hash, plan, entry)) {
- serd_aaligned_free(nodes->allocator, entry);
- return NULL;
- }
+ // Otherwise, allocate and manage a new one
+ SerdAllocator* const alloc = &nodes->allocator.base;
+ SerdNode* const node = serd_new_token(alloc, type, string);
- assert(nodes_hash(node) == code);
-
- return node;
+ return serd_nodes_manage_entry_node_at(nodes, node, plan);
}
const SerdNode*
@@ -376,28 +439,11 @@ serd_nodes_literal(SerdNodes* const nodes,
return &existing->node;
}
- // We need to insert a new entry, so determine how much space the node needs
- SerdWriteResult r = serd_node_construct_literal(0, NULL, string, flags, meta);
- if (r.status != SERD_OVERFLOW) {
- return NULL;
- }
-
- // Allocate a new entry with enough space for the node
- NodesEntry* const entry = new_entry(nodes->allocator, r.count);
- SerdNode* const node = entry ? &entry->node : NULL;
-
- if (node) {
- // Construct the literal directly into the node in the new entry
- r = serd_node_construct_literal(r.count, node, string, flags, meta);
- assert(!r.status);
- (void)r;
+ // Otherwise, allocate and manage a new one
+ SerdAllocator* const alloc = &nodes->allocator.base;
+ SerdNode* const node = serd_new_literal(alloc, string, flags, meta);
- // Insert the entry (blissfully ignoring a failed hash size increase)
- zix_hash_insert_at(nodes->hash, plan, entry);
- assert(nodes_hash(node) == code);
- }
-
- return node;
+ return serd_nodes_manage_entry_node_at(nodes, node, plan);
}
const SerdNode*
@@ -458,19 +504,10 @@ serd_nodes_hex(SerdNodes* const nodes,
a performance issue. More ambitiously, adding support for binary nodes
like a Real Database(TM) would largely avoid this problem. */
- // Determine how much space the node needs
- SerdWriteResult r = serd_node_construct_hex(0, NULL, value_size, value);
-
- // Allocate a new entry to and construct the node into it
- NodesEntry* const entry = new_entry(nodes->allocator, r.count);
- if (entry) {
- r = serd_node_construct_hex(r.count, &entry->node, value_size, value);
-
- assert(!r.status);
- (void)r;
- }
+ SerdAllocator* const alloc = &nodes->allocator.base;
+ SerdNode* const node = serd_new_hex(alloc, value, value_size);
- return serd_nodes_manage_entry(nodes, entry);
+ return serd_nodes_manage_entry_node(nodes, node);
}
const SerdNode*
@@ -483,19 +520,10 @@ serd_nodes_base64(SerdNodes* const nodes,
// Same situation as for hex above
- // Determine how much space the node needs
- SerdWriteResult r = serd_node_construct_base64(0, NULL, value_size, value);
+ SerdAllocator* const alloc = &nodes->allocator.base;
+ SerdNode* const node = serd_new_base64(alloc, value, value_size);
- // Allocate a new entry to and construct the node into it
- NodesEntry* const entry = new_entry(nodes->allocator, r.count);
- if (entry) {
- r = serd_node_construct_base64(r.count, &entry->node, value_size, value);
-
- assert(!r.status);
- (void)r;
- }
-
- return serd_nodes_manage_entry(nodes, entry);
+ return serd_nodes_manage_entry_node(nodes, node);
}
const SerdNode*
@@ -513,19 +541,10 @@ serd_nodes_parsed_uri(SerdNodes* const nodes, const SerdURIView uri)
since this isn't expected to be a particularly hot case, we just allocate
a new entry and try to do a normal insertion. */
- // Determine how much space the node needs
- SerdWriteResult r = serd_node_construct_uri(0u, NULL, uri);
- assert(r.status == SERD_OVERFLOW); // Currently no other errors
-
- // Allocate a new entry to write the URI node into
- NodesEntry* const entry = new_entry(nodes->allocator, r.count);
- if (entry) {
- r = serd_node_construct_uri(r.count, &entry->node, uri);
- assert(!r.status);
- (void)r;
- }
+ SerdAllocator* const alloc = &nodes->allocator.base;
+ SerdNode* const node = serd_new_parsed_uri(alloc, uri);
- return serd_nodes_manage_entry(nodes, entry);
+ return serd_nodes_manage_entry_node(nodes, node);
}
const SerdNode*
@@ -535,23 +554,12 @@ serd_nodes_file_uri(SerdNodes* const nodes,
{
assert(nodes);
- /* Computing a hash for the serialised URI here would be quite complex, so,
- since this isn't expected to be a particularly hot case, we just allocate
- a new entry and try to do a normal insertion. */
+ // Same situation here, not worth doing an in-place hash
- // Determine how much space the node needs
- SerdWriteResult r = serd_node_construct_file_uri(0u, NULL, path, hostname);
- assert(r.status == SERD_OVERFLOW); // Currently no other errors
-
- // Allocate a new entry to write the URI node into
- NodesEntry* const entry = new_entry(nodes->allocator, r.count);
- if (entry) {
- r = serd_node_construct_file_uri(r.count, &entry->node, path, hostname);
- assert(!r.status);
- (void)r;
- }
+ SerdAllocator* const alloc = &nodes->allocator.base;
+ SerdNode* const node = serd_new_file_uri(alloc, path, hostname);
- return serd_nodes_manage_entry(nodes, entry);
+ return serd_nodes_manage_entry_node(nodes, node);
}
const SerdNode*
@@ -577,6 +585,6 @@ serd_nodes_deref(SerdNodes* const nodes, const SerdNode* const node)
NodesEntry* removed = NULL;
zix_hash_erase(nodes->hash, i, &removed);
assert(removed == entry);
- serd_aaligned_free(nodes->allocator, removed);
+ free_entry(nodes, removed);
}
}
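
The header comment on SerdNodesEntryAllocator above describes an over-allocation trick: grow each allocation by sizeof(NodesEntryHead) and hand the caller a pointer just past that header, so the plain node constructors can be reused while the table still gets its per-entry header. Below is a standalone illustration of that idea with hypothetical names (ExampleHead, example_alloc, etc.) rather than the serd types; the real allocator forwards to aligned_alloc/aligned_free as shown in the diff, while this sketch sidesteps alignment by using plain malloc.

    /* Hypothetical illustration of the header-before-payload trick; not serd API. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      size_t refs; /* Per-entry header stored in front of the payload */
    } ExampleHead;

    /* Over-allocate, fill in the header, and return a pointer to the payload */
    static void*
    example_alloc(const size_t payload_size)
    {
      uint8_t* const block = (uint8_t*)malloc(sizeof(ExampleHead) + payload_size);
      if (!block) {
        return NULL;
      }

      ((ExampleHead*)(void*)block)->refs = 1u;
      return block + sizeof(ExampleHead); /* Caller only ever sees the payload */
    }

    /* Step back from a payload pointer to recover its header */
    static ExampleHead*
    example_head(void* const payload)
    {
      return (ExampleHead*)(void*)((uint8_t*)payload - sizeof(ExampleHead));
    }

    /* Free the whole block, given only the payload pointer */
    static void
    example_free(void* const payload)
    {
      if (payload) {
        free((uint8_t*)payload - sizeof(ExampleHead));
      }
    }

    int
    main(void)
    {
      void* const payload = example_alloc(32u);
      if (payload) {
        printf("refs = %zu\n", example_head(payload)->refs);
        example_free(payload);
      }
      return 0;
    }

This mirrors how the nodes code in the diff recovers an entry from a node pointer with (NodesEntry*)(node - 1u).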