| author | David Robillard <d@drobilla.net> | 2018-05-13 00:48:31 +0200 |
|---|---|---|
| committer | David Robillard <d@drobilla.net> | 2018-12-31 11:37:47 -0500 |
| commit | 299e386243004582a323507a5e17482b28d31b69 (patch) | |
| tree | d273f9a13b751be1b5a212bcbfe91107fe269db5 /src/node.c | |
| parent | 3a62732bd03d1d0ba9daef1667afafa213828bf4 (diff) | |
Zero node padding before passing to reader sinks
Diffstat (limited to 'src/node.c')
-rw-r--r-- | src/node.c | 25 |
1 file changed, 25 insertions, 0 deletions
@@ -210,6 +210,24 @@ serd_node_new_uri(const char* str)
 	return serd_node_new_simple(SERD_URI, str);
 }
 
+/**
+   Zero node padding.
+
+   This is used for nodes which live in re-used stack memory during reading,
+   which must be normalized before being passed to a sink so comparison will
+   work correctly.
+*/
+void
+serd_node_zero_pad(SerdNode* node)
+{
+	char*        buf         = serd_node_buffer(node);
+	const size_t size        = node->n_bytes;
+	const size_t padded_size = serd_node_pad_size(node->n_bytes);
+	if (padded_size > size) {
+		memset(buf + size, 0, padded_size - size);
+	}
+}
+
 SerdNode*
 serd_node_copy(const SerdNode* node)
 {
@@ -218,6 +236,13 @@ serd_node_copy(const SerdNode* node)
 	}
 
 	const size_t size = serd_node_total_size(node);
+#ifndef NDEBUG
+	const size_t unpadded_size = node->n_bytes;
+	const size_t padded_size   = serd_node_pad_size(node->n_bytes);
+	for (size_t i = 0; i < padded_size - unpadded_size; ++i) {
+		assert(serd_node_buffer_c(node)[unpadded_size + i] == '\0');
+	}
+#endif
 	SerdNode* copy = (SerdNode*)calloc(1, size + 3);
 	memcpy(copy, node, size);
 	return copy;
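The doc comment added above states the motivation: nodes built in re-used stack memory carry whatever bytes happened to be in the padding area, so any consumer that compares nodes as whole padded blocks of memory could see two identical nodes as different. The following standalone C sketch illustrates that idea with a hypothetical `NodeHeader` layout and `pad_size()`/`node_new()` helpers; these names are assumptions for illustration, not the serd API, and the `memset` plays the role of `serd_node_zero_pad()`.

```c
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical node layout for illustration only: a small header followed by
   the string body, padded up to a multiple of sizeof(size_t).  These names
   are assumptions, not the serd API. */
typedef struct {
	size_t n_bytes; /* Length of the body in bytes, excluding padding */
} NodeHeader;

static size_t
pad_size(const size_t n_bytes)
{
	/* Round up to the next multiple of sizeof(size_t), leaving room for a
	   null terminator, roughly in the spirit of serd_node_pad_size() */
	return (n_bytes + sizeof(size_t)) / sizeof(size_t) * sizeof(size_t);
}

static NodeHeader*
node_new(const char* const str)
{
	const size_t n_bytes = strlen(str);
	const size_t total   = sizeof(NodeHeader) + pad_size(n_bytes);

	/* malloc (not calloc) so the padding starts out as arbitrary garbage,
	   like re-used stack memory in the reader */
	NodeHeader* const node = (NodeHeader*)malloc(total);
	char* const       buf  = (char*)(node + 1);

	node->n_bytes = n_bytes;
	memcpy(buf, str, n_bytes);

	/* The equivalent of serd_node_zero_pad(): without this memset, the
	   block comparison in main() could report equal nodes as different */
	memset(buf + n_bytes, 0, pad_size(n_bytes) - n_bytes);

	return node;
}

int
main(void)
{
	NodeHeader* const a = node_new("http://example.org/s");
	NodeHeader* const b = node_new("http://example.org/s");

	const size_t total = sizeof(NodeHeader) + pad_size(a->n_bytes);

	/* Comparing whole padded blocks is only reliable because the padding
	   bytes were zeroed above */
	assert(!memcmp(a, b, total));

	free(a);
	free(b);
	return 0;
}
```

The debug-only loop added to serd_node_copy() checks the same invariant from the other side: by the time a node is copied, every padding byte must already be zero.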