diff options
author | David Robillard <d@drobilla.net> | 2018-05-13 00:48:31 +0200 |
---|---|---|
committer | David Robillard <d@drobilla.net> | 2018-11-25 22:12:47 +0100 |
commit | 9c5e41683fc2da0ba4089c728992f481e9908bf2 (patch) | |
tree | 77a70acd7b107029ba5131a4d401fadf28b0f2a8 /src/node.c | |
parent | 7698e111ce687c6ba80590d66f9f4c0487783004 (diff) | |
download | serd-9c5e41683fc2da0ba4089c728992f481e9908bf2.tar.gz serd-9c5e41683fc2da0ba4089c728992f481e9908bf2.tar.bz2 serd-9c5e41683fc2da0ba4089c728992f481e9908bf2.zip |
Zero node padding before passing to reader sinks
Diffstat (limited to 'src/node.c')
-rw-r--r-- | src/node.c | 25 |
1 file changed, 25 insertions, 0 deletions
@@ -209,6 +209,24 @@ serd_node_new_uri(const char* str)
 	return serd_node_new_simple(SERD_URI, str);
 }
 
+/**
+   Zero node padding.
+
+   This is used for nodes which live in re-used stack memory during reading,
+   which must be normalized before being passed to a sink so comparison will
+   work correctly.
+*/
+void
+serd_node_zero_pad(SerdNode* node)
+{
+	char*        buf         = serd_node_buffer(node);
+	const size_t size        = node->n_bytes;
+	const size_t padded_size = serd_node_pad_size(node->n_bytes);
+	if (padded_size > size) {
+		memset(buf + size, 0, padded_size - size);
+	}
+}
+
 SerdNode*
 serd_node_copy(const SerdNode* node)
 {
@@ -217,6 +235,13 @@ serd_node_copy(const SerdNode* node)
 	}
 
 	const size_t size = serd_node_total_size(node);
+#ifndef NDEBUG
+	const size_t unpadded_size = node->n_bytes;
+	const size_t padded_size   = serd_node_pad_size(node->n_bytes);
+	for (size_t i = 0; i < padded_size - unpadded_size; ++i) {
+		assert(serd_node_buffer_c(node)[unpadded_size + i] == '\0');
+	}
+#endif
 	SerdNode* copy = (SerdNode*)calloc(1, size + 3);
 	memcpy(copy, node, size);
 	return copy;