summaryrefslogtreecommitdiffstats
path: root/src/server/events/Mark.cpp
diff options
context:
space:
mode:
authorDavid Robillard <d@drobilla.net>2016-10-01 15:18:09 -0400
committerDavid Robillard <d@drobilla.net>2016-10-02 12:24:57 -0400
commita172e76897157e5a0d2ebd3fa3f7f77ec38a5df0 (patch)
treeafc1962fc5123ff8ad4558912e69227bca2a4192 /src/server/events/Mark.cpp
parent5c4356827e51b3d6e1256a050e6273a87728d588 (diff)
downloadingen-a172e76897157e5a0d2ebd3fa3f7f77ec38a5df0.tar.gz
ingen-a172e76897157e5a0d2ebd3fa3f7f77ec38a5df0.tar.bz2
ingen-a172e76897157e5a0d2ebd3fa3f7f77ec38a5df0.zip
Defer graph compilation in atomic bundles
This avoids situations like compiling a graph hundreds of times when it is loaded because it has hundreds of nodes and each event triggers a re-compile. This speeds things up dramatically, but exacerbates the theoretical problem of there not being enough time in a cycle to execute a bundle. As far as I can tell, the execute phase of events is very fast, so hundreds or thousands can easily run in a tiny fraction of the process cycle, but this still needs resolution to be truly hard real-time. What probably needs to happen is that all context and state used to process is moved to CompiledGraph and nodes do not access their own fields at all, but have some references into the CompiledGraph. This way, a compiled graph is separate from its "source code", and an old one could continue to be run while a new one is being applied across several cycles.
Diffstat (limited to 'src/server/events/Mark.cpp')
-rw-r--r--src/server/events/Mark.cpp27
1 files changed, 25 insertions, 2 deletions
diff --git a/src/server/events/Mark.cpp b/src/server/events/Mark.cpp
index 11690487..c72cc14f 100644
--- a/src/server/events/Mark.cpp
+++ b/src/server/events/Mark.cpp
@@ -15,6 +15,7 @@
*/
#include "Engine.hpp"
+#include "PreProcessContext.hpp"
#include "UndoStack.hpp"
#include "events/Mark.hpp"
@@ -32,8 +33,15 @@ Mark::Mark(Engine& engine,
, _depth(0)
{}
+Mark::~Mark()
+{
+ for (const auto& g : _compiled_graphs) {
+ delete g.second;
+ }
+}
+
bool
-Mark::pre_process()
+Mark::pre_process(PreProcessContext& ctx)
{
UndoStack* const stack = ((_mode == Mode::UNDO)
? _engine.redo_stack()
@@ -41,10 +49,21 @@ Mark::pre_process()
switch (_type) {
case Type::BUNDLE_START:
+ ctx.set_in_bundle(true);
_depth = stack->start_entry();
break;
case Type::BUNDLE_END:
_depth = stack->finish_entry();
+ ctx.set_in_bundle(false);
+ if (!ctx.dirty_graphs().empty()) {
+ for (GraphImpl* g : ctx.dirty_graphs()) {
+ CompiledGraph* cg = CompiledGraph::compile(g);
+ if (cg) {
+ _compiled_graphs.insert(std::make_pair(g, cg));
+ }
+ }
+ ctx.dirty_graphs().clear();
+ }
break;
}
@@ -53,7 +72,11 @@ Mark::pre_process()
void
Mark::execute(RunContext& context)
-{}
+{
+ for (auto& g : _compiled_graphs) {
+ g.second = g.first->swap_compiled_graph(g.second);
+ }
+}
void
Mark::post_process()