From 56d0e4a26dffb59da4d2b32c681d88dba49c302d Mon Sep 17 00:00:00 2001 From: Anna Henningsen Date: Sun, 2 Jun 2019 17:23:50 +0200 Subject: [PATCH 1/2] worker: prevent event loop starvation through MessagePorts Limit the number of messages processed without interruption on a given `MessagePort` to prevent event loop starvation, but still make sure that all messages are emitted that were already in the queue when emitting began. This aligns the behaviour better with the web. Refs: https://github.com/nodejs/node/pull/28030 --- src/node_messaging.cc | 14 ++++++++++ ...rker-message-port-close-while-receiving.js | 15 +++++++++++ ...rker-message-port-infinite-message-loop.js | 27 +++++++++++++++++++ 3 files changed, 56 insertions(+) create mode 100644 test/parallel/test-worker-message-port-close-while-receiving.js create mode 100644 test/parallel/test-worker-message-port-infinite-message-loop.js diff --git a/src/node_messaging.cc b/src/node_messaging.cc index 5aec784f60cfb3..2dcfb4736d7c3f 100644 --- a/src/node_messaging.cc +++ b/src/node_messaging.cc @@ -604,11 +604,25 @@ void MessagePort::OnMessage() { HandleScope handle_scope(env()->isolate()); Local<Context> context = object(env()->isolate())->CreationContext(); + ssize_t processing_limit; + { + Mutex::ScopedLock(data_->mutex_); + processing_limit = data_->incoming_messages_.size(); + } + // data_ can only ever be modified by the owner thread, so no need to lock. // However, the message port may be transferred while it is processing // messages, so we need to check that this handle still owns its `data_` field // on every iteration. while (data_) { + if (--processing_limit < 0) { + // Prevent event loop starvation by only processing those messages without + // interruption that were already present when the OnMessage() call was + // first triggered. 
+ TriggerAsync(); + return; + } + HandleScope handle_scope(env()->isolate()); Context::Scope context_scope(context); diff --git a/test/parallel/test-worker-message-port-close-while-receiving.js b/test/parallel/test-worker-message-port-close-while-receiving.js new file mode 100644 index 00000000000000..d6f73caff1fb66 --- /dev/null +++ b/test/parallel/test-worker-message-port-close-while-receiving.js @@ -0,0 +1,15 @@ +'use strict'; +const common = require('../common'); + +const { MessageChannel } = require('worker_threads'); + +// Make sure that closing a message port while receiving messages on it does +// not stop messages that are already in the queue from being emitted. + +const { port1, port2 } = new MessageChannel(); + +port1.on('message', common.mustCall(() => { + port1.close(); +}, 2)); +port2.postMessage('foo'); +port2.postMessage('bar'); diff --git a/test/parallel/test-worker-message-port-infinite-message-loop.js b/test/parallel/test-worker-message-port-infinite-message-loop.js new file mode 100644 index 00000000000000..972f91ab1586d0 --- /dev/null +++ b/test/parallel/test-worker-message-port-infinite-message-loop.js @@ -0,0 +1,27 @@ +'use strict'; +const common = require('../common'); +const assert = require('assert'); + +const { MessageChannel } = require('worker_threads'); + +// Make sure that an infinite asynchronous .on('message')/postMessage loop +// does not lead to a stack overflow and does not starve the event loop. +// We schedule timeouts both from before the .on('message') handler and +// inside of it, which both should run. 
+ +const { port1, port2 } = new MessageChannel(); +let count = 0; +port1.on('message', () => { + if (count === 0) { + setTimeout(common.mustCall(() => { + port1.close(); + }), 0); + } + + port2.postMessage(0); + assert(count++ < 10000, `hit ${count} loop iterations`); +}); + +port2.postMessage(0); + +setTimeout(common.mustCall(), 0); From fab5d00d7d8178a02bacf253676c6f0a1d64b16b Mon Sep 17 00:00:00 2001 From: Anna Henningsen Date: Sat, 7 Sep 2019 21:41:02 +0200 Subject: [PATCH 2/2] fixup! worker: prevent event loop starvation through MessagePorts --- src/node_messaging.cc | 13 +++++++++---- ...est-worker-message-port-infinite-message-loop.js | 2 ++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/node_messaging.cc b/src/node_messaging.cc index 2dcfb4736d7c3f..19065fdb7d1be5 100644 --- a/src/node_messaging.cc +++ b/src/node_messaging.cc @@ -604,10 +604,11 @@ void MessagePort::OnMessage() { HandleScope handle_scope(env()->isolate()); Local<Context> context = object(env()->isolate())->CreationContext(); - ssize_t processing_limit; + size_t processing_limit; { Mutex::ScopedLock(data_->mutex_); - processing_limit = data_->incoming_messages_.size(); + processing_limit = std::max(data_->incoming_messages_.size(), + static_cast<size_t>(1000)); } // data_ can only ever be modified by the owner thread, so no need to lock. @@ -615,10 +616,14 @@ void MessagePort::OnMessage() { // messages, so we need to check that this handle still owns its `data_` field // on every iteration. while (data_) { - if (--processing_limit < 0) { + if (processing_limit-- == 0) { // Prevent event loop starvation by only processing those messages without // interruption that were already present when the OnMessage() call was - // first triggered. + // first triggered, but at least 1000 messages because otherwise the + // overhead of repeatedly triggering the uv_async_t instance becomes + // noticeable, at least on Windows. 
+ // (That might require more investigation by somebody more familiar with + // Windows.) TriggerAsync(); return; } diff --git a/test/parallel/test-worker-message-port-infinite-message-loop.js b/test/parallel/test-worker-message-port-infinite-message-loop.js index 972f91ab1586d0..640b3383ca62c3 100644 --- a/test/parallel/test-worker-message-port-infinite-message-loop.js +++ b/test/parallel/test-worker-message-port-infinite-message-loop.js @@ -24,4 +24,6 @@ port1.on('message', () => { port2.postMessage(0); +// This is part of the test -- the event loop should be available and not stall +// out due to the recursive .postMessage() calls. setTimeout(common.mustCall(), 0);