
fix complete messageport transfer
gterzian committed Nov 15, 2019
1 parent 4cdfe23 commit a256f2f
Showing 7 changed files with 468 additions and 52 deletions.
300 changes: 265 additions & 35 deletions components/constellation/constellation.rs

Large diffs are not rendered by default.
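The constellation.rs hunk is not rendered here. As a rough, hypothetical sketch of the request side of the new handshake, grounded only in the `ScriptMsg::CompleteMessagePortTransfer` and `MessagePortMsg::CompleteTransfer` variants added later in this diff, a constellation-like broker might batch the buffered tasks of every requested port and hand the whole batch to the requesting router in one message. The `Broker`, `buffered`, and `routers` names below are invented for illustration and simplified standalone types replace the real msg/script_traits ones.

```rust
use std::collections::{HashMap, VecDeque};
use std::sync::mpsc::{channel, Sender};

// Simplified stand-ins for the real msg/script_traits types.
type PortId = u64;
type RouterId = u64;
type PortMessageTask = String;

// Hypothetical message to a router, mirroring MessagePortMsg::CompleteTransfer.
#[derive(Debug)]
enum PortMsg {
    CompleteTransfer(HashMap<PortId, VecDeque<PortMessageTask>>),
}

// Hypothetical broker standing in for the constellation's port bookkeeping.
struct Broker {
    buffered: HashMap<PortId, VecDeque<PortMessageTask>>,
    routers: HashMap<RouterId, Sender<PortMsg>>,
}

impl Broker {
    // Sketch of handling ScriptMsg::CompleteMessagePortTransfer: gather the
    // buffered tasks of each requested port into one batch and send the whole
    // batch to the requesting router as a single CompleteTransfer message.
    fn complete_transfer(&mut self, router: RouterId, ports: Vec<PortId>) {
        let mut batch = HashMap::new();
        for id in ports {
            let buffer = self.buffered.remove(&id).unwrap_or_default();
            batch.insert(id, buffer);
        }
        if let Some(sender) = self.routers.get(&router) {
            let _ = sender.send(PortMsg::CompleteTransfer(batch));
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    let mut broker = Broker {
        buffered: HashMap::from([(1, VecDeque::from(vec!["queued while in transit".to_string()]))]),
        routers: HashMap::from([(7, tx)]),
    };
    broker.complete_transfer(7, vec![1]);
    println!("{:?}", rx.recv().unwrap());
}
```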

88 changes: 73 additions & 15 deletions components/script/dom/globalscope.rs
@@ -218,12 +218,48 @@ impl MessageListener {
/// and we can only access the root from the event-loop.
fn notify(&self, msg: MessagePortMsg) {
match msg {
MessagePortMsg::CompleteTransfer(port_id, tasks) => {
MessagePortMsg::CompleteTransfer(ports) => {
let context = self.context.clone();
let _ = self.task_source.queue_with_canceller(
task!(process_complete_transfer: move || {
let global = context.root();
global.complete_port_transfer(port_id, tasks);

let router_id = match global.port_router_id() {
Some(router_id) => router_id,
None => {
// If not managing any ports, no transfer can succeed,
// so just send back everything.
let _ = global.script_to_constellation_chan().send(
ScriptMsg::MessagePortTransferResult(None, vec![], ports),
);
return;
}
};

let mut succeeded = vec![];
let mut failed = HashMap::new();

for (id, buffer) in ports.into_iter() {
if global.is_managing_port(&id) {
succeeded.push(id.clone());
global.complete_port_transfer(id, buffer);
} else {
failed.insert(id, buffer);
}
}
let _ = global.script_to_constellation_chan().send(
ScriptMsg::MessagePortTransferResult(Some(router_id), succeeded, failed),
);
}),
&self.canceller,
);
},
MessagePortMsg::CompletePendingTransfer(port_id, buffer) => {
let context = self.context.clone();
let _ = self.task_source.queue_with_canceller(
task!(complete_pending: move || {
let global = context.root();
global.complete_port_transfer(port_id, buffer);
}),
&self.canceller,
);
@@ -294,14 +330,33 @@ impl GlobalScope {
}
}

/// The message-port router Id of the global, if any
fn port_router_id(&self) -> Option<MessagePortRouterId> {
if let MessagePortState::Managed(id, _message_ports) = &*self.message_port_state.borrow() {
Some(id.clone())
} else {
None
}
}

/// Is this global managing a given port?
fn is_managing_port(&self, port_id: &MessagePortId) -> bool {
if let MessagePortState::Managed(_router_id, message_ports) =
&*self.message_port_state.borrow()
{
return message_ports.contains_key(port_id);
}
false
}

/// Complete the transfer of a message-port.
fn complete_port_transfer(&self, port_id: MessagePortId, tasks: VecDeque<PortMessageTask>) {
let should_start = if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
match message_ports.get_mut(&port_id) {
None => {
panic!("CompleteTransfer msg received in a global not managing the port.");
panic!("complete_port_transfer called for an unknown port.");
},
Some(ManagedMessagePort::Pending(_, _)) => {
panic!("CompleteTransfer msg received for a pending port.");
@@ -312,7 +367,7 @@ impl GlobalScope {
},
}
} else {
return warn!("CompleteTransfer msg received in a global not managing any ports.");
panic!("complete_port_transfer called for an unknown port.");
};
if should_start {
self.start_message_port(&port_id);
@@ -554,22 +609,25 @@ impl GlobalScope {
_ => None,
})
.collect();
for id in to_be_added {
for id in to_be_added.iter() {
let (id, port_info) = message_ports
.remove_entry(&id)
.expect("Collected port-id to match an entry");
if let ManagedMessagePort::Pending(port_impl, dom_port) = port_info {
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::NewMessagePort(
router_id.clone(),
port_impl.message_port_id().clone(),
));
let new_port_info = ManagedMessagePort::Added(port_impl, dom_port);
let present = message_ports.insert(id, new_port_info);
assert!(present.is_none());
match port_info {
ManagedMessagePort::Pending(port_impl, dom_port) => {
let new_port_info = ManagedMessagePort::Added(port_impl, dom_port);
let present = message_ports.insert(id, new_port_info);
assert!(present.is_none());
},
_ => panic!("Only pending ports should be found in to_be_added"),
}
}
let _ =
self.script_to_constellation_chan()
.send(ScriptMsg::CompleteMessagePortTransfer(
router_id.clone(),
to_be_added,
));
} else {
warn!("maybe_add_pending_ports called on a global not managing any ports.");
}
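For reference, a minimal, self-contained restatement of the partitioning logic in the `CompleteTransfer` task above, using simplified types: ports the global already manages are completed and reported as succeeded, and any others are sent back with their buffers as failed so the constellation can roll the transfer back. The `managed` set is a stand-in for the global's `message_port_state`.

```rust
use std::collections::{HashMap, HashSet, VecDeque};

// Simplified stand-ins for MessagePortId and PortMessageTask.
type PortId = u64;
type PortMessageTask = String;

// Partition an incoming CompleteTransfer batch the way the new globalscope.rs
// code does: ports this global manages succeed, everything else is returned
// with its buffer so the constellation can roll the transfer back.
fn partition_transfer(
    managed: &HashSet<PortId>,
    ports: HashMap<PortId, VecDeque<PortMessageTask>>,
) -> (Vec<PortId>, HashMap<PortId, VecDeque<PortMessageTask>>) {
    let mut succeeded = Vec::new();
    let mut failed = HashMap::new();
    for (id, buffer) in ports {
        if managed.contains(&id) {
            succeeded.push(id);
            // The real code calls global.complete_port_transfer(id, buffer) here.
        } else {
            failed.insert(id, buffer);
        }
    }
    (succeeded, failed)
}

fn main() {
    let managed: HashSet<PortId> = [1].into_iter().collect();
    let mut ports = HashMap::new();
    ports.insert(1, VecDeque::from(vec!["kept".to_string()]));
    ports.insert(2, VecDeque::from(vec!["returned".to_string()]));
    let (succeeded, failed) = partition_transfer(&managed, ports);
    println!("succeeded: {:?}, failed: {:?}", succeeded, failed);
}
```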
8 changes: 6 additions & 2 deletions components/script_traits/lib.rs
@@ -1036,8 +1036,12 @@ pub struct PortMessageTask {
/// Messages for communication between the constellation and a global managing ports.
#[derive(Debug, Deserialize, Serialize)]
pub enum MessagePortMsg {
/// Enables a port to catch-up on messages that were sent while the transfer was ongoing.
CompleteTransfer(MessagePortId, VecDeque<PortMessageTask>),
/// Complete the transfer for a batch of ports.
CompleteTransfer(HashMap<MessagePortId, VecDeque<PortMessageTask>>),
/// Complete the transfer of a single port,
/// whose transfer was pending because it had been requested
/// while a previous failed transfer was being rolled-back.
CompletePendingTransfer(MessagePortId, VecDeque<PortMessageTask>),
/// Remove a port, the entangled one doesn't exist anymore.
RemoveMessagePort(MessagePortId),
/// Handle a new port-message-task.
14 changes: 14 additions & 0 deletions components/script_traits/script_msg.rs
@@ -30,6 +30,7 @@ use net_traits::storage_thread::StorageType;
use net_traits::CoreResourceMsg;
use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::{HashMap, VecDeque};
use std::fmt;
use style_traits::viewport::ViewportConstraints;
use style_traits::CSSPixel;
@@ -114,6 +115,17 @@ pub enum HistoryEntryReplacement {
/// Messages from the script to the constellation.
#[derive(Deserialize, Serialize)]
pub enum ScriptMsg {
/// Request to complete the transfer of a set of ports to a router.
CompleteMessagePortTransfer(MessagePortRouterId, Vec<MessagePortId>),
/// The results of attempting to complete the transfer of a batch of ports.
MessagePortTransferResult(
/* The router whose transfer of ports succeeded, if any */
Option<MessagePortRouterId>,
/* The ids of ports transferred successfully */
Vec<MessagePortId>,
/* The ids, and buffers, of ports whose transfer failed */
HashMap<MessagePortId, VecDeque<PortMessageTask>>,
),
/// A new message-port was created or transferred, with corresponding control-sender.
NewMessagePort(MessagePortRouterId, MessagePortId),
/// A global has started managing message-ports
@@ -248,6 +260,8 @@ impl fmt::Debug for ScriptMsg {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
use self::ScriptMsg::*;
let variant = match *self {
CompleteMessagePortTransfer(..) => "CompleteMessagePortTransfer",
MessagePortTransferResult(..) => "MessagePortTransferResult",
NewMessagePortRouter(..) => "NewMessagePortRouter",
RemoveMessagePortRouter(..) => "RemoveMessagePortRouter",
NewMessagePort(..) => "NewMessagePort",
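How the constellation reacts to `MessagePortTransferResult` lives in the unrendered constellation.rs hunk. A plausible sketch, assuming the rollback behaviour implied by `MessagePortMsg::CompletePendingTransfer`, is that succeeded ports are recorded as managed by the reporting router while failed ports are re-buffered until the transfer can be retried with a `CompletePendingTransfer`. The `Broker` and `PortState` types below are invented for illustration.

```rust
use std::collections::{HashMap, VecDeque};

// Simplified stand-ins for the real msg/script_traits types.
type PortId = u64;
type RouterId = u64;
type PortMessageTask = String;

// Hypothetical bookkeeping standing in for the constellation's view of a port.
enum PortState {
    // Messages are buffered here while no global manages the port.
    Buffered(VecDeque<PortMessageTask>),
    // The port is managed by the router that completed its transfer.
    Managed(RouterId),
}

struct Broker {
    ports: HashMap<PortId, PortState>,
}

impl Broker {
    // Sketch of handling ScriptMsg::MessagePortTransferResult.
    fn handle_transfer_result(
        &mut self,
        router: Option<RouterId>,
        succeeded: Vec<PortId>,
        failed: HashMap<PortId, VecDeque<PortMessageTask>>,
    ) {
        if let Some(router) = router {
            // Ports the router completed are now managed by it.
            for id in succeeded {
                self.ports.insert(id, PortState::Managed(router));
            }
        }
        // Failed ports get their buffers back; a later
        // MessagePortMsg::CompletePendingTransfer would retry each one.
        for (id, buffer) in failed {
            self.ports.insert(id, PortState::Buffered(buffer));
        }
    }
}

fn main() {
    let mut broker = Broker { ports: HashMap::new() };
    let failed = HashMap::from([(2, VecDeque::from(vec!["pending".to_string()]))]);
    broker.handle_transfer_result(Some(7), vec![1], failed);
    println!("tracking {} ports", broker.ports.len());
}
```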
34 changes: 34 additions & 0 deletions tests/wpt/mozilla/meta/MANIFEST.json
@@ -11281,6 +11281,32 @@
{}
]
],
"mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.js": [
[
"mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.html",
{
"script_metadata": [
[
"script",
"/common/get-host-info.sub.js"
]
]
}
]
],
"mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.js": [
[
"mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.html",
{
"script_metadata": [
[
"script",
"/common/get-host-info.sub.js"
]
]
}
]
],
"mozilla/DOMParser.html": [
[
"mozilla/DOMParser.html",
@@ -18409,6 +18435,14 @@
"276791c4348ada7e1da71041f2ccd383305e209c",
"support"
],
"mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.js": [
"4ee3f64beb095963f06fc53c1d53dad2244109f9",
"testharness"
],
"mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.js": [
"939995678895c07047709f6e265d0f6b7b705eb5",
"testharness"
],
"mozilla/DOMParser.html": [
"f386a3e0191af2c70dcb05790ce7db15dd5ccbf1",
"testharness"
33 changes: 33 additions & 0 deletions tests/wpt/mozilla/tests/mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.js
@@ -0,0 +1,33 @@
// META: script=/common/get-host-info.sub.js

async_test(function(t) {
var channel1 = new MessageChannel();
var channel2 = new MessageChannel();
var host = get_host_info();
let iframe = document.createElement('iframe');
iframe.src = host.HTTP_NOTSAMESITE_ORIGIN + "/webmessaging/support/ChildWindowPostMessage.htm";
document.body.appendChild(iframe);
var TARGET = document.querySelector("iframe").contentWindow;
iframe.onload = t.step_func(function() {

// Send a message, expecting it to be received in the iframe.
channel1.port2.postMessage(1)

// First, transfer the port into the same realm.
channel2.port2.postMessage(0, [channel1.port1]);

channel2.port1.onmessage = t.step_func(function (evt) {
assert_equals(Number(evt.data), 0);

t.step_timeout(function () {
// Transfer the port to the iframe.
TARGET.postMessage("ports", "*", evt.ports);
}, 0);
});

channel1.port2.onmessage = t.step_func(function (evt) {
assert_equals(Number(evt.data), 1);
t.done();
});
});
}, `A port transferred outside of an onmessage handler does not lose messages along the way.`);
43 changes: 43 additions & 0 deletions tests/wpt/mozilla/tests/mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.js
@@ -0,0 +1,43 @@
// META: script=/common/get-host-info.sub.js

async_test(function(t) {
var channel1 = new MessageChannel();
var channel2 = new MessageChannel();
var host = get_host_info();
let iframe = document.createElement('iframe');
iframe.src = host.HTTP_NOTSAMESITE_ORIGIN + "/webmessaging/support/ChildWindowPostMessage.htm";
document.body.appendChild(iframe);
var TARGET = document.querySelector("iframe").contentWindow;
iframe.onload = t.step_func(function() {

// Send a message, expecting it to be received in the iframe.
channel1.port2.postMessage(1)

// First, transfer the port into the same realm.
channel2.port2.postMessage(0, [channel1.port1]);

channel2.port1.onmessage = t.step_func(function (evt) {
assert_equals(Number(evt.data), 0);

t.step_timeout(function () {
// Transfer the port to the iframe.
TARGET.postMessage("ports", "*", evt.ports);

// Keep the event-loop busy for one second,
// which will result in the iframe
// starting the "complete port transfer" flow,
// before the window global could finish its own.
var request = new XMLHttpRequest();
request.open('GET', 'blank.html?pipe=trickle(d1)', false);
request.send(null);
}, 0);
});

channel1.port2.onmessage = t.step_func(function (evt) {
assert_equals(Number(evt.data), 1);
t.done();
});
});
}, `A port transferred outside of an onmessage handler,
followed by a delay in returning the buffer caused by blocking the event-loop,
does not lose messages along the way.`);

1 comment on commit a256f2f

@community-tc-integration


Submitting the task to Taskcluster failed.

Taskcluster-GitHub attempted to create a task for this event with the following scopes:

[
  "assume:repo:github.com/servo/servo:pull-request",
  "queue:route:statuses",
  "queue:scheduler-id:taskcluster-github"
]

The expansion of these scopes is not sufficient to create the task, leading to the following:

Client ID static/taskcluster/github does not have sufficient scopes and is missing the following scopes:

{
  "AnyOf": [
    {
      "AnyOf": [
        "queue:create-task:highest:aws-provisioner-v1/servo-docker-untrusted",
        "queue:create-task:very-high:aws-provisioner-v1/servo-docker-untrusted",
        "queue:create-task:high:aws-provisioner-v1/servo-docker-untrusted",
        "queue:create-task:medium:aws-provisioner-v1/servo-docker-untrusted",
        "queue:create-task:low:aws-provisioner-v1/servo-docker-untrusted",
        "queue:create-task:very-low:aws-provisioner-v1/servo-docker-untrusted",
        "queue:create-task:lowest:aws-provisioner-v1/servo-docker-untrusted"
      ]
    },
    {
      "AnyOf": [
        "queue:create-task:aws-provisioner-v1/servo-docker-untrusted",
        {
          "AllOf": [
            "queue:define-task:aws-provisioner-v1/servo-docker-untrusted",
            "queue:task-group-id:taskcluster-github/f-3sSZ1dRkyNerFb815Mjg",
            "queue:schedule-task:taskcluster-github/f-3sSZ1dRkyNerFb815Mjg/f-3sSZ1dRkyNerFb815Mjg"
          ]
        }
      ]
    }
  ]
}

This request requires the client to satisfy the following scope expression:

{
  "AllOf": [
    "assume:repo:github.com/servo/servo:pull-request",
    "queue:route:tc-treeherder.v2._/servo-prs.a256f2fccefb0618e36550350224e2b07ebb5337",
    "queue:route:tc-treeherder-staging.v2._/servo-prs.a256f2fccefb0618e36550350224e2b07ebb5337",
    "queue:route:statuses",
    {
      "AnyOf": [
        {
          "AllOf": [
            "queue:scheduler-id:taskcluster-github",
            {
              "AnyOf": [
                "queue:create-task:highest:aws-provisioner-v1/servo-docker-untrusted",
                "queue:create-task:very-high:aws-provisioner-v1/servo-docker-untrusted",
                "queue:create-task:high:aws-provisioner-v1/servo-docker-untrusted",
                "queue:create-task:medium:aws-provisioner-v1/servo-docker-untrusted",
                "queue:create-task:low:aws-provisioner-v1/servo-docker-untrusted",
                "queue:create-task:very-low:aws-provisioner-v1/servo-docker-untrusted",
                "queue:create-task:lowest:aws-provisioner-v1/servo-docker-untrusted"
              ]
            }
          ]
        },
        {
          "AnyOf": [
            "queue:create-task:aws-provisioner-v1/servo-docker-untrusted",
            {
              "AllOf": [
                "queue:define-task:aws-provisioner-v1/servo-docker-untrusted",
                "queue:task-group-id:taskcluster-github/f-3sSZ1dRkyNerFb815Mjg",
                "queue:schedule-task:taskcluster-github/f-3sSZ1dRkyNerFb815Mjg/f-3sSZ1dRkyNerFb815Mjg"
              ]
            }
          ]
        }
      ]
    }
  ]
}

  • method: createTask
  • errorCode: InsufficientScopes
  • statusCode: 403
  • time: 2019-11-15T06:13:23.088Z
