WebNN: Support MLDeviceType enum for MLContextOptions
This CL implements the `MLDeviceType` enum and the `deviceType` member of
the `MLContextOptions` dictionary [1].

With this change, the `MLContextMojo` and the corresponding WebNN mojo
service are only created when `MLContextOptions.deviceType` is set to
"gpu".

This change is required to run WPT WebNN test cases [2] on GPU devices.

[1]: https://www.w3.org/TR/webnn/#dom-mlcontextoptions-devicetype
[2]: https://github.com/web-platform-tests/wpt/tree/master/webnn
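
For illustration, a minimal TypeScript sketch of how a page would exercise the new option from [1]; `navigator.ml` is cast to `any` because WebNN typings are not part of the standard TypeScript lib, and the catch branch mirrors the NotSupportedError rejection added in ml.cc below:

```ts
// Sketch only: request a GPU-backed WebNN context via the new
// MLContextOptions.deviceType member.
async function createGpuContext(): Promise<unknown> {
  const ml = (navigator as any).ml;
  try {
    // With this CL, deviceType: "gpu" routes creation through MLContextMojo
    // and the WebNN mojo service (gated on the
    // MachineLearningNeuralNetworkService feature).
    return await ml.createContext({ deviceType: "gpu" });
  } catch (e) {
    // If the feature is disabled, the promise rejects with a
    // "Not implemented" NotSupportedError.
    console.error("WebNN GPU context unavailable:", e);
    return null;
  }
}
```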

Bug: 1447973, 1273291
Change-Id: I79b8f1514503d6a85a047158e161155dbade1368
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4986939
Reviewed-by: Jiewei Qian <qjw@chromium.org>
Commit-Queue: ningxin hu <ningxin.hu@intel.com>
Cr-Commit-Position: refs/heads/main@{#1216880}
huningxin authored and Chromium LUCI CQ committed Oct 30, 2023
1 parent cb8e4b4 commit 26e5e8d
Showing 10 changed files with 106 additions and 92 deletions.
2 changes: 2 additions & 0 deletions third_party/blink/renderer/bindings/generated_in_modules.gni
@@ -1469,6 +1469,8 @@ generated_enumeration_sources_in_modules = [
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_data_type.h",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_device_preference.cc",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_device_preference.h",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_device_type.cc",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_device_type.h",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_input_operand_layout.cc",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_input_operand_layout.h",
"$root_gen_dir/third_party/blink/renderer/bindings/modules/v8/v8_ml_interpolation_mode.cc",
59 changes: 22 additions & 37 deletions third_party/blink/renderer/modules/ml/ml.cc
@@ -70,7 +70,7 @@ void ML::Trace(Visitor* visitor) const {
}

ScriptPromise ML::createContext(ScriptState* script_state,
MLContextOptions* option,
MLContextOptions* options,
ExceptionState& exception_state) {
if (!script_state->ContextIsValid()) {
exception_state.ThrowDOMException(DOMExceptionCode::kInvalidStateError,
@@ -83,34 +83,25 @@ ScriptPromise ML::createContext(ScriptState* script_state,

auto promise = resolver->Promise();

// TODO(crbug.com/1273291): Support async context creation for all contexts.
#if BUILDFLAG(BUILD_WEBNN_WITH_XNNPACK) || BUILDFLAG(BUILD_WEBNN_ON_CROS)
if (option->devicePreference() == V8MLDevicePreference::Enum::kAuto ||
option->devicePreference() == V8MLDevicePreference::Enum::kCpu) {
auto* ml_context = MakeGarbageCollected<MLContext>(
option->devicePreference(), option->powerPreference(),
option->modelFormat(), option->numThreads(), this);
resolver->Resolve(ml_context);
return promise;
}
#endif

#if !BUILDFLAG(IS_CHROMEOS)
if (base::FeatureList::IsEnabled(
webnn::features::kEnableMachineLearningNeuralNetworkService)) {
MLContextMojo::ValidateAndCreateAsync(resolver, option, this);
if (options->deviceType() == V8MLDeviceType::Enum::kGpu) {
if (base::FeatureList::IsEnabled(
webnn::features::kEnableMachineLearningNeuralNetworkService)) {
MLContextMojo::ValidateAndCreateAsync(resolver, options, this);
} else {
resolver->Reject(MakeGarbageCollected<DOMException>(
DOMExceptionCode::kNotSupportedError, "Not implemented"));
}
return promise;
}
#endif

// Notice that currently, we just create the context in the renderer. In the
// future we may add backend query ability to check whether a context is
// supportable or not. At that time, this function will be truly asynced.
auto* ml_context = MakeGarbageCollected<MLContext>(
option->devicePreference(), option->powerPreference(),
option->modelFormat(), option->numThreads(), this);
resolver->Resolve(ml_context);

//
// TODO(crbug.com/1273291): Support async context creation for all contexts.
resolver->Resolve(MLContext::ValidateAndCreateSync(options, this));
return promise;
}

@@ -123,28 +114,22 @@ MLContext* ML::createContextSync(ScriptState* script_state,
return nullptr;
}

// TODO(crbug.com/1273291): support sync context creation for all contexts.
#if BUILDFLAG(BUILD_WEBNN_WITH_XNNPACK) || BUILDFLAG(BUILD_WEBNN_ON_CROS)
if (options->devicePreference() == V8MLDevicePreference::Enum::kAuto ||
options->devicePreference() == V8MLDevicePreference::Enum::kCpu) {
return MLContext::ValidateAndCreateSync(options, this);
}
#endif

#if !BUILDFLAG(IS_CHROMEOS)
// The runtime enable feature is used to disable the cross process hardware
// acceleration by default.
if (base::FeatureList::IsEnabled(
webnn::features::kEnableMachineLearningNeuralNetworkService) &&
(options->devicePreference() == V8MLDevicePreference::Enum::kAuto ||
options->devicePreference() == V8MLDevicePreference::Enum::kGpu)) {
return MLContextMojo::ValidateAndCreateSync(script_state, exception_state,
options, this);
if (options->deviceType() == V8MLDeviceType::Enum::kGpu) {
if (base::FeatureList::IsEnabled(
webnn::features::kEnableMachineLearningNeuralNetworkService)) {
return MLContextMojo::ValidateAndCreateSync(script_state, exception_state,
options, this);
} else {
exception_state.ThrowDOMException(DOMExceptionCode::kNotSupportedError,
"Not implemented");
return nullptr;
}
}
#endif

// TODO(crbug.com/1273291): throw exception once tests support all context
// types.
return MLContext::ValidateAndCreateSync(options, this);
}

11 changes: 9 additions & 2 deletions third_party/blink/renderer/modules/ml/ml_context.cc
@@ -16,16 +16,19 @@ namespace blink {
// static
MLContext* MLContext::ValidateAndCreateSync(MLContextOptions* options, ML* ml) {
return MakeGarbageCollected<MLContext>(
options->devicePreference(), options->powerPreference(),
options->modelFormat(), options->numThreads(), ml);
options->devicePreference(), options->deviceType(),
options->powerPreference(), options->modelFormat(), options->numThreads(),
ml);
}

MLContext::MLContext(const V8MLDevicePreference device_preference,
const V8MLDeviceType device_type,
const V8MLPowerPreference power_preference,
const V8MLModelFormat model_format,
const unsigned int num_threads,
ML* ml)
: device_preference_(device_preference),
device_type_(device_type),
power_preference_(power_preference),
model_format_(model_format),
num_threads_(num_threads),
@@ -37,6 +40,10 @@ V8MLDevicePreference MLContext::GetDevicePreference() const {
return device_preference_;
}

V8MLDeviceType MLContext::GetDeviceType() const {
return device_type_;
}

V8MLPowerPreference MLContext::GetPowerPreference() const {
return power_preference_;
}
4 changes: 4 additions & 0 deletions third_party/blink/renderer/modules/ml/ml_context.h
@@ -8,6 +8,7 @@
#include "services/webnn/public/mojom/webnn_context_provider.mojom-blink.h"
#include "services/webnn/public/mojom/webnn_graph.mojom-blink.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_device_preference.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_device_type.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_model_format.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_power_preference.h"
#include "third_party/blink/renderer/core/inspector/console_message.h"
@@ -32,6 +33,7 @@ class MODULES_EXPORT MLContext : public ScriptWrappable {
// The constructor shouldn't be called directly. The callers should use
// CreateAsync() or CreateSync() method instead.
MLContext(const V8MLDevicePreference device_preference,
const V8MLDeviceType device_type,
const V8MLPowerPreference power_preference,
const V8MLModelFormat model_format,
const unsigned int num_threads,
@@ -43,6 +45,7 @@ class MODULES_EXPORT MLContext : public ScriptWrappable {
~MLContext() override;

V8MLDevicePreference GetDevicePreference() const;
V8MLDeviceType GetDeviceType() const;
V8MLPowerPreference GetPowerPreference() const;
V8MLModelFormat GetModelFormat() const;
unsigned int GetNumThreads() const;
@@ -95,6 +98,7 @@ class MODULES_EXPORT MLContext : public ScriptWrappable {

private:
V8MLDevicePreference device_preference_;
V8MLDeviceType device_type_;
V8MLPowerPreference power_preference_;
V8MLModelFormat model_format_;
unsigned int num_threads_;
12 changes: 11 additions & 1 deletion third_party/blink/renderer/modules/ml/ml_context_options.idl
@@ -18,6 +18,13 @@ enum MLDevicePreference {
"cpu"
};

// Corresponds to WebNN MLDeviceType enum
// https://www.w3.org/TR/webnn/#enumdef-mldevicetype
enum MLDeviceType {
"cpu",
"gpu"
};

enum MLPowerPreference {
// Let the backend selects the most suitable behavior.
"auto",
@@ -34,9 +41,12 @@ enum MLModelFormat {
};

dictionary MLContextOptions {
// Preferred kind of device used.
// Preferred kind of device used for model loader API.
MLDevicePreference devicePreference = "auto";

// Specified type of device used for WebNN API.
MLDeviceType deviceType = "cpu";

// Preference as related to power consumption.
MLPowerPreference powerPreference = "auto";

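
As a usage sketch, the dictionary now carries two independent device knobs; the defaults below are the ones declared in the IDL above, and the comments note which API each one feeds (the `any` cast again stands in for missing WebNN typings):

```ts
// Sketch only: defaults written out explicitly to match the IDL above.
async function createDefaultContext(): Promise<unknown> {
  const options = {
    devicePreference: "auto", // model loader API; unchanged by this CL
    deviceType: "cpu",        // WebNN API; "gpu" selects the mojo-backed path
    powerPreference: "auto",
  };
  return (navigator as any).ml.createContext(options);
}
```
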
14 changes: 10 additions & 4 deletions third_party/blink/renderer/modules/ml/webnn/ml_context_mojo.cc
@@ -34,11 +34,13 @@ PowerPreference ConvertBlinkPowerPreferenceToMojo(
void MLContextMojo::ValidateAndCreateAsync(ScriptPromiseResolver* resolver,
MLContextOptions* options,
ML* ml) {
CHECK_EQ(options->deviceType(), V8MLDeviceType::Enum::kGpu);
// TODO(crbug.com/1273291): Remove unsupported options (ex. model_format)
// once the context gets implemented for non-mojo too.
auto* context = MakeGarbageCollected<MLContextMojo>(
options->devicePreference(), options->powerPreference(),
options->modelFormat(), options->numThreads(), ml);
options->devicePreference(), options->deviceType(),
options->powerPreference(), options->modelFormat(), options->numThreads(),
ml);
context->CreateAsync(resolver, options);
}

@@ -47,9 +49,11 @@ MLContext* MLContextMojo::ValidateAndCreateSync(ScriptState* script_state,
ExceptionState& exception_state,
MLContextOptions* options,
ML* ml) {
CHECK_EQ(options->deviceType(), V8MLDeviceType::Enum::kGpu);
auto* context = MakeGarbageCollected<MLContextMojo>(
options->devicePreference(), options->powerPreference(),
options->modelFormat(), options->numThreads(), ml);
options->devicePreference(), options->deviceType(),
options->powerPreference(), options->modelFormat(), options->numThreads(),
ml);
return context->CreateSync(script_state, options, exception_state);
}

@@ -94,11 +98,13 @@ MLContext* MLContextMojo::CreateSyncImpl(ScriptState* script_state,
}

MLContextMojo::MLContextMojo(const V8MLDevicePreference device_preference,
const V8MLDeviceType device_type,
const V8MLPowerPreference power_preference,
const V8MLModelFormat model_format,
const unsigned int num_threads,
ML* ml)
: MLContext(device_preference,
device_type,
power_preference,
model_format,
num_threads,
third_party/blink/renderer/modules/ml/webnn/ml_context_mojo.h
@@ -34,6 +34,7 @@ class MODULES_EXPORT MLContextMojo : public MLContext {
ML* ml);

MLContextMojo(const V8MLDevicePreference device_preference,
const V8MLDeviceType device_type,
const V8MLPowerPreference power_preference,
const V8MLModelFormat model_format,
const unsigned int num_threads,
12 changes: 5 additions & 7 deletions third_party/blink/renderer/modules/ml/webnn/ml_graph_builder.cc
@@ -1737,17 +1737,15 @@ ScriptPromise MLGraphBuilder::build(ScriptState* script_state,
}

#if BUILDFLAG(BUILD_WEBNN_WITH_XNNPACK)
if (ml_context_->GetDevicePreference() == V8MLDevicePreference::Enum::kAuto ||
ml_context_->GetDevicePreference() == V8MLDevicePreference::Enum::kCpu) {
if (ml_context_->GetDeviceType() == V8MLDeviceType::Enum::kCpu) {
MLGraphXnnpack::ValidateAndBuildAsync(ml_context_, named_outputs, resolver);
return promise;
}
#endif

#if BUILDFLAG(BUILD_WEBNN_ON_CROS)
// On ChromeOS, ML model inferencing is off-loaded to ModelLoader service.
if (ml_context_->GetDevicePreference() == V8MLDevicePreference::Enum::kAuto ||
ml_context_->GetDevicePreference() == V8MLDevicePreference::Enum::kCpu) {
if (ml_context_->GetDeviceType() == V8MLDeviceType::Enum::kCpu) {
MLGraphCrOS::ValidateAndBuildAsync(ml_context_, named_outputs, resolver);
return promise;
}
@@ -1757,7 +1755,8 @@ ScriptPromise MLGraphBuilder::build(ScriptState* script_state,
// The runtime enable feature is used to disable the cross process hardware
// acceleration by default.
if (base::FeatureList::IsEnabled(
webnn::features::kEnableMachineLearningNeuralNetworkService)) {
webnn::features::kEnableMachineLearningNeuralNetworkService) &&
ml_context_->GetDeviceType() == V8MLDeviceType::Enum::kGpu) {
// Reject unsupported error on unimplemented platform when getting
// `WebNNContext` mojo interface with BrowserInterfaceBroker's
// GetInterface() method before creating `WebNNGraph` message pipe.
@@ -1782,8 +1781,7 @@ MLGraph* MLGraphBuilder::buildSync(const MLNamedOperands& named_outputs,
}

#if BUILDFLAG(BUILD_WEBNN_WITH_XNNPACK)
if (ml_context_->GetDevicePreference() == V8MLDevicePreference::Enum::kAuto ||
ml_context_->GetDevicePreference() == V8MLDevicePreference::Enum::kCpu) {
if (ml_context_->GetDeviceType() == V8MLDeviceType::Enum::kCpu) {
return MLGraphXnnpack::ValidateAndBuildSync(ml_context_, named_outputs,
exception_state);
}
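
To show how the builder-side checks above play out, a hedged end-to-end sketch; the operand-descriptor field names follow the WebNN draft current at the time and are assumptions that may differ in later spec revisions:

```ts
// Sketch only: which backend services build() now follows the context's
// deviceType rather than the old devicePreference.
async function buildSmallGraph(): Promise<unknown> {
  const context = await (navigator as any).ml.createContext({ deviceType: "cpu" });
  const builder = new (globalThis as any).MLGraphBuilder(context);
  // Descriptor fields ("type"/"dimensions") are assumed from the
  // contemporary WebNN draft.
  const a = builder.input("a", { type: "float32", dimensions: [2, 2] });
  const b = builder.input("b", { type: "float32", dimensions: [2, 2] });
  const c = builder.add(a, b);
  // With deviceType "cpu" this graph is built by MLGraphXnnpack (or
  // MLGraphCrOS on ChromeOS); with "gpu" and the feature flag enabled it
  // goes through the WebNN mojo service instead.
  return builder.build({ c });
}
```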
