Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ class QnnInterface {
DEFINE_SHIM_FUNCTION_INTERFACE(log_set_log_level, logSetLogLevel);
// --------- QnnProfile ---------
DEFINE_SHIM_FUNCTION_INTERFACE(profile_create, profileCreate);
DEFINE_SHIM_FUNCTION_INTERFACE(profile_set_config, profileSetConfig);
DEFINE_SHIM_FUNCTION_INTERFACE(profile_get_events, profileGetEvents);
DEFINE_SHIM_FUNCTION_INTERFACE(profile_get_sub_events, profileGetSubEvents);
DEFINE_SHIM_FUNCTION_INTERFACE(profile_get_event_data, profileGetEventData);
Expand Down
6 changes: 5 additions & 1 deletion backends/qualcomm/runtime/backends/QnnGraphCommon.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class QnnGraph {

// Finalizes the QNN graph, passing the graph's profile handle so QNN
// records finalize-phase profiling events when profiling is enabled.
// Returns the QNN error handle from qnn_graph_finalize.
// NOTE: profile_->GetHandle() is expected to return nullptr when profiling
// is off (QnnProfile leaves handle_ null in that case), which QNN accepts.
Qnn_ErrorHandle_t GraphFinalize() {
  return implementation_.GetQnnInterface().qnn_graph_finalize(
      handle_, profile_->GetHandle(), nullptr /* signal_handle */);
}
Qnn_ErrorHandle_t ProfileExecuteData(
executorch::runtime::EventTracer* event_tracer) {
Expand All @@ -62,6 +62,10 @@ class QnnGraph {
return handle_;
}

// Returns a non-owning pointer to the profiler associated with this graph.
// Ownership stays with profile_ (a smart pointer — note the .get()); callers
// must not delete the returned pointer.
QnnProfile* GetProfile() {
  return profile_.get();
}

protected:
virtual executorch::runtime::Error MakeConfig(
std::vector<const QnnGraph_Config_t*>& config) {
Expand Down
35 changes: 34 additions & 1 deletion backends/qualcomm/runtime/backends/QnnProfiler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,20 @@ QnnProfile::QnnProfile(
: handle_(nullptr), implementation_(implementation), backend_(backend) {
if (profile_level != QnnExecuTorchProfileLevel::kProfileOff) {
const QnnInterface& qnn_interface = implementation_.GetQnnInterface();

QnnProfile_Level_t qnnProfileLevel = 0;
if (profile_level == QnnExecuTorchProfileLevel::kProfileBasic) {
qnnProfileLevel = QNN_PROFILE_LEVEL_BASIC;
} else if (profile_level == QnnExecuTorchProfileLevel::kProfileDetailed
|| profile_level == QnnExecuTorchProfileLevel::kProfileOptrace) {
qnnProfileLevel = QNN_PROFILE_LEVEL_DETAILED;
} else {
QNN_EXECUTORCH_LOG_WARN("Invalid profile level");
return;
}

Qnn_ErrorHandle_t error = qnn_interface.qnn_profile_create(
backend_->GetHandle(), static_cast<int>(profile_level), &handle_);
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you want to fix profile_level?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe we should add the level to QnnExecuTorchProfileLevel. Maybe typo? I saw kProfileOptrace = 3 there.

@shewu-quic , how about the linting profile level? Should we reserve a number for that?

Copy link
Contributor Author

@limintang limintang Oct 29, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not a typo. Optrace requires the profiling level to be QNN_PROFILE_LEVEL_DETAILED (2), the same as kProfileDetailed. I considered adding an additional config for optrace, but it would require signature changes in many functions, so I chose to add a new profiling level kProfileOptrace in the schema; the profiling level passed to the QNN compiler for it is the same as for kProfileDetailed.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated to remove the hardcoded value of QNN profile level.

backend_->GetHandle(), qnnProfileLevel, &handle_);
if (error != QNN_SUCCESS) {
QNN_EXECUTORCH_LOG_WARN(
"Failed to create profile_handle for backend "
Expand All @@ -31,6 +43,27 @@ QnnProfile::QnnProfile(
// ignore error and continue to create backend handle...
handle_ = nullptr;
}

if (profile_level == QnnExecuTorchProfileLevel::kProfileOptrace) {
if (handle_ == nullptr) {
QNN_EXECUTORCH_LOG_WARN("Profile handle is null, cannot enable optrace");
return;
}

QnnProfile_Config_t qnnProfileConfig = QNN_PROFILE_CONFIG_INIT;
qnnProfileConfig.option = QNN_PROFILE_CONFIG_OPTION_ENABLE_OPTRACE;
std::array<const QnnProfile_Config_t*, 2> profileConfigs = {
&qnnProfileConfig, nullptr};
error = qnn_interface.qnn_profile_set_config(handle_, profileConfigs.data());

if (error != QNN_SUCCESS) {
QNN_EXECUTORCH_LOG_WARN(
"Failed to set optrace for backend "
" %u, error=%d",
qnn_interface.GetBackendId(),
QNN_GET_ERROR_CODE(error));
}
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,7 @@ class QnnExecuTorchProfileLevel(IntEnum):
kProfileOff = 0
kProfileBasic = 1
kProfileDetailed = 2
kProfileOptrace = 3


@dataclass
Expand Down
1 change: 1 addition & 0 deletions backends/qualcomm/serialization/schema.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,7 @@ enum QnnExecuTorchProfileLevel: int {
kProfileOff = 0,
kProfileBasic,
kProfileDetailed,
kProfileOptrace,
}

/// QNN backends currently supported
Expand Down
7 changes: 6 additions & 1 deletion backends/qualcomm/utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -770,6 +770,7 @@ def generate_qnn_executorch_compiler_spec(
online_prepare: bool = False,
dump_intermediate_outputs: bool = False,
profile: bool = False,
optrace: bool = False,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How about change profile: bool to an enum class or something similar? Then users can choose profile level they want.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point. However, before this change kProfileDetailed is the only supported profile level. It might be better to make this change in a separate diff.

shared_buffer: bool = False,
is_from_context_binary: bool = False,
) -> List[CompileSpec]:
Expand Down Expand Up @@ -831,7 +832,11 @@ def generate_qnn_executorch_compiler_spec(
if saver:
qnn_executorch_options.library_path = "libQnnSaver.so"

if profile:
if optrace:
qnn_executorch_options.profile_level = (
QnnExecuTorchProfileLevel.kProfileOptrace
)
elif profile:
qnn_executorch_options.profile_level = (
QnnExecuTorchProfileLevel.kProfileDetailed
)
Expand Down